/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static int btt_major;

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets are 4K from the base of the device */
	offset += SZ_4K;
	return nvdimm_read_bytes(ndns, offset, buf, n);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets are 4K from the base of the device */
	offset += SZ_4K;
	return nvdimm_write_bytes(ndns, offset, buf, n);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb));
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb));
}

static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	WARN_ON(!super);
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb));
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE);
}

static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping &= MAP_LBA_MASK;

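	/*
	 * Sketch of the resulting on-media encoding (as derived from the
	 * cases below), using bits [31:30] of the 32-bit map entry:
	 *   00 - initial/identity state (never written)
	 *   01 - error ('E') flag set
	 *   10 - zero/trim ('Z') flag set
	 *   11 - normal mapped entry
	 */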
	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		WARN_ONCE(1, "Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le);
}

static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	WARN_ON(lba >= arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = (raw_mapping & MAP_TRIM_MASK) >> MAP_TRIM_SHIFT;
	e_flag = (raw_mapping & MAP_ERR_MASK) >> MAP_ERR_SHIFT;
	ze = (z_flag << 1) + e_flag;
	postmap = raw_mapping & MAP_LBA_MASK;

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}

static int btt_log_read_pair(struct arena_info *arena, u32 lane,
			struct log_entry *ent)
{
	WARN_ON(!ent);
	return arena_read_bytes(arena,
			arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
			2 * LOG_ENT_SIZE);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If for some reason, parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * If ent[0] has a sequence number of zero (a freshly initialized
 * lane), it is bootstrapped to 1 and slot 0 is reported as the old
 * entry.
 * Finally, it returns which of the entries was the older one.
 *
 * TODO: The logic feels a bit kludgy; make it better.
 */
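/*
 * Illustration: sequence numbers cycle 1 -> 2 -> 3 -> 1, so the entry
 * whose seq immediately follows the other's is the newer one. For
 * example, (ent[0].seq, ent[1].seq) == (1, 2) gives old = 0, (2, 1)
 * gives old = 1, and the wraparound case (1, 3) gives old = 1 since
 * 1 follows 3 in the cycle.
 */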
static int btt_log_get_old(struct log_entry *ent)
{
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (ent[0].seq == 0) {
		ent[0].seq = cpu_to_le32(1);
		return 0;
	}

	if (ent[0].seq == ent[1].seq)
		return -EINVAL;
	if (le32_to_cpu(ent[0].seq) + le32_to_cpu(ent[1].seq) > 5)
		return -EINVAL;

	if (le32_to_cpu(ent[0].seq) < le32_to_cpu(ent[1].seq)) {
		if (le32_to_cpu(ent[1].seq) - le32_to_cpu(ent[0].seq) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if (le32_to_cpu(ent[0].seq) - le32_to_cpu(ent[1].seq) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}

static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_entry log[2];

	ret = btt_log_read_pair(arena, lane, log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(log);
	if (old_ent < 0 || old_ent > 1) {
		dev_info(to_dev(arena),
				"log corruption (%d): lane %d seq [%d, %d]\n",
				old_ent, lane, le32_to_cpu(log[0].seq),
				le32_to_cpu(log[1].seq));
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log[ret_ent], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
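/*
 * A sketch of the on-media entry, assuming the struct log_entry layout
 * from btt.h: a 16B payload { lba, old_map, new_map, seq } followed by
 * padding out to LOG_ENT_SIZE. Writing the payload as two 8B halves
 * places 'seq' in the second half, so a power failure between the two
 * writes leaves the old sequence number intact and the torn entry is
 * simply never treated as committed.
 */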
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent)
{
	int ret;
	/*
	 * Ignore the padding in log_entry for calculating log_half.
	 * The entry is 'committed' when we write the sequence number,
	 * and we want to ensure that that is the last thing written.
	 * We don't bother writing the padding as that would be extra
	 * media wear and write amplification
	 */
	unsigned int log_half = (LOG_ENT_SIZE - 2 * sizeof(u64)) / 2;
	u64 ns_off = arena->logoff + (((2 * lane) + sub) * LOG_ENT_SIZE);
	void *src = ent;

	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half);
}

static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	arena->freelist[lane].block = le32_to_cpu(ent->old_map);

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
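/*
 * Example of the initial state this creates for lane i: the first log
 * slot holds { lba = i, old_map = new_map = external_nlba + i,
 * seq = LOG_SEQ_INIT } and the second slot is zeroed, so each of the
 * nfree reserved blocks beyond external_nlba starts out free.
 */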
static int btt_log_init(struct arena_info *arena)
{
	int ret;
	u32 i;
	struct log_entry log, zerolog;

	memset(&zerolog, 0, sizeof(zerolog));

	for (i = 0; i < arena->nfree; i++) {
		log.lba = cpu_to_le32(i);
		log.old_map = cpu_to_le32(arena->external_nlba + i);
		log.new_map = cpu_to_le32(arena->external_nlba + i);
		log.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &log);
		if (ret)
			return ret;
		ret = __btt_log_write(arena, i, 1, &zerolog);
		if (ret)
			return ret;
	}

	return 0;
}

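/*
 * Freelist reconstruction replays the flog. A sketch of the per-lane
 * recovery decision made below:
 *   old_map == new_map  -> untouched or newly initialized entry
 *   map == new_map      -> the last transaction completed fully
 *   map == old_map      -> the flog was committed but the map update
 *                          was lost, so the map write is redone here
 */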
static int btt_freelist_init(struct arena_info *arena)
{
	int new, ret;
	u32 i, map_entry;
	struct log_entry log_new;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = le32_to_cpu(log_new.old_map);

		/* This implies a newly created or untouched flog entry */
		if (log_new.old_map == log_new.new_map)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL);
		if (ret)
			return ret;
		if ((le32_to_cpu(log_new.new_map) != map_entry) &&
				(le32_to_cpu(log_new.old_map) == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = 1;
	arena->version_minor = 1;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(2 * arena->nfree * sizeof(struct log_entry),
				BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

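	/*
	 * Resulting arena layout, in increasing offset order (a sketch of
	 * the assignments below):
	 *
	 *   | info (4K) | data | map | log | info2 (4K) |
	 */
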
	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;
	return arena;
}

static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_info(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}

static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	memcpy(super->uuid, nd_btt->uuid, 16);
	memcpy(super->parent_uuid, parent_uuid, 16);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
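/*
 * The lock index stripes map entries across the nfree locks by
 * cacheline: with 4-byte map entries and a typical 64-byte cacheline,
 * each group of 16 consecutive premap LBAs shares one lock, and the
 * groups are distributed round-robin over the nfree locks.
 */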
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len);
		else
			ret = arena_read_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len);

		kunmap_atomic(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len);
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

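/*
 * Read path overview (as implemented below): translate the premap LBA
 * to its postmap block, advertise that block in this lane's RTT slot
 * so concurrent writers will not recycle it, then re-read the map to
 * verify the translation did not change before reading the data (and
 * integrity metadata, if present).
 */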
static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &t_flag,
						&e_flag);
			if (ret)
				goto out_rtt;

			if (postmap == new_map)
				break;

			postmap = new_map;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret)
			goto out_rtt;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

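/*
 * Write path overview (as implemented below): take a free block from
 * this lane's freelist, wait until no reader advertises that block in
 * the RTT, write the new data (and integrity metadata) to it, and then,
 * under the map lock, commit a flog entry recording old->new before
 * updating the map entry itself.
 */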
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			int rw, sector_t sector)
{
	int ret;

	if (rw == READ) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = q->queuedata;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0, rw;
	bool do_acct;

	/*
	 * bio_integrity_enabled also checks if the bio already has an
	 * integrity payload attached. If it does, we *don't* do a
	 * bio_integrity_prep here - the payload has been generated by
	 * another kernel subsystem, and we just pass it through.
	 */
	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
		bio->bi_error = -EIO;
		goto out;
	}

	do_acct = nd_iostat_start(bio, &start);
	rw = bio_data_dir(bio);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		BUG_ON(len > PAGE_SIZE);
		/* Make sure len is in multiples of sector size. */
		/* XXX is this right? */
		BUG_ON(len < btt->sector_size);
		BUG_ON(len % btt->sector_size);

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				rw, iter.bi_sector);
		if (err) {
			dev_info(&btt->nd_btt->dev,
					"io error in %s sector %lld, len %d\n",
					(rw == READ) ? "READ" : "WRITE",
					(unsigned long long) iter.bi_sector, len);
			bio->bi_error = err;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

out:
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, int rw)
{
	struct btt *btt = bdev->bd_disk->private_data;
	int rc;

	rc = btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
	if (rc == 0)
		page_endio(page, rw & WRITE, 0);

	return rc;
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		btt_rw_page,
	.getgeo =		btt_getgeo,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* create a new disk and request queue for btt */
	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
	if (!btt->btt_queue)
		return -ENOMEM;

	btt->btt_disk = alloc_disk(0);
	if (!btt->btt_disk) {
		blk_cleanup_queue(btt->btt_queue);
		return -ENOMEM;
	}

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->driverfs_dev = &btt->nd_btt->dev;
	btt->btt_disk->major = btt_major;
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;
	btt->btt_disk->queue = btt->btt_queue;
	btt->btt_disk->flags = GENHD_FL_EXT_DEVT;

	blk_queue_make_request(btt->btt_queue, btt_make_request);
	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
	blk_queue_bounce_limit(btt->btt_queue, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
	btt->btt_queue->queuedata = btt;

	set_capacity(btt->btt_disk, 0);
	add_disk(btt->btt_disk);
	if (btt_meta_size(btt)) {
		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

		if (rc) {
			del_gendisk(btt->btt_disk);
			put_disk(btt->btt_disk);
			blk_cleanup_queue(btt->btt_queue);
			return rc;
		}
	}
	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	revalidate_disk(btt->btt_disk);

	return 0;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
	blk_cleanup_queue(btt->btt_queue);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @nd_region:	parent region of the backing device, used to acquire IO lanes
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
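/*
 * Note on sizes: sector_size below is what the block layer sees (4096
 * when lbasize >= 4096, else 512); any remainder of lbasize, e.g. 8
 * bytes for a hypothetical 4104-byte lbasize, is exposed as integrity
 * metadata via btt_meta_size().
 */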
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct device *dev = &nd_btt->dev;

	btt = kzalloc(sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		goto out_free;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_info(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		goto out_free;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			goto out_free;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			goto out_free;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		goto out_free;
	}

	btt_debugfs_init(btt);

	return btt;

 out_free:
	kfree(btt);
	return NULL;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
		kfree(btt);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt *btt;
	size_t rawsize;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize)
		return -ENODEV;

	rawsize = nvdimm_namespace_capacity(ndns) - SZ_4K;
	if (rawsize < ARENA_MIN_SIZE)
		return -ENXIO;

	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc;

	btt_major = register_blkdev(0, "btt");
	if (btt_major < 0)
		return btt_major;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root)) {
		rc = -ENXIO;
		goto err_debugfs;
	}

	return 0;

 err_debugfs:
	unregister_blkdev(btt_major, "btt");

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
	unregister_blkdev(btt_major, "btt");
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);