Lines matching "lane" in drivers/nvdimm/btt.c
3 * Copyright (c) 2014-2015, Intel Corporation.
26 #include <linux/backing-dev.h>
37 return &arena->nd_btt->dev; in to_dev()
42 return offset + nd_btt->initial_offset; in adjust_initial_offset()
48 struct nd_btt *nd_btt = arena->nd_btt; in arena_read_bytes()
49 struct nd_namespace_common *ndns = nd_btt->ndns; in arena_read_bytes()
59 struct nd_btt *nd_btt = arena->nd_btt; in arena_write_bytes()
60 struct nd_namespace_common *ndns = nd_btt->ndns; in arena_write_bytes()
76 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512), in btt_info_write()
77 "arena->infooff: %#llx is unaligned\n", arena->infooff); in btt_info_write()
78 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512), in btt_info_write()
79 "arena->info2off: %#llx is unaligned\n", arena->info2off); in btt_info_write()
81 ret = arena_write_bytes(arena, arena->info2off, super, in btt_info_write()
86 return arena_write_bytes(arena, arena->infooff, super, in btt_info_write()
92 return arena_read_bytes(arena, arena->infooff, super, in btt_info_read()
99 * mapping is in little-endian
105 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE); in __btt_map_write()
107 if (unlikely(lba >= arena->external_nlba)) in __btt_map_write()
110 __func__, lba, arena->external_nlba); in __btt_map_write()
151 return -EIO; in btt_map_write()
164 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE); in btt_map_read()
166 if (unlikely(lba >= arena->external_nlba)) in btt_map_read()
169 __func__, lba, arena->external_nlba); in btt_map_read()
203 return -EIO; in btt_map_read()
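Each external LBA owns one 4-byte, little-endian map entry at mapoff + lba * MAP_ENT_SIZE; the two high bits carry the zero (trim) and error flags, and the remaining bits are the postmap block. A minimal userspace sketch of that arithmetic; the shift values are assumed from the MAP_* constants in btt.h, not from the lines above:

#include <stdint.h>
#include <stdio.h>

#define MAP_ENT_SIZE	4
#define MAP_TRIM_SHIFT	31	/* "zero"/trimmed flag (assumed from btt.h) */
#define MAP_ERR_SHIFT	30	/* media-error flag (assumed from btt.h) */
#define MAP_LBA_MASK	(~((1u << MAP_TRIM_SHIFT) | (1u << MAP_ERR_SHIFT)))

/* Byte offset, within the namespace, of the map entry for an external LBA. */
static uint64_t map_ent_off(uint64_t mapoff, uint32_t lba)
{
	return mapoff + (uint64_t)lba * MAP_ENT_SIZE;
}

int main(void)
{
	uint32_t ent = (1u << MAP_ERR_SHIFT) | 0x1234;	/* error flag set */

	printf("entry for lba 7 at %#llx\n",
	       (unsigned long long)map_ent_off(0x100000, 7));
	printf("postmap=%#x err=%u zero=%u\n", ent & MAP_LBA_MASK,
	       !!(ent & (1u << MAP_ERR_SHIFT)),
	       !!(ent & (1u << MAP_TRIM_SHIFT)));
	return 0;
}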
214 static int btt_log_group_read(struct arena_info *arena, u32 lane, in btt_log_group_read() argument
218 arena->logoff + (lane * LOG_GRP_SIZE), log, in btt_log_group_read()
238 a->debugfs_dir = d; in arena_debugfs_init()
240 debugfs_create_x64("size", S_IRUGO, d, &a->size); in arena_debugfs_init()
242 &a->external_lba_start); in arena_debugfs_init()
243 debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba); in arena_debugfs_init()
245 &a->internal_lbasize); in arena_debugfs_init()
246 debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba); in arena_debugfs_init()
248 &a->external_lbasize); in arena_debugfs_init()
249 debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree); in arena_debugfs_init()
250 debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major); in arena_debugfs_init()
251 debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor); in arena_debugfs_init()
252 debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff); in arena_debugfs_init()
253 debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff); in arena_debugfs_init()
254 debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff); in arena_debugfs_init()
255 debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff); in arena_debugfs_init()
256 debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff); in arena_debugfs_init()
257 debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off); in arena_debugfs_init()
258 debugfs_create_x32("flags", S_IRUGO, d, &a->flags); in arena_debugfs_init()
259 debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]); in arena_debugfs_init()
260 debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]); in arena_debugfs_init()
268 btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev), in btt_debugfs_init()
270 if (IS_ERR_OR_NULL(btt->debugfs_dir)) in btt_debugfs_init()
273 list_for_each_entry(arena, &btt->arena_list, list) { in btt_debugfs_init()
274 arena_debugfs_init(arena, btt->debugfs_dir, i); in btt_debugfs_init()
281 return le32_to_cpu(log->ent[log_idx].seq); in log_seq()
291 * TODO: this logic is a bit kludgey; make it better.
295 int idx0 = a->log_index[0]; in btt_log_get_old()
296 int idx1 = a->log_index[1]; in btt_log_get_old()
305 log->ent[idx0].seq = cpu_to_le32(1); in btt_log_get_old()
310 return -EINVAL; in btt_log_get_old()
312 return -EINVAL; in btt_log_get_old()
315 if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1) in btt_log_get_old()
320 if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1) in btt_log_get_old()
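The log sequence numbers cycle 1 -> 2 -> 3 -> 1 (0 marks a slot that was never written), so "older" cannot be decided by a plain comparison at the wrap. A userspace sketch of the rule btt_log_get_old() applies, with the wrap cases spelled out; the uninitialized (seq 0) and corrupt cases handled above are omitted:

#include <assert.h>

/* Which of two live slots holds the older log entry; seq cycles 1,2,3. */
static int older_slot(unsigned int s0, unsigned int s1)
{
	if (s0 < s1)
		return (s1 - s0 == 1) ? 0 : 1;
	return (s0 - s1 == 1) ? 1 : 0;
}

int main(void)
{
	assert(older_slot(1, 2) == 0);	/* 2 written after 1 */
	assert(older_slot(2, 3) == 0);	/* 3 written after 2 */
	assert(older_slot(3, 1) == 0);	/* 1 wrapped past 3, so 3 is older */
	assert(older_slot(1, 3) == 1);	/* same wrap, slots swapped */
	return 0;
}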
331 * it is not NULL. It returns the sub-slot number (0 or 1)
335 static int btt_log_read(struct arena_info *arena, u32 lane, in btt_log_read() argument
342 ret = btt_log_group_read(arena, lane, &log); in btt_log_read()
344 return -EIO; in btt_log_read()
349 "log corruption (%d): lane %d seq [%d, %d]\n", in btt_log_read()
350 old_ent, lane, log.ent[arena->log_index[0]].seq, in btt_log_read()
351 log.ent[arena->log_index[1]].seq); in btt_log_read()
353 return -EIO; in btt_log_read()
356 ret_ent = (old_flag ? old_ent : (1 - old_ent)); in btt_log_read()
359 memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE); in btt_log_read()
369 static int __btt_log_write(struct arena_info *arena, u32 lane, in __btt_log_write() argument
373 u32 group_slot = arena->log_index[sub]; in __btt_log_write()
378 ns_off = arena->logoff + (lane * LOG_GRP_SIZE) + in __btt_log_write()
390 static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub, in btt_flog_write() argument
395 ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC); in btt_flog_write()
400 arena->freelist[lane].sub = 1 - arena->freelist[lane].sub; in btt_flog_write()
401 if (++(arena->freelist[lane].seq) == 4) in btt_flog_write()
402 arena->freelist[lane].seq = 1; in btt_flog_write()
403 if (ent_e_flag(le32_to_cpu(ent->old_map))) in btt_flog_write()
404 arena->freelist[lane].has_err = 1; in btt_flog_write()
405 arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map)); in btt_flog_write()
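Once the flog entry is durably written, the lane's free-list state advances: the next write will target the other sub-slot, the sequence number steps through the same 1..3 cycle (4 wraps back to 1), and the block just displaced from the map becomes the lane's next free block. A sketch of that bookkeeping, using a simplified stand-in for struct free_entry:

struct free_entry_sketch {
	unsigned int block;	/* next free premap block for this lane */
	unsigned int sub;	/* sub-slot the next flog write targets */
	unsigned int seq;	/* cycles 1 -> 2 -> 3 -> 1 */
	int has_err;		/* freed block may carry a media error */
};

/* Mirrors the post-write updates in btt_flog_write(); sketch only. */
static void advance_freelist(struct free_entry_sketch *fe,
			     unsigned int old_map, int old_map_err)
{
	fe->sub = 1 - fe->sub;
	if (++fe->seq == 4)
		fe->seq = 1;
	if (old_map_err)
		fe->has_err = 1;
	fe->block = old_map;
}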
412 * all-zeroes, and indicates an identity mapping
416 int ret = -EINVAL; in btt_map_init()
420 size_t mapsize = arena->logoff - arena->mapoff; in btt_map_init()
424 return -ENOMEM; in btt_map_init()
431 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512), in btt_map_init()
432 "arena->mapoff: %#llx is unaligned\n", arena->mapoff); in btt_map_init()
439 ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf, in btt_map_init()
445 mapsize -= size; in btt_map_init()
460 size_t logsize = arena->info2off - arena->logoff; in btt_log_init()
469 return -ENOMEM; in btt_log_init()
475 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512), in btt_log_init()
476 "arena->logoff: %#llx is unaligned\n", arena->logoff); in btt_log_init()
483 ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf, in btt_log_init()
489 logsize -= size; in btt_log_init()
493 for (i = 0; i < arena->nfree; i++) { in btt_log_init()
495 ent.old_map = cpu_to_le32(arena->external_nlba + i); in btt_log_init()
496 ent.new_map = cpu_to_le32(arena->external_nlba + i); in btt_log_init()
510 return arena->dataoff + ((u64)lba * arena->internal_lbasize); in to_namespace_offset()
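For example, with dataoff = 4096 and 512-byte internal blocks, postmap block 10 starts at namespace byte 4096 + 10 * 512 = 9216.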
513 static int arena_clear_freelist_error(struct arena_info *arena, u32 lane) in arena_clear_freelist_error() argument
517 if (arena->freelist[lane].has_err) { in arena_clear_freelist_error()
519 u32 lba = arena->freelist[lane].block; in arena_clear_freelist_error()
521 unsigned long len = arena->sector_size; in arena_clear_freelist_error()
523 mutex_lock(&arena->err_lock); in arena_clear_freelist_error()
532 len -= chunk; in arena_clear_freelist_error()
535 arena->freelist[lane].has_err = 0; in arena_clear_freelist_error()
537 mutex_unlock(&arena->err_lock); in arena_clear_freelist_error()
548 arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry), in btt_freelist_init()
550 if (!arena->freelist) in btt_freelist_init()
551 return -ENOMEM; in btt_freelist_init()
553 for (i = 0; i < arena->nfree; i++) { in btt_freelist_init()
563 arena->freelist[i].sub = 1 - new; in btt_freelist_init()
564 arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq)); in btt_freelist_init()
565 arena->freelist[i].block = log_oldmap; in btt_freelist_init()
569 * the BTT read-only in btt_freelist_init()
573 arena->freelist[i].has_err = 1; in btt_freelist_init()
614 return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0) in ent_is_padding()
615 && (ent->seq == 0); in ent_is_padding()
621 * four slots. We expect that a padding slot will be all-zeroes, and use this
625 * creation of this BTT layout, it will have three of the four slots with
634 int ret, log_index[2] = {-1, -1}; in log_set_indices()
639 for (i = 0; i < arena->nfree; i++) { in log_set_indices()
663 return -ENXIO; in log_set_indices()
675 return -ENXIO; in log_set_indices()
680 * lane never got used and it is still in log_set_indices()
687 return -ENXIO; in log_set_indices()
693 * non-padding entry, then we are no longer in the in log_set_indices()
702 return -ENXIO; in log_set_indices()
719 return -ENXIO; in log_set_indices()
722 arena->log_index[0] = log_index[0]; in log_set_indices()
723 arena->log_index[1] = log_index[1]; in log_set_indices()
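Each lane's log group is four LOG_ENT_SIZE slots, of which only two ever hold live entries; a padding slot stays all-zeroes. Layouts created before the padding change use slot pair {0, 1}, padded layouts use {0, 2}, and log_set_indices() recovers the pair in use by scanning every lane for the all-zero slots. A sketch of the group layout, with field sizes assumed from struct log_entry in btt.h:

#include <stdint.h>

struct log_entry_sketch {		/* 16 bytes, LOG_ENT_SIZE */
	uint32_t lba;
	uint32_t old_map;
	uint32_t new_map;
	uint32_t seq;			/* 0 in padding and unused slots */
};

struct log_group_sketch {		/* 64 bytes, LOG_GRP_SIZE */
	struct log_entry_sketch ent[4];	/* live pair is {0,1} or {0,2} */
};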
731 arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL); in btt_rtt_init()
732 if (arena->rtt == NULL) in btt_rtt_init()
733 return -ENOMEM; in btt_rtt_init()
742 arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock), in btt_maplocks_init()
744 if (!arena->map_locks) in btt_maplocks_init()
745 return -ENOMEM; in btt_maplocks_init()
747 for (i = 0; i < arena->nfree; i++) in btt_maplocks_init()
748 spin_lock_init(&arena->map_locks[i].lock); in btt_maplocks_init()
763 arena->nd_btt = btt->nd_btt; in alloc_arena()
764 arena->sector_size = btt->sector_size; in alloc_arena()
765 mutex_init(&arena->err_lock); in alloc_arena()
770 arena->size = size; in alloc_arena()
771 arena->external_lba_start = start; in alloc_arena()
772 arena->external_lbasize = btt->lbasize; in alloc_arena()
773 arena->internal_lbasize = roundup(arena->external_lbasize, in alloc_arena()
775 arena->nfree = BTT_DEFAULT_NFREE; in alloc_arena()
776 arena->version_major = btt->nd_btt->version_major; in alloc_arena()
777 arena->version_minor = btt->nd_btt->version_minor; in alloc_arena()
780 available -= (available % BTT_PG_SIZE); in alloc_arena()
783 available -= 2 * BTT_PG_SIZE; in alloc_arena()
786 logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE); in alloc_arena()
787 available -= logsize; in alloc_arena()
790 arena->internal_nlba = div_u64(available - BTT_PG_SIZE, in alloc_arena()
791 arena->internal_lbasize + MAP_ENT_SIZE); in alloc_arena()
792 arena->external_nlba = arena->internal_nlba - arena->nfree; in alloc_arena()
794 mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE); in alloc_arena()
795 datasize = available - mapsize; in alloc_arena()
798 arena->infooff = arena_off; in alloc_arena()
799 arena->dataoff = arena->infooff + BTT_PG_SIZE; in alloc_arena()
800 arena->mapoff = arena->dataoff + datasize; in alloc_arena()
801 arena->logoff = arena->mapoff + mapsize; in alloc_arena()
802 arena->info2off = arena->logoff + logsize; in alloc_arena()
805 arena->log_index[0] = 0; in alloc_arena()
806 arena->log_index[1] = 1; in alloc_arena()
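Putting the layout arithmetic together: the arena is [info | data | map | log | info2], with everything except the data area rounded to BTT_PG_SIZE. A self-contained sketch that reproduces the computation for an example 512 MiB arena with 512-byte internal blocks and 256 free blocks; the sizes are illustrative inputs, and the constants are assumed to mirror btt.h:

#include <stdint.h>
#include <stdio.h>

#define BTT_PG_SIZE	4096ULL
#define MAP_ENT_SIZE	4ULL
#define LOG_GRP_SIZE	64ULL

static uint64_t roundup_pg(uint64_t n)
{
	return (n + BTT_PG_SIZE - 1) / BTT_PG_SIZE * BTT_PG_SIZE;
}

int main(void)
{
	uint64_t size = 512ULL << 20;		/* example: 512 MiB arena */
	uint64_t internal_lbasize = 512;	/* example: 512-byte blocks */
	uint64_t nfree = 256;			/* BTT_DEFAULT_NFREE */

	uint64_t available = size - (size % BTT_PG_SIZE);
	uint64_t logsize = roundup_pg(nfree * LOG_GRP_SIZE);

	available -= 2 * BTT_PG_SIZE;		/* the two info blocks */
	available -= logsize;

	uint64_t internal_nlba = (available - BTT_PG_SIZE) /
				 (internal_lbasize + MAP_ENT_SIZE);
	uint64_t external_nlba = internal_nlba - nfree;
	uint64_t mapsize = roundup_pg(external_nlba * MAP_ENT_SIZE);
	uint64_t datasize = available - mapsize;

	/* arena-relative offsets, in the order alloc_arena() lays them out */
	uint64_t infooff = 0;
	uint64_t dataoff = infooff + BTT_PG_SIZE;
	uint64_t mapoff = dataoff + datasize;
	uint64_t logoff = mapoff + mapsize;
	uint64_t info2off = logoff + logsize;

	printf("nlba=%llu data@%llu map@%llu log@%llu info2@%llu\n",
	       (unsigned long long)external_nlba,
	       (unsigned long long)dataoff,
	       (unsigned long long)mapoff,
	       (unsigned long long)logoff,
	       (unsigned long long)info2off);
	return 0;
}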
814 list_for_each_entry_safe(arena, next, &btt->arena_list, list) { in free_arenas()
815 list_del(&arena->list); in free_arenas()
816 kfree(arena->rtt); in free_arenas()
817 kfree(arena->map_locks); in free_arenas()
818 kfree(arena->freelist); in free_arenas()
819 debugfs_remove_recursive(arena->debugfs_dir); in free_arenas()
831 arena->internal_nlba = le32_to_cpu(super->internal_nlba); in parse_arena_meta()
832 arena->internal_lbasize = le32_to_cpu(super->internal_lbasize); in parse_arena_meta()
833 arena->external_nlba = le32_to_cpu(super->external_nlba); in parse_arena_meta()
834 arena->external_lbasize = le32_to_cpu(super->external_lbasize); in parse_arena_meta()
835 arena->nfree = le32_to_cpu(super->nfree); in parse_arena_meta()
836 arena->version_major = le16_to_cpu(super->version_major); in parse_arena_meta()
837 arena->version_minor = le16_to_cpu(super->version_minor); in parse_arena_meta()
839 arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off + in parse_arena_meta()
840 le64_to_cpu(super->nextoff)); in parse_arena_meta()
841 arena->infooff = arena_off; in parse_arena_meta()
842 arena->dataoff = arena_off + le64_to_cpu(super->dataoff); in parse_arena_meta()
843 arena->mapoff = arena_off + le64_to_cpu(super->mapoff); in parse_arena_meta()
844 arena->logoff = arena_off + le64_to_cpu(super->logoff); in parse_arena_meta()
845 arena->info2off = arena_off + le64_to_cpu(super->info2off); in parse_arena_meta()
847 arena->size = (le64_to_cpu(super->nextoff) > 0) in parse_arena_meta()
848 ? (le64_to_cpu(super->nextoff)) in parse_arena_meta()
849 : (arena->info2off - arena->infooff + BTT_PG_SIZE); in parse_arena_meta()
851 arena->flags = le32_to_cpu(super->flags); in parse_arena_meta()
859 size_t remaining = btt->rawsize; in discover_arenas()
866 return -ENOMEM; in discover_arenas()
872 ret = -ENOMEM; in discover_arenas()
876 arena->infooff = cur_off; in discover_arenas()
881 if (!nd_btt_arena_is_valid(btt->nd_btt, super)) { in discover_arenas()
882 if (remaining == btt->rawsize) { in discover_arenas()
883 btt->init_state = INIT_NOTFOUND; in discover_arenas()
889 ret = -ENODEV; in discover_arenas()
894 arena->external_lba_start = cur_nlba; in discover_arenas()
916 list_add_tail(&arena->list, &btt->arena_list); in discover_arenas()
918 remaining -= arena->size; in discover_arenas()
919 cur_off += arena->size; in discover_arenas()
920 cur_nlba += arena->external_nlba; in discover_arenas()
923 if (arena->nextoff == 0) in discover_arenas()
926 btt->num_arenas = num_arenas; in discover_arenas()
927 btt->nlba = cur_nlba; in discover_arenas()
928 btt->init_state = INIT_READY; in discover_arenas()
943 size_t remaining = btt->rawsize; in create_arenas()
950 remaining -= arena_size; in create_arenas()
954 arena = alloc_arena(btt, arena_size, btt->nlba, cur_off); in create_arenas()
957 return -ENOMEM; in create_arenas()
959 btt->nlba += arena->external_nlba; in create_arenas()
961 arena->nextoff = arena->size; in create_arenas()
963 arena->nextoff = 0; in create_arenas()
965 list_add_tail(&arena->list, &btt->arena_list); in create_arenas()
982 struct nd_btt *nd_btt = arena->nd_btt; in btt_arena_write_layout()
983 const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev); in btt_arena_write_layout()
995 return -ENOMEM; in btt_arena_write_layout()
997 strncpy(super->signature, BTT_SIG, BTT_SIG_LEN); in btt_arena_write_layout()
998 memcpy(super->uuid, nd_btt->uuid, 16); in btt_arena_write_layout()
999 memcpy(super->parent_uuid, parent_uuid, 16); in btt_arena_write_layout()
1000 super->flags = cpu_to_le32(arena->flags); in btt_arena_write_layout()
1001 super->version_major = cpu_to_le16(arena->version_major); in btt_arena_write_layout()
1002 super->version_minor = cpu_to_le16(arena->version_minor); in btt_arena_write_layout()
1003 super->external_lbasize = cpu_to_le32(arena->external_lbasize); in btt_arena_write_layout()
1004 super->external_nlba = cpu_to_le32(arena->external_nlba); in btt_arena_write_layout()
1005 super->internal_lbasize = cpu_to_le32(arena->internal_lbasize); in btt_arena_write_layout()
1006 super->internal_nlba = cpu_to_le32(arena->internal_nlba); in btt_arena_write_layout()
1007 super->nfree = cpu_to_le32(arena->nfree); in btt_arena_write_layout()
1008 super->infosize = cpu_to_le32(sizeof(struct btt_sb)); in btt_arena_write_layout()
1009 super->nextoff = cpu_to_le64(arena->nextoff); in btt_arena_write_layout()
1011 * Subtract arena->infooff (arena start) so numbers are relative in btt_arena_write_layout()
1014 super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff); in btt_arena_write_layout()
1015 super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff); in btt_arena_write_layout()
1016 super->logoff = cpu_to_le64(arena->logoff - arena->infooff); in btt_arena_write_layout()
1017 super->info2off = cpu_to_le64(arena->info2off - arena->infooff); in btt_arena_write_layout()
1019 super->flags = 0; in btt_arena_write_layout()
1021 super->checksum = cpu_to_le64(sum); in btt_arena_write_layout()
1038 mutex_lock(&btt->init_lock); in btt_meta_init()
1039 list_for_each_entry(arena, &btt->arena_list, list) { in btt_meta_init()
1057 btt->init_state = INIT_READY; in btt_meta_init()
1060 mutex_unlock(&btt->init_lock); in btt_meta_init()
1066 return btt->lbasize - btt->sector_size; in btt_meta_size()
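The metadata size is just the slack between the advertised LBA size and the power-of-two sector size chosen in btt_init(): a 520-byte lbasize, for instance, gives 512-byte sectors plus 8 bytes of per-sector integrity metadata.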
1080 __u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size); in lba_to_arena()
1082 list_for_each_entry(arena_list, &btt->arena_list, list) { in lba_to_arena()
1083 if (lba < arena_list->external_nlba) { in lba_to_arena()
1088 lba -= arena_list->external_nlba; in lba_to_arena()
1091 return -EIO; in lba_to_arena()
1099 __acquires(&arena->map_locks[idx].lock) in lock_map()
1101 u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree; in lock_map()
1103 spin_lock(&arena->map_locks[idx].lock); in lock_map()
1107 __releases(&arena->map_locks[idx].lock) in unlock_map()
1109 u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree; in unlock_map()
1111 spin_unlock(&arena->map_locks[idx].lock); in unlock_map()
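Map updates are serialized per cache line rather than per entry: every 4-byte entry that falls in the same L1 cache line hashes to the same lock, and the lines are striped across the nfree locks. A sketch of the index computation, assuming 64-byte cache lines:

/* MAP_ENT_SIZE == 4; L1_CACHE_BYTES assumed 64 for the example. */
static unsigned int map_lock_idx(unsigned int premap, unsigned int nfree)
{
	return (premap * 4 / 64) % nfree;
}

With these numbers, premap entries 0..15 share lock 0, entries 16..31 share lock 1, and so on, wrapping around after nfree cache lines.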
1159 meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size; in btt_rw_integrity()
1166 bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter); in btt_rw_integrity()
1169 * .bv_offset already adjusted for iter->bi_bvec_done, and we in btt_rw_integrity()
1188 len -= cur_len; in btt_rw_integrity()
1190 if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len)) in btt_rw_integrity()
1191 return -EIO; in btt_rw_integrity()
1212 u32 lane = 0, premap, postmap; in btt_read_pg() local
1217 lane = nd_region_acquire_lane(btt->nd_region); in btt_read_pg()
1223 cur_len = min(btt->sector_size, len); in btt_read_pg()
1245 ret = -EIO; in btt_read_pg()
1249 arena->rtt[lane] = RTT_VALID | postmap; in btt_read_pg()
1272 /* Media error - set the e_flag */ in btt_read_pg()
1286 arena->rtt[lane] = RTT_INVALID; in btt_read_pg()
1287 nd_region_release_lane(btt->nd_region, lane); in btt_read_pg()
1289 len -= cur_len; in btt_read_pg()
1291 sector += btt->sector_size >> SECTOR_SHIFT; in btt_read_pg()
1297 arena->rtt[lane] = RTT_INVALID; in btt_read_pg()
1299 nd_region_release_lane(btt->nd_region, lane); in btt_read_pg()
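The Read Tracking Table is the handshake between readers and writers: before touching a data block, a reader publishes RTT_VALID | postmap in its lane's slot and clears it when done, and btt_write_pg() (below) spins across all slots until no one is reading the free block it is about to overwrite. A plain-C sketch of the protocol; the flag bit is assumed from btt.h, and the driver relies on lane ownership and barriers rather than the C11 atomics used here:

#include <stdatomic.h>
#include <stdint.h>

#define RTT_VALID	(1u << 31)	/* assumed from btt.h */
#define RTT_INVALID	0u
#define NFREE		256		/* example lane count */

static _Atomic uint32_t rtt[NFREE];	/* one slot per lane */

static void reader_enter(unsigned int lane, uint32_t postmap)
{
	atomic_store(&rtt[lane], RTT_VALID | postmap);
}

static void reader_exit(unsigned int lane)
{
	atomic_store(&rtt[lane], RTT_INVALID);
}

/* Writer side: stall until no in-flight read targets the block we reuse. */
static void writer_wait(uint32_t new_postmap)
{
	for (unsigned int i = 0; i < NFREE; i++)
		while (atomic_load(&rtt[i]) == (RTT_VALID | new_postmap))
			;	/* the driver uses cpu_relax() here */
}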
1311 u64 nsoff = adjust_initial_offset(arena->nd_btt, in btt_is_badblock()
1315 return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize); in btt_is_badblock()
1324 u32 premap = 0, old_postmap, new_postmap, lane = 0, i; in btt_write_pg() local
1333 lane = nd_region_acquire_lane(btt->nd_region); in btt_write_pg()
1338 cur_len = min(btt->sector_size, len); in btt_write_pg()
1340 if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) { in btt_write_pg()
1341 ret = -EIO; in btt_write_pg()
1345 if (btt_is_badblock(btt, arena, arena->freelist[lane].block)) in btt_write_pg()
1346 arena->freelist[lane].has_err = 1; in btt_write_pg()
1348 if (mutex_is_locked(&arena->err_lock) in btt_write_pg()
1349 || arena->freelist[lane].has_err) { in btt_write_pg()
1350 nd_region_release_lane(btt->nd_region, lane); in btt_write_pg()
1352 ret = arena_clear_freelist_error(arena, lane); in btt_write_pg()
1356 /* OK to acquire a different lane/free block */ in btt_write_pg()
1360 new_postmap = arena->freelist[lane].block; in btt_write_pg()
1363 for (i = 0; i < arena->nfree; i++) in btt_write_pg()
1364 while (arena->rtt[i] == (RTT_VALID | new_postmap)) in btt_write_pg()
1368 if (new_postmap >= arena->internal_nlba) { in btt_write_pg()
1369 ret = -EIO; in btt_write_pg()
1389 if (old_postmap >= arena->internal_nlba) { in btt_write_pg()
1390 ret = -EIO; in btt_write_pg()
1399 log.seq = cpu_to_le32(arena->freelist[lane].seq); in btt_write_pg()
1400 sub = arena->freelist[lane].sub; in btt_write_pg()
1401 ret = btt_flog_write(arena, lane, sub, &log); in btt_write_pg()
1411 nd_region_release_lane(btt->nd_region, lane); in btt_write_pg()
1414 ret = arena_clear_freelist_error(arena, lane); in btt_write_pg()
1419 len -= cur_len; in btt_write_pg()
1421 sector += btt->sector_size >> SECTOR_SHIFT; in btt_write_pg()
1429 nd_region_release_lane(btt->nd_region, lane); in btt_write_pg()
1453 struct btt *btt = q->queuedata; in btt_make_request()
1467 if (len > PAGE_SIZE || len < btt->sector_size || in btt_make_request()
1468 len % btt->sector_size) { in btt_make_request()
1469 dev_err_ratelimited(&btt->nd_btt->dev, in btt_make_request()
1471 bio->bi_status = BLK_STS_IOERR; in btt_make_request()
1478 dev_err(&btt->nd_btt->dev, in btt_make_request()
1483 bio->bi_status = errno_to_blk_status(err); in btt_make_request()
1497 struct btt *btt = bdev->bd_disk->private_data; in btt_rw_page()
1513 geo->heads = 1 << 6; in btt_getgeo()
1514 geo->sectors = 1 << 5; in btt_getgeo()
1515 geo->cylinders = get_capacity(bd->bd_disk) >> 11; in btt_getgeo()
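The fixed geometry of 64 heads and 32 sectors per track gives 64 * 32 = 2048 sectors per cylinder, which is why the cylinder count is the disk capacity shifted right by 11.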
1528 struct nd_btt *nd_btt = btt->nd_btt; in btt_blk_init()
1529 struct nd_namespace_common *ndns = nd_btt->ndns; in btt_blk_init()
1532 btt->btt_queue = blk_alloc_queue(GFP_KERNEL); in btt_blk_init()
1533 if (!btt->btt_queue) in btt_blk_init()
1534 return -ENOMEM; in btt_blk_init()
1536 btt->btt_disk = alloc_disk(0); in btt_blk_init()
1537 if (!btt->btt_disk) { in btt_blk_init()
1538 blk_cleanup_queue(btt->btt_queue); in btt_blk_init()
1539 return -ENOMEM; in btt_blk_init()
1542 nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name); in btt_blk_init()
1543 btt->btt_disk->first_minor = 0; in btt_blk_init()
1544 btt->btt_disk->fops = &btt_fops; in btt_blk_init()
1545 btt->btt_disk->private_data = btt; in btt_blk_init()
1546 btt->btt_disk->queue = btt->btt_queue; in btt_blk_init()
1547 btt->btt_disk->flags = GENHD_FL_EXT_DEVT; in btt_blk_init()
1548 btt->btt_disk->queue->backing_dev_info->capabilities |= in btt_blk_init()
1551 blk_queue_make_request(btt->btt_queue, btt_make_request); in btt_blk_init()
1552 blk_queue_logical_block_size(btt->btt_queue, btt->sector_size); in btt_blk_init()
1553 blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX); in btt_blk_init()
1554 blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue); in btt_blk_init()
1555 btt->btt_queue->queuedata = btt; in btt_blk_init()
1558 int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt)); in btt_blk_init()
1561 del_gendisk(btt->btt_disk); in btt_blk_init()
1562 put_disk(btt->btt_disk); in btt_blk_init()
1563 blk_cleanup_queue(btt->btt_queue); in btt_blk_init()
1567 set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); in btt_blk_init()
1568 device_add_disk(&btt->nd_btt->dev, btt->btt_disk); in btt_blk_init()
1569 btt->nd_btt->size = btt->nlba * (u64)btt->sector_size; in btt_blk_init()
1570 revalidate_disk(btt->btt_disk); in btt_blk_init()
1577 del_gendisk(btt->btt_disk); in btt_blk_cleanup()
1578 put_disk(btt->btt_disk); in btt_blk_cleanup()
1579 blk_cleanup_queue(btt->btt_queue); in btt_blk_cleanup()
1583 * btt_init - initialize a block translation table for the given device
1587 * @uuid: A uuid for the backing device - this is stored on media
1605 struct device *dev = &nd_btt->dev; in btt_init()
1611 btt->nd_btt = nd_btt; in btt_init()
1612 btt->rawsize = rawsize; in btt_init()
1613 btt->lbasize = lbasize; in btt_init()
1614 btt->sector_size = ((lbasize >= 4096) ? 4096 : 512); in btt_init()
1615 INIT_LIST_HEAD(&btt->arena_list); in btt_init()
1616 mutex_init(&btt->init_lock); in btt_init()
1617 btt->nd_region = nd_region; in btt_init()
1618 nsio = to_nd_namespace_io(&nd_btt->ndns->dev); in btt_init()
1619 btt->phys_bb = &nsio->bb; in btt_init()
1627 if (btt->init_state != INIT_READY && nd_region->ro) { in btt_init()
1628 dev_warn(dev, "%s is read-only, unable to init btt metadata\n", in btt_init()
1629 dev_name(&nd_region->dev)); in btt_init()
1631 } else if (btt->init_state != INIT_READY) { in btt_init()
1632 btt->num_arenas = (rawsize / ARENA_MAX_SIZE) + in btt_init()
1635 btt->num_arenas, rawsize); in btt_init()
1662 * btt_fini - de-initialize a BTT
1665 * De-initialize a Block Translation Table on device removal
1675 debugfs_remove_recursive(btt->debugfs_dir); in btt_fini()
1681 struct nd_btt *nd_btt = to_nd_btt(ndns->claim); in nvdimm_namespace_attach_btt()
1687 if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) { in nvdimm_namespace_attach_btt()
1688 dev_dbg(&nd_btt->dev, "incomplete btt configuration\n"); in nvdimm_namespace_attach_btt()
1689 return -ENODEV; in nvdimm_namespace_attach_btt()
1692 btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL); in nvdimm_namespace_attach_btt()
1694 return -ENOMEM; in nvdimm_namespace_attach_btt()
1704 rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset; in nvdimm_namespace_attach_btt()
1706 dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n", in nvdimm_namespace_attach_btt()
1707 dev_name(&ndns->dev), in nvdimm_namespace_attach_btt()
1708 ARENA_MIN_SIZE + nd_btt->initial_offset); in nvdimm_namespace_attach_btt()
1709 return -ENXIO; in nvdimm_namespace_attach_btt()
1711 nd_region = to_nd_region(nd_btt->dev.parent); in nvdimm_namespace_attach_btt()
1712 btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid, in nvdimm_namespace_attach_btt()
1715 return -ENOMEM; in nvdimm_namespace_attach_btt()
1716 nd_btt->btt = btt; in nvdimm_namespace_attach_btt()
1724 struct btt *btt = nd_btt->btt; in nvdimm_namespace_detach_btt()
1727 nd_btt->btt = NULL; in nvdimm_namespace_detach_btt()
1739 rc = -ENXIO; in nd_btt_init()