Lines matching refs: nd_region

Cross-references of struct nd_region in the libnvdimm region driver (drivers/nvdimm/region_devs.c). Each entry gives the source line number, the matching line, and the enclosing function, annotated with argument, local, or member where the identifier is declared.

62 int nd_region_activate(struct nd_region *nd_region)  in nd_region_activate()  argument
66 struct device *dev = &nd_region->dev; in nd_region_activate()
69 nvdimm_bus_lock(&nd_region->dev); in nd_region_activate()
70 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_activate()
71 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_activate()
75 nvdimm_bus_unlock(&nd_region->dev); in nd_region_activate()
86 nvdimm_bus_unlock(&nd_region->dev); in nd_region_activate()
97 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_activate()
98 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_activate()
100 int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd); in nd_region_activate()
110 for (i = 0; i < nd_region->ndr_mappings - 1; i++) { in nd_region_activate()
115 for (j = i + 1; j < nd_region->ndr_mappings; j++) in nd_region_activate()
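nd_region_activate() (lines 62-115 above) takes the bus lock, maps each DIMM's flush hint pages via nvdimm_map_flush(), and the trailing nested loop de-duplicates hint addresses shared between mappings. The per-mapping walk it uses recurs throughout this file; a minimal sketch, assuming the driver-internal definitions from drivers/nvdimm/nd.h:

    /* Sketch: the canonical walk over a region's DIMM mappings. */
    static void visit_mappings(struct nd_region *nd_region)
    {
            int i;

            for (i = 0; i < nd_region->ndr_mappings; i++) {
                    struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                    struct nvdimm *nvdimm = nd_mapping->nvdimm;

                    /* ... per-DIMM work, e.g. mapping flush hints ... */
            }
    }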
126 struct nd_region *nd_region = to_nd_region(dev); in nd_region_release() local
129 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_release()
130 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_release()
135 free_percpu(nd_region->lane); in nd_region_release()
136 memregion_free(nd_region->id); in nd_region_release()
140 kfree(nd_region); in nd_region_release()
143 struct nd_region *to_nd_region(struct device *dev) in to_nd_region()
145 struct nd_region *nd_region = container_of(dev, struct nd_region, dev); in to_nd_region() local
148 return nd_region; in to_nd_region()
152 struct device *nd_region_dev(struct nd_region *nd_region) in nd_region_dev() argument
154 if (!nd_region) in nd_region_dev()
156 return &nd_region->dev; in nd_region_dev()
162 struct nd_region *nd_region = to_nd_region(dev); in to_nd_blk_region() local
165 return container_of(nd_region, struct nd_blk_region, nd_region); in to_nd_blk_region()
169 void *nd_region_provider_data(struct nd_region *nd_region) in nd_region_provider_data() argument
171 return nd_region->provider_data; in nd_region_provider_data()
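to_nd_region(), nd_region_dev(), and to_nd_blk_region() are container_of() round-trips over nested embeddings: the struct device sits inside the nd_region, which for BLK regions sits inside an nd_blk_region. A sketch of the invariant these accessors maintain:

    /* Sketch: container_of() inverts the embedding, so converting the
     * embedded device back must recover the original region pointer. */
    static void accessor_roundtrip(struct nd_region *nd_region)
    {
            struct device *dev = nd_region_dev(nd_region); /* &nd_region->dev */

            WARN_ON(to_nd_region(dev) != nd_region);
    }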
195 int nd_region_to_nstype(struct nd_region *nd_region) in nd_region_to_nstype() argument
197 if (is_memory(&nd_region->dev)) { in nd_region_to_nstype()
200 for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) { in nd_region_to_nstype()
201 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_to_nstype()
211 } else if (is_nd_blk(&nd_region->dev)) { in nd_region_to_nstype()
219 static unsigned long long region_size(struct nd_region *nd_region) in region_size() argument
221 if (is_memory(&nd_region->dev)) { in region_size()
222 return nd_region->ndr_size; in region_size()
223 } else if (nd_region->ndr_mappings == 1) { in region_size()
224 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in region_size()
235 struct nd_region *nd_region = to_nd_region(dev); in size_show() local
237 return sprintf(buf, "%llu\n", region_size(nd_region)); in size_show()
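size_show() and most of the attributes below share one shape: recover the region from the generic device, then format a single field. The surrounding boilerplate, sketched with a hypothetical attribute name "example":

    static ssize_t example_show(struct device *dev,
                    struct device_attribute *attr, char *buf)
    {
            struct nd_region *nd_region = to_nd_region(dev);

            return sprintf(buf, "%llu\n", region_size(nd_region));
    }
    static DEVICE_ATTR_RO(example);  /* appears as /sys/.../regionN/example */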
244 struct nd_region *nd_region = to_nd_region(dev); in deep_flush_show() local
250 return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region)); in deep_flush_show()
258 struct nd_region *nd_region = to_nd_region(dev); in deep_flush_store() local
264 rc = nvdimm_flush(nd_region, NULL); in deep_flush_store()
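The store side mirrors the show side: parse the user buffer, act, return the consumed length. A hedged sketch of the deep_flush write path around line 264 (the original's boolean parser may differ, e.g. strtobool() on older kernels):

    static ssize_t deep_flush_store_sketch(struct device *dev,
                    struct device_attribute *attr, const char *buf, size_t len)
    {
            struct nd_region *nd_region = to_nd_region(dev);
            bool flush;
            int rc;

            if (kstrtobool(buf, &flush) < 0 || !flush)
                    return -EINVAL;
            rc = nvdimm_flush(nd_region, NULL);  /* as at line 264 */
            if (rc)
                    return rc;
            return len;
    }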
275 struct nd_region *nd_region = to_nd_region(dev); in mappings_show() local
277 return sprintf(buf, "%d\n", nd_region->ndr_mappings); in mappings_show()
284 struct nd_region *nd_region = to_nd_region(dev); in nstype_show() local
286 return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region)); in nstype_show()
293 struct nd_region *nd_region = to_nd_region(dev); in set_cookie_show() local
294 struct nd_interleave_set *nd_set = nd_region->nd_set; in set_cookie_show()
311 if (nd_region->ndr_mappings) { in set_cookie_show()
312 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in set_cookie_show()
320 nd_region_interleave_set_cookie(nd_region, in set_cookie_show()
333 resource_size_t nd_region_available_dpa(struct nd_region *nd_region) in nd_region_available_dpa() argument
338 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); in nd_region_available_dpa()
343 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_available_dpa()
344 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_available_dpa()
351 if (is_memory(&nd_region->dev)) { in nd_region_available_dpa()
352 available += nd_pmem_available_dpa(nd_region, in nd_region_available_dpa()
358 } else if (is_nd_blk(&nd_region->dev)) in nd_region_available_dpa()
359 available += nd_blk_available_dpa(nd_region); in nd_region_available_dpa()
365 resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region) in nd_region_allocatable_dpa() argument
370 if (is_memory(&nd_region->dev)) in nd_region_allocatable_dpa()
373 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev)); in nd_region_allocatable_dpa()
374 for (i = 0; i < nd_region->ndr_mappings; i++) { in nd_region_allocatable_dpa()
375 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_allocatable_dpa()
377 if (is_memory(&nd_region->dev)) in nd_region_allocatable_dpa()
379 nd_pmem_max_contiguous_dpa(nd_region, in nd_region_allocatable_dpa()
381 else if (is_nd_blk(&nd_region->dev)) in nd_region_allocatable_dpa()
382 available += nd_blk_available_dpa(nd_region); in nd_region_allocatable_dpa()
384 if (is_memory(&nd_region->dev)) in nd_region_allocatable_dpa()
385 return available * nd_region->ndr_mappings; in nd_region_allocatable_dpa()
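For PMEM regions, nd_region_allocatable_dpa() reduces to the most-constrained DIMM's largest contiguous free DPA extent times the interleave ways (the multiply at line 385). A worked example with hypothetical numbers:

    /* 4-way interleaved PMEM region, largest contiguous free DPA per DIMM:
     *   DIMM0 = 4G, DIMM1 = 4G, DIMM2 = 2G, DIMM3 = 4G
     * available          = min(4G, 4G, 2G, 4G) = 2G  (per-DIMM bottleneck)
     * allocatable extent = available * ndr_mappings = 2G * 4 = 8G */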
392 struct nd_region *nd_region = to_nd_region(dev); in available_size_show() local
404 available = nd_region_available_dpa(nd_region); in available_size_show()
415 struct nd_region *nd_region = to_nd_region(dev); in max_available_extent_show() local
421 available = nd_region_allocatable_dpa(nd_region); in max_available_extent_show()
449 struct nd_region *nd_region = to_nd_region(dev); in namespace_seed_show() local
453 if (nd_region->ns_seed) in namespace_seed_show()
454 rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed)); in namespace_seed_show()
465 struct nd_region *nd_region = to_nd_region(dev); in btt_seed_show() local
469 if (nd_region->btt_seed) in btt_seed_show()
470 rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed)); in btt_seed_show()
482 struct nd_region *nd_region = to_nd_region(dev); in pfn_seed_show() local
486 if (nd_region->pfn_seed) in pfn_seed_show()
487 rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed)); in pfn_seed_show()
499 struct nd_region *nd_region = to_nd_region(dev); in dax_seed_show() local
503 if (nd_region->dax_seed) in dax_seed_show()
504 rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed)); in dax_seed_show()
516 struct nd_region *nd_region = to_nd_region(dev); in read_only_show() local
518 return sprintf(buf, "%d\n", nd_region->ro); in read_only_show()
532 struct nd_region *nd_region = to_nd_region(dev); in read_only_store() local
537 nd_region->ro = ro; in read_only_store()
546 struct nd_region *nd_region = to_nd_region(dev); in align_show() local
548 return sprintf(buf, "%#lx\n", nd_region->align); in align_show()
554 struct nd_region *nd_region = to_nd_region(dev); in align_store() local
563 if (!nd_region->ndr_mappings) in align_store()
573 dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder); in align_store()
575 || val > region_size(nd_region) || remainder) in align_store()
584 nd_region->align = val; in align_store()
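align_store() only accepts a region alignment that splits evenly across the interleave ways into a power-of-two, page-or-larger per-DIMM share no bigger than the region. Worked examples with hypothetical values:

    /* dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder);
     *   val = 32M, ndr_mappings = 2 -> dpa = 16M, remainder = 0: accepted
     *     (16M is a power of two and 32M <= region_size)
     *   val = 32M, ndr_mappings = 3 -> remainder != 0:           -EINVAL
     *   val = 48M, ndr_mappings = 2 -> dpa = 24M, not a power of two:
     *                                                            -EINVAL */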
594 struct nd_region *nd_region = to_nd_region(dev); in region_badblocks_show() local
599 rc = badblocks_show(&nd_region->bb, buf, 0); in region_badblocks_show()
611 struct nd_region *nd_region = to_nd_region(dev); in resource_show() local
613 return sprintf(buf, "%#llx\n", nd_region->ndr_start); in resource_show()
620 struct nd_region *nd_region = to_nd_region(dev); in persistence_domain_show() local
622 if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags)) in persistence_domain_show()
624 else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags)) in persistence_domain_show()
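persistence_domain_show() translates the region flags into the strings userspace reads from /sys/.../persistence_domain:

    /* Flag-to-string mapping (as in the upstream show routine):
     *   ND_REGION_PERSIST_CACHE   -> "cpu_cache"
     *   ND_REGION_PERSIST_MEMCTRL -> "memory_controller"
     *   neither set               -> empty line (domain unknown) */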
655 struct nd_region *nd_region = to_nd_region(dev); in region_visible() local
656 struct nd_interleave_set *nd_set = nd_region->nd_set; in region_visible()
657 int type = nd_region_to_nstype(nd_region); in region_visible()
672 int has_flush = nvdimm_has_flush(nd_region); in region_visible()
683 if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE) in region_visible()
708 struct nd_region *nd_region = to_nd_region(dev); in mappingN() local
712 if (n >= nd_region->ndr_mappings) in mappingN()
714 nd_mapping = &nd_region->mapping[n]; in mappingN()
770 struct nd_region *nd_region = to_nd_region(dev); in mapping_visible() local
772 if (n < nd_region->ndr_mappings) in mapping_visible()
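mapping_visible() implements the attribute-group is_visible contract: return the attribute's normal mode to expose mappingN for populated slots, 0 to hide the rest. A sketch consistent with line 772:

    static umode_t mapping_visible(struct kobject *kobj,
                    struct attribute *a, int n)
    {
            struct device *dev = container_of(kobj, struct device, kobj);
            struct nd_region *nd_region = to_nd_region(dev);

            return n < nd_region->ndr_mappings ? a->mode : 0;
    }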
864 u64 nd_region_interleave_set_cookie(struct nd_region *nd_region, in nd_region_interleave_set_cookie() argument
867 struct nd_interleave_set *nd_set = nd_region->nd_set; in nd_region_interleave_set_cookie()
878 u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region) in nd_region_interleave_set_altcookie() argument
880 struct nd_interleave_set *nd_set = nd_region->nd_set; in nd_region_interleave_set_altcookie()
902 void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev) in nd_region_advance_seeds() argument
905 if (nd_region->ns_seed == dev) { in nd_region_advance_seeds()
906 nd_region_create_ns_seed(nd_region); in nd_region_advance_seeds()
910 if (nd_region->btt_seed == dev) in nd_region_advance_seeds()
911 nd_region_create_btt_seed(nd_region); in nd_region_advance_seeds()
912 if (nd_region->ns_seed == &nd_btt->ndns->dev) in nd_region_advance_seeds()
913 nd_region_create_ns_seed(nd_region); in nd_region_advance_seeds()
917 if (nd_region->pfn_seed == dev) in nd_region_advance_seeds()
918 nd_region_create_pfn_seed(nd_region); in nd_region_advance_seeds()
919 if (nd_region->ns_seed == &nd_pfn->ndns->dev) in nd_region_advance_seeds()
920 nd_region_create_ns_seed(nd_region); in nd_region_advance_seeds()
924 if (nd_region->dax_seed == dev) in nd_region_advance_seeds()
925 nd_region_create_dax_seed(nd_region); in nd_region_advance_seeds()
926 if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev) in nd_region_advance_seeds()
927 nd_region_create_ns_seed(nd_region); in nd_region_advance_seeds()
932 int nd_blk_region_init(struct nd_region *nd_region) in nd_blk_region_init() argument
934 struct device *dev = &nd_region->dev; in nd_blk_region_init()
940 if (nd_region->ndr_mappings < 1) { in nd_blk_region_init()
965 unsigned int nd_region_acquire_lane(struct nd_region *nd_region) in nd_region_acquire_lane() argument
971 if (nd_region->num_lanes < nr_cpu_ids) { in nd_region_acquire_lane()
974 lane = cpu % nd_region->num_lanes; in nd_region_acquire_lane()
975 ndl_count = per_cpu_ptr(nd_region->lane, cpu); in nd_region_acquire_lane()
976 ndl_lock = per_cpu_ptr(nd_region->lane, lane); in nd_region_acquire_lane()
986 void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane) in nd_region_release_lane() argument
988 if (nd_region->num_lanes < nr_cpu_ids) { in nd_region_release_lane()
992 ndl_count = per_cpu_ptr(nd_region->lane, cpu); in nd_region_release_lane()
993 ndl_lock = per_cpu_ptr(nd_region->lane, lane); in nd_region_release_lane()
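When a region has fewer lanes than CPUs, nd_region_acquire_lane() hashes the CPU onto a lane (line 974) and serializes sharers with a per-lane spinlock; otherwise every CPU gets a private lane. Consumers simply bracket their I/O:

    /* Sketch: how a BTT/BLK I/O path uses a region lane. */
    static void do_laned_io(struct nd_region *nd_region)
    {
            unsigned int lane = nd_region_acquire_lane(nd_region);

            /* ... stage the transfer through lane-private resources ... */

            nd_region_release_lane(nd_region, lane);
    }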
1007 static unsigned long default_align(struct nd_region *nd_region) in default_align() argument
1013 if (is_nd_blk(&nd_region->dev)) in default_align()
1018 for (i = 0; i < nd_region->ndr_mappings; i++) { in default_align()
1019 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in default_align()
1028 if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX) in default_align()
1031 mappings = max_t(u16, 1, nd_region->ndr_mappings); in default_align()
1039 static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, in nd_region_create()
1043 struct nd_region *nd_region; in nd_region_create() local
1080 nd_region = &ndbr->nd_region; in nd_region_create()
1086 nd_region = kzalloc(struct_size(nd_region, mapping, in nd_region_create()
1089 region_buf = nd_region; in nd_region_create()
1094 nd_region->id = memregion_alloc(GFP_KERNEL); in nd_region_create()
1095 if (nd_region->id < 0) in nd_region_create()
1098 nd_region->lane = alloc_percpu(struct nd_percpu_lane); in nd_region_create()
1099 if (!nd_region->lane) in nd_region_create()
1105 ndl = per_cpu_ptr(nd_region->lane, i); in nd_region_create()
1114 nd_region->mapping[i].nvdimm = nvdimm; in nd_region_create()
1115 nd_region->mapping[i].start = mapping->start; in nd_region_create()
1116 nd_region->mapping[i].size = mapping->size; in nd_region_create()
1117 nd_region->mapping[i].position = mapping->position; in nd_region_create()
1118 INIT_LIST_HEAD(&nd_region->mapping[i].labels); in nd_region_create()
1119 mutex_init(&nd_region->mapping[i].lock); in nd_region_create()
1123 nd_region->ndr_mappings = ndr_desc->num_mappings; in nd_region_create()
1124 nd_region->provider_data = ndr_desc->provider_data; in nd_region_create()
1125 nd_region->nd_set = ndr_desc->nd_set; in nd_region_create()
1126 nd_region->num_lanes = ndr_desc->num_lanes; in nd_region_create()
1127 nd_region->flags = ndr_desc->flags; in nd_region_create()
1128 nd_region->ro = ro; in nd_region_create()
1129 nd_region->numa_node = ndr_desc->numa_node; in nd_region_create()
1130 nd_region->target_node = ndr_desc->target_node; in nd_region_create()
1131 ida_init(&nd_region->ns_ida); in nd_region_create()
1132 ida_init(&nd_region->btt_ida); in nd_region_create()
1133 ida_init(&nd_region->pfn_ida); in nd_region_create()
1134 ida_init(&nd_region->dax_ida); in nd_region_create()
1135 dev = &nd_region->dev; in nd_region_create()
1136 dev_set_name(dev, "region%d", nd_region->id); in nd_region_create()
1141 nd_region->ndr_size = resource_size(ndr_desc->res); in nd_region_create()
1142 nd_region->ndr_start = ndr_desc->res->start; in nd_region_create()
1143 nd_region->align = default_align(nd_region); in nd_region_create()
1145 nd_region->flush = ndr_desc->flush; in nd_region_create()
1147 nd_region->flush = NULL; in nd_region_create()
1151 return nd_region; in nd_region_create()
1154 memregion_free(nd_region->id); in nd_region_create()
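nd_region_create() makes the three allocations that nd_region_release() (lines 129-140 above) unwinds: a flexible-array region struct sized with struct_size(), a memregion id, and the per-CPU lane array. A condensed sketch of the pairing, error handling omitted:

    nd_region = kzalloc(struct_size(nd_region, mapping,
                    ndr_desc->num_mappings), GFP_KERNEL); /* <-> kfree()      */
    nd_region->id = memregion_alloc(GFP_KERNEL);      /* <-> memregion_free() */
    nd_region->lane = alloc_percpu(struct nd_percpu_lane);
                                                      /* <-> free_percpu()    */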
1160 struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus, in nvdimm_pmem_region_create()
1169 struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus, in nvdimm_blk_region_create()
1180 struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus, in nvdimm_volatile_region_create()
1189 int nvdimm_flush(struct nd_region *nd_region, struct bio *bio) in nvdimm_flush() argument
1193 if (!nd_region->flush) in nvdimm_flush()
1194 rc = generic_nvdimm_flush(nd_region); in nvdimm_flush()
1196 if (nd_region->flush(nd_region, bio)) in nvdimm_flush()
1206 int generic_nvdimm_flush(struct nd_region *nd_region) in generic_nvdimm_flush() argument
1208 struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev); in generic_nvdimm_flush()
1226 for (i = 0; i < nd_region->ndr_mappings; i++) in generic_nvdimm_flush()
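nvdimm_flush() is what consumers call after writing persistent memory: it defers to a provider-specific ->flush() callback when one is set (line 1196), otherwise to generic_nvdimm_flush(), which writes the mapped flush hints of every DIMM in the region. A hedged usage sketch; memcpy_flushcache() is the usual cache-bypassing copy helper, and dst/src/len are hypothetical:

    /* Sketch: persist a write, then flush the DIMMs' write-posted queues. */
    memcpy_flushcache(dst, src, len);   /* data reaches the DIMM buffers */
    if (nvdimm_flush(nd_region, NULL))  /* NULL bio: synchronous flush   */
            pr_warn("deep flush failed\n");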
1243 int nvdimm_has_flush(struct nd_region *nd_region) in nvdimm_has_flush() argument
1248 if (nd_region->ndr_mappings == 0 in nvdimm_has_flush()
1253 if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush) in nvdimm_has_flush()
1257 for (i = 0; i < nd_region->ndr_mappings; i++) { in nvdimm_has_flush()
1258 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nvdimm_has_flush()
1274 int nvdimm_has_cache(struct nd_region *nd_region) in nvdimm_has_cache() argument
1276 return is_nd_pmem(&nd_region->dev) && in nvdimm_has_cache()
1277 !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags); in nvdimm_has_cache()
1281 bool is_nvdimm_sync(struct nd_region *nd_region) in is_nvdimm_sync() argument
1283 if (is_nd_volatile(&nd_region->dev)) in is_nvdimm_sync()
1286 return is_nd_pmem(&nd_region->dev) && in is_nvdimm_sync()
1287 !test_bit(ND_REGION_ASYNC, &nd_region->flags); in is_nvdimm_sync()
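nvdimm_has_cache() and is_nvdimm_sync() are capability queries for consumers: PMEM needs CPU-cache flushing unless the platform persists caches, and a region counts as sync if it is volatile, or PMEM without ND_REGION_ASYNC. A consumer-side sketch; the pmem driver uses this to decide whether its DAX device may advertise synchronous faults, and the DAXDEV_F_SYNC flag name (from older kernels' alloc_dax()) is an assumption here:

    unsigned long dax_flags = 0;

    if (is_nvdimm_sync(nd_region))
            dax_flags |= DAXDEV_F_SYNC;  /* safe for MAP_SYNC mappings */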
1292 struct nd_region *nd_region; member
1298 struct nd_region *nd_region; in region_conflict() local
1305 nd_region = to_nd_region(dev); in region_conflict()
1306 if (nd_region == ctx->nd_region) in region_conflict()
1310 region_start = nd_region->ndr_start; in region_conflict()
1311 region_end = region_start + nd_region->ndr_size; in region_conflict()
1319 int nd_region_conflict(struct nd_region *nd_region, resource_size_t start, in nd_region_conflict() argument
1322 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); in nd_region_conflict()
1324 .nd_region = nd_region, in nd_region_conflict()
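nd_region_conflict() resolves the bus from the region and walks every child device; region_conflict() skips non-regions and the region itself (line 1306), then tests the candidate range against each region's [ndr_start, ndr_start + ndr_size) span. A sketch of the overlap test and the walk:

    /* Half-open ranges [start, start + size) and [region_start, region_end)
     * overlap iff each begins before the other ends. */
    if (start < region_end && region_start < start + size)
            return -EBUSY;  /* conflict */

    /* Driving the walk, as at the end of nd_region_conflict(): */
    device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);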