/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __ND_H__
#define __ND_H__
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/types.h>
#include <linux/nd.h>
#include "label.h"

enum {
	/*
	 * Limits the maximum number of block apertures a dimm can
	 * support and is an input to the geometry/on-disk-format of a
	 * BTT instance
	 */
	ND_MAX_LANES = 256,
	INT_LBASIZE_ALIGNMENT = 64,
	NVDIMM_IO_ATOMIC = 1,
};

struct nvdimm_drvdata {
	struct device *dev;
	int nslabel_size;
	struct nd_cmd_get_config_size nsarea;
	void *data;
	int ns_current, ns_next;
	struct resource dpa;
	struct kref kref;
};
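
/*
 * Lifetime sketch (an assumption based on the get_ndd()/put_ndd()
 * convention referenced in struct nd_mapping below): holders pin the
 * label data via @kref and drop it through nvdimm_drvdata_release():
 *
 *	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
 *
 *	kref_get(&ndd->kref);
 *	...use ndd->data / ndd->dpa...
 *	kref_put(&ndd->kref, nvdimm_drvdata_release);
 */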

static inline const u8 *nsl_ref_name(struct nvdimm_drvdata *ndd,
				     struct nd_namespace_label *nd_label)
{
	return nd_label->name;
}

static inline u8 *nsl_get_name(struct nvdimm_drvdata *ndd,
			       struct nd_namespace_label *nd_label, u8 *name)
{
	return memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
}

static inline u8 *nsl_set_name(struct nvdimm_drvdata *ndd,
			       struct nd_namespace_label *nd_label, u8 *name)
{
	if (!name)
		return NULL;
	return memcpy(nd_label->name, name, NSLABEL_NAME_LEN);
}

static inline u32 nsl_get_slot(struct nvdimm_drvdata *ndd,
			       struct nd_namespace_label *nd_label)
{
	return __le32_to_cpu(nd_label->slot);
}

static inline void nsl_set_slot(struct nvdimm_drvdata *ndd,
				struct nd_namespace_label *nd_label, u32 slot)
{
	nd_label->slot = __cpu_to_le32(slot);
}

static inline u64 nsl_get_checksum(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label)
{
	return __le64_to_cpu(nd_label->checksum);
}

static inline void nsl_set_checksum(struct nvdimm_drvdata *ndd,
				    struct nd_namespace_label *nd_label,
				    u64 checksum)
{
	nd_label->checksum = __cpu_to_le64(checksum);
}
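
/*
 * Checksum usage sketch (assumption: labels are summed with the
 * fletcher64 helper from libnvdimm.h, with the checksum field zeroed
 * during calculation, following the pattern in label.c):
 *
 *	u64 sum, sum_save = nsl_get_checksum(ndd, nd_label);
 *
 *	nsl_set_checksum(ndd, nd_label, 0);
 *	sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
 *	nsl_set_checksum(ndd, nd_label, sum_save);
 *	valid = (sum == sum_save);
 */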

static inline u32 nsl_get_flags(struct nvdimm_drvdata *ndd,
				struct nd_namespace_label *nd_label)
{
	return __le32_to_cpu(nd_label->flags);
}

static inline void nsl_set_flags(struct nvdimm_drvdata *ndd,
				 struct nd_namespace_label *nd_label, u32 flags)
{
	nd_label->flags = __cpu_to_le32(flags);
}

static inline u64 nsl_get_dpa(struct nvdimm_drvdata *ndd,
			      struct nd_namespace_label *nd_label)
{
	return __le64_to_cpu(nd_label->dpa);
}

static inline void nsl_set_dpa(struct nvdimm_drvdata *ndd,
			       struct nd_namespace_label *nd_label, u64 dpa)
{
	nd_label->dpa = __cpu_to_le64(dpa);
}

static inline u64 nsl_get_rawsize(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label)
{
	return __le64_to_cpu(nd_label->rawsize);
}

static inline void nsl_set_rawsize(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label,
				   u64 rawsize)
{
	nd_label->rawsize = __cpu_to_le64(rawsize);
}

static inline u64 nsl_get_isetcookie(struct nvdimm_drvdata *ndd,
				     struct nd_namespace_label *nd_label)
{
	return __le64_to_cpu(nd_label->isetcookie);
}

static inline void nsl_set_isetcookie(struct nvdimm_drvdata *ndd,
				      struct nd_namespace_label *nd_label,
				      u64 isetcookie)
{
	nd_label->isetcookie = __cpu_to_le64(isetcookie);
}

static inline bool nsl_validate_isetcookie(struct nvdimm_drvdata *ndd,
					   struct nd_namespace_label *nd_label,
					   u64 cookie)
{
	return cookie == __le64_to_cpu(nd_label->isetcookie);
}

static inline u16 nsl_get_position(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label)
{
	return __le16_to_cpu(nd_label->position);
}

static inline void nsl_set_position(struct nvdimm_drvdata *ndd,
				    struct nd_namespace_label *nd_label,
				    u16 position)
{
	nd_label->position = __cpu_to_le16(position);
}

static inline u16 nsl_get_nlabel(struct nvdimm_drvdata *ndd,
				 struct nd_namespace_label *nd_label)
{
	return __le16_to_cpu(nd_label->nlabel);
}

static inline void nsl_set_nlabel(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label,
				  u16 nlabel)
{
	nd_label->nlabel = __cpu_to_le16(nlabel);
}

static inline u64 nsl_get_lbasize(struct nvdimm_drvdata *ndd,
				  struct nd_namespace_label *nd_label)
{
	return __le64_to_cpu(nd_label->lbasize);
}

static inline void nsl_set_lbasize(struct nvdimm_drvdata *ndd,
				   struct nd_namespace_label *nd_label,
				   u64 lbasize)
{
	nd_label->lbasize = __cpu_to_le64(lbasize);
}

bool nsl_validate_blk_isetcookie(struct nvdimm_drvdata *ndd,
				 struct nd_namespace_label *nd_label,
				 u64 isetcookie);
bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
			    struct nd_namespace_label *nd_label, guid_t *guid);
enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
					    struct nd_namespace_label *nd_label);

struct nd_region_data {
	int ns_count;
	int ns_active;
	unsigned int hints_shift;
	void __iomem *flush_wpq[];
};

static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
		int dimm, int hint)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	return ndrd->flush_wpq[dimm * num + (hint & mask)];
}

static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
		int hint, void __iomem *flush)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
}
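
/*
 * @flush_wpq is a flattened [dimm][hint] table: each dimm contributes
 * (1 << hints_shift) write-pending-queue flush addresses, and @hint is
 * masked so any lane number wraps into the available hints. A region
 * flush sketch (assumption: mirrors the generic flush loop in
 * region_devs.c; @idx is an illustrative per-cpu spread value):
 *
 *	for (i = 0; i < nd_region->ndr_mappings; i++)
 *		if (ndrd_get_flush_wpq(ndrd, i, 0))
 *			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
 */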

static inline struct nd_namespace_index *to_namespace_index(
		struct nvdimm_drvdata *ndd, int i)
{
	if (i < 0)
		return NULL;

	return ndd->data + sizeof_namespace_index(ndd) * i;
}

static inline struct nd_namespace_index *to_current_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_current);
}

static inline struct nd_namespace_index *to_next_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_next);
}
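
/*
 * Index-block rotation sketch (assumption: per the UEFI namespace label
 * convention, two index blocks head the label area; updates land in the
 * "next" block with a bumped sequence number, which then becomes
 * "current"):
 *
 *	struct nd_namespace_index *nsindex = to_next_namespace_index(ndd);
 *
 *	nsindex->seq = __cpu_to_le32(nd_inc_seq(
 *			__le32_to_cpu(to_current_namespace_index(ndd)->seq)));
 */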

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);

#define namespace_label_has(ndd, field) \
	(offsetof(struct nd_namespace_label, field) \
		< sizeof_namespace_label(ndd))
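
/*
 * Usage sketch: gate access to fields that only exist in larger label
 * formats, e.g. (assuming a v1.2-only @type_guid field, as in label.c):
 *
 *	if (namespace_label_has(ndd, type_guid))
 *		guid_copy(&nd_label->type_guid, guid);
 */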

#define nd_dbg_dpa(r, d, res, fmt, arg...) \
	dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
		(r) ? dev_name((d)->dev) : "", res ? res->name : "null", \
		(unsigned long long) (res ? resource_size(res) : 0), \
		(unsigned long long) (res ? res->start : 0), ##arg)

#define for_each_dpa_resource(ndd, res) \
	for (res = (ndd)->dpa.child; res; res = res->sibling)

#define for_each_dpa_resource_safe(ndd, res, next) \
	for (res = (ndd)->dpa.child, next = res ? res->sibling : NULL; \
			res; res = next, next = next ? next->sibling : NULL)
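
/*
 * Iteration sketch: walk a dimm's allocated device-physical-address
 * ranges, e.g. to total the space claimed by one label (pattern as in
 * dimm_devs.c; @label_id is illustrative):
 *
 *	resource_size_t allocated = 0;
 *	struct resource *res;
 *
 *	for_each_dpa_resource(ndd, res)
 *		if (strcmp(res->name, label_id->id) == 0)
 *			allocated += resource_size(res);
 */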

struct nd_percpu_lane {
	int count;
	spinlock_t lock;
};
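
/*
 * Lane usage sketch (assumption: I/O paths serialize on a per-cpu lane
 * via the acquire/release helpers exported by region_devs.c, as the BTT
 * does):
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *	...issue I/O through per-lane resources...
 *	nd_region_release_lane(nd_region, lane);
 */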

enum nd_label_flags {
	ND_LABEL_REAP,
};

struct nd_label_ent {
	struct list_head list;
	unsigned long flags;
	struct nd_namespace_label *label;
};

enum nd_mapping_lock_class {
	ND_MAPPING_CLASS0,
	ND_MAPPING_UUID_SCAN,
};

struct nd_mapping {
	struct nvdimm *nvdimm;
	u64 start;
	u64 size;
	int position;
	struct list_head labels;
	struct mutex lock;
	/*
	 * @ndd is for private use at region enable / disable time for
	 * get_ndd() + put_ndd(); all other nd_mapping-to-ndd
	 * conversions use to_ndd(), which respects the enabled state
	 * of the nvdimm.
	 */
	struct nvdimm_drvdata *ndd;
};

struct nd_region {
	struct device dev;
	struct ida ns_ida;
	struct ida btt_ida;
	struct ida pfn_ida;
	struct ida dax_ida;
	unsigned long flags;
	struct device *ns_seed;
	struct device *btt_seed;
	struct device *pfn_seed;
	struct device *dax_seed;
	unsigned long align;
	u16 ndr_mappings;
	u64 ndr_size;
	u64 ndr_start;
	int id, num_lanes, ro, numa_node, target_node;
	void *provider_data;
	struct kernfs_node *bb_state;
	struct badblocks bb;
	struct nd_interleave_set *nd_set;
	struct nd_percpu_lane __percpu *lane;
	int (*flush)(struct nd_region *nd_region, struct bio *bio);
	struct nd_mapping mapping[];
};

struct nd_blk_region {
	int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
	int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
			void *iobuf, u64 len, int rw);
	void *blk_provider_data;
	struct nd_region nd_region;
};

/*
 * Lookup next in the repeating sequence of 01, 10, and 11.
 */
static inline unsigned nd_inc_seq(unsigned seq)
{
	static const unsigned next[] = { 0, 2, 3, 1 };

	return next[seq & 3];
}
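
/*
 * The lookup table cycles through the three valid two-bit sequence
 * numbers, so repeated calls walk 01 -> 10 -> 11 -> 01, while the
 * invalid 00 maps to itself:
 *
 *	nd_inc_seq(1) == 2, nd_inc_seq(2) == 3, nd_inc_seq(3) == 1
 */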

struct btt;
struct nd_btt {
	struct device dev;
	struct nd_namespace_common *ndns;
	struct btt *btt;
	unsigned long lbasize;
	u64 size;
	u8 *uuid;
	int id;
	int initial_offset;
	u16 version_major;
	u16 version_minor;
};

enum nd_pfn_mode {
	PFN_MODE_NONE,
	PFN_MODE_RAM,
	PFN_MODE_PMEM,
};

struct nd_pfn {
	int id;
	u8 *uuid;
	struct device dev;
	unsigned long align;
	unsigned long npfns;
	enum nd_pfn_mode mode;
	struct nd_pfn_sb *pfn_sb;
	struct nd_namespace_common *ndns;
};

struct nd_dax {
	struct nd_pfn nd_pfn;
};

static inline u32 nd_info_block_reserve(void)
{
	return ALIGN(SZ_8K, PAGE_SIZE);
}

enum nd_async_mode {
	ND_SYNC,
	ND_ASYNC,
};

int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
void wait_nvdimm_bus_probe_idle(struct device *dev);
void nd_device_register(struct device *dev);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
void nd_device_notify(struct device *dev, enum nvdimm_event event);
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len);
ssize_t nd_size_select_show(unsigned long current_size,
		const unsigned long *supported, char *buf);
ssize_t nd_size_select_store(struct device *dev, const char *buf,
		unsigned long *current_size, const unsigned long *supported);
int __init nvdimm_init(void);
int __init nd_region_init(void);
int __init nd_label_init(void);
void nvdimm_exit(void);
void nd_region_exit(void);
struct nvdimm;
extern const struct attribute_group nd_device_attribute_group;
extern const struct attribute_group nd_numa_attribute_group;
extern const struct attribute_group *nvdimm_bus_attribute_groups[];
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
		size_t offset, size_t len);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
		unsigned int len);
void nvdimm_set_labeling(struct device *dev);
void nvdimm_set_locked(struct device *dev);
void nvdimm_clear_locked(struct device *dev);
int nvdimm_security_setup_events(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
int nvdimm_security_unlock(struct device *dev);
#else
static inline int nvdimm_security_unlock(struct device *dev)
{
	return 0;
}
#endif
struct nd_btt *to_nd_btt(struct device *dev);

struct nd_gen_sb {
	char reserved[SZ_4K - 8];
	__le64 checksum;
};

u64 nd_sb_checksum(struct nd_gen_sb *sb);
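
/*
 * Checksum sketch for 4K info blocks (assumption: nd_sb_checksum()
 * zeroes the checksum field during the fletcher64 sum and restores it,
 * as in btt_devs.c, so one helper serves both generation and
 * validation; pattern as in nd_pfn.c):
 *
 *	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
 *	pfn_sb->checksum = cpu_to_le64(checksum);
 */
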
#if IS_ENABLED(CONFIG_BTT)
int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_btt(struct device *dev);
struct device *nd_btt_create(struct nd_region *nd_region);
#else
static inline int nd_btt_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_btt(struct device *dev)
{
	return false;
}

static inline struct device *nd_btt_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

struct nd_pfn *to_nd_pfn(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_PFN)

#define MAX_NVDIMM_ALIGN 4

int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_pfn(struct device *dev);
struct device *nd_pfn_create(struct nd_region *nd_region);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
		struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
extern const struct attribute_group *nd_pfn_attribute_groups[];
#else
static inline int nd_pfn_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_pfn(struct device *dev)
{
	return false;
}

static inline struct device *nd_pfn_create(struct nd_region *nd_region)
{
	return NULL;
}

static inline int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
	return -ENODEV;
}
#endif

struct nd_dax *to_nd_dax(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_DAX)
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_dax(struct device *dev);
struct device *nd_dax_create(struct nd_region *nd_region);
#else
static inline int nd_dax_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_dax(struct device *dev)
{
	return false;
}

static inline struct device *nd_dax_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif

int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex);
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
bool is_nvdimm_bus_locked(struct device *dev);
void nvdimm_check_and_set_ro(struct gendisk *disk);
void nvdimm_drvdata_release(struct kref *kref);
void put_ndd(struct nvdimm_drvdata *ndd);
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd);
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res);
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n);
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
bool nvdimm_namespace_locked(struct nd_namespace_common *ndns);
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
struct range;
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct range *range);
int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
		resource_size_t size);
void devm_namespace_disable(struct device *dev,
		struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
/* max struct page size independent of kernel config */
#define MAX_STRUCT_PAGE_SIZE 64
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
				   struct dev_pagemap *pgmap)
{
	return -ENXIO;
}
#endif
int nd_blk_region_init(struct nd_region *nd_region);
int nd_region_activate(struct nd_region *nd_region);

static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
		unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}
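
/*
 * Usage sketch: @len is in bytes and is converted to 512-byte sectors
 * for badblocks_check(); I/O paths typically fail reads that intersect
 * poison (pattern as in pmem.c):
 *
 *	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
 *		return BLK_STS_IOERR;
 */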
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
const u8 *nd_dev_to_uuid(struct device *dev);
bool pmem_should_map_pages(struct device *dev);
#endif /* __ND_H__ */