/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);
void nvdimm_bus_lock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);
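
/*
 * Example (illustrative sketch, not part of this file): any device that
 * hangs off an nvdimm_bus can serialize reconfiguration through these
 * helpers; walk_to_nvdimm_bus() resolves the owning bus from any point
 * in the device hierarchy, so callers need not hold a bus pointer.
 * "example_reconfig" is a hypothetical name for this sketch.
 *
 *	static void example_reconfig(struct device *dev)
 *	{
 *		nvdimm_bus_lock(dev);
 *		WARN_ON(!is_nvdimm_bus_locked(dev));
 *		// ...mutate bus-scoped state, e.g. the mapping_list...
 *		nvdimm_bus_unlock(dev);
 *	}
 */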

struct nvdimm_map {
	struct nvdimm_bus *nvdimm_bus;
	struct list_head list;
	resource_size_t offset;
	unsigned long flags;
	size_t size;
	union {
		void *mem;
		void __iomem *iomem;
	};
	struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
		resource_size_t offset)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
		if (nvdimm_map->offset == offset)
			return nvdimm_map;
	return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
		resource_size_t offset, size_t size, unsigned long flags)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
	if (!nvdimm_map)
		return NULL;

	INIT_LIST_HEAD(&nvdimm_map->list);
	nvdimm_map->nvdimm_bus = nvdimm_bus;
	nvdimm_map->offset = offset;
	nvdimm_map->flags = flags;
	nvdimm_map->size = size;
	kref_init(&nvdimm_map->kref);

	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
		dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
				&offset, size, dev_name(dev));
		goto err_request_region;
	}

	if (flags)
		nvdimm_map->mem = memremap(offset, size, flags);
	else
		nvdimm_map->iomem = ioremap(offset, size);

	/* mem and iomem share storage, so this checks either mapping type */
	if (!nvdimm_map->mem)
		goto err_map;

	dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
			__func__);
	list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

	return nvdimm_map;

 err_map:
	release_mem_region(offset, size);
 err_request_region:
	kfree(nvdimm_map);
	return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = container_of(kref, struct nvdimm_map, kref);
	nvdimm_bus = nvdimm_map->nvdimm_bus;

	dev_dbg(&nvdimm_bus->dev, "%s: %pa\n", __func__, &nvdimm_map->offset);
	list_del(&nvdimm_map->list);
	if (nvdimm_map->flags)
		memunmap(nvdimm_map->mem);
	else
		iounmap(nvdimm_map->iomem);
	release_mem_region(nvdimm_map->offset, nvdimm_map->size);
	kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
	struct nvdimm_map *nvdimm_map = data;
	struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	kref_put(&nvdimm_map->kref, nvdimm_map_release);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nvdimm_map *nvdimm_map;

	nvdimm_bus_lock(dev);
	nvdimm_map = find_nvdimm_map(dev, offset);
	if (!nvdimm_map)
		nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
	else
		kref_get(&nvdimm_map->kref);
	nvdimm_bus_unlock(dev);

	if (!nvdimm_map)
		return NULL;

	if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
		return NULL;

	return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
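
/*
 * Example (illustrative sketch, not part of this file): a region driver
 * sharing a flush-hint table across interleave sets could map it once
 * per bus from its probe path; "hint_base" and "HINT_SIZE" are
 * hypothetical names.
 *
 *	void *hints = devm_nvdimm_memremap(dev, hint_base, HINT_SIZE,
 *			ARCH_MEMREMAP_PMEM);
 *
 *	if (!hints)
 *		return -ENXIO;
 *	// A second region requesting the same offset gets the existing
 *	// mapping via kref_get(); devm teardown drops the reference.
 */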

u64 nd_fletcher64(void *addr, size_t len, bool le)
{
	u32 *buf = addr;
	u32 lo32 = 0;
	u64 hi32 = 0;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);
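
/*
 * Example (illustrative sketch): checksumming an on-media structure with
 * its checksum field zeroed first, as namespace-label validation does.
 * The "nsl" object and its "checksum" member are schematic here.
 *
 *	u64 sum;
 *
 *	nsl->checksum = 0;
 *	sum = nd_fletcher64(nsl, sizeof(*nsl), 1);
 *	nsl->checksum = __cpu_to_le64(sum);
 */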

struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

static bool is_uuid_sep(char sep)
{
	if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
		return true;
	return false;
}

static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
		size_t len)
{
	const char *str = buf;
	u8 uuid[16];
	int i;

	for (i = 0; i < 16; i++) {
		if (!isxdigit(str[0]) || !isxdigit(str[1])) {
			dev_dbg(dev, "%s: pos: %d buf[%zd]: %c buf[%zd]: %c\n",
					__func__, i, str - buf, str[0],
					str + 1 - buf, str[1]);
			return -EINVAL;
		}

		uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
		str += 2;
		if (is_uuid_sep(*str))
			str++;
	}

	memcpy(uuid_out, uuid, sizeof(uuid));
	return 0;
}

/**
 * nd_uuid_store - common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of the sysfs buffer
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached).
 * LOCKING: expects device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len)
{
	u8 uuid[16];
	int rc;

	if (dev->driver)
		return -EBUSY;

	rc = nd_uuid_parse(dev, uuid, buf, len);
	if (rc)
		return rc;

	kfree(*uuid_out);
	*uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!(*uuid_out))
		return -ENOMEM;

	return 0;
}
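
/*
 * Example (illustrative sketch, not part of this file): a sysfs store
 * handler built on nd_uuid_store(); "nd_foo"/"to_nd_foo()" are
 * hypothetical stand-ins for a container with a "u8 *uuid" member.
 *
 *	static ssize_t uuid_store(struct device *dev,
 *			struct device_attribute *attr, const char *buf,
 *			size_t len)
 *	{
 *		struct nd_foo *foo = to_nd_foo(dev);
 *		ssize_t rc;
 *
 *		device_lock(dev);
 *		rc = nd_uuid_store(dev, &foo->uuid, buf, len);
 *		device_unlock(dev);
 *		return rc ? rc : len;
 *	}
 */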

ssize_t nd_sector_size_show(unsigned long current_lbasize,
		const unsigned long *supported, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; supported[i]; i++)
		if (current_lbasize == supported[i])
			len += sprintf(buf + len, "[%ld] ", supported[i]);
		else
			len += sprintf(buf + len, "%ld ", supported[i]);
	len += sprintf(buf + len, "\n");
	return len;
}

ssize_t nd_sector_size_store(struct device *dev, const char *buf,
		unsigned long *current_lbasize, const unsigned long *supported)
{
	unsigned long lbasize;
	int rc, i;

	if (dev->driver)
		return -EBUSY;

	rc = kstrtoul(buf, 0, &lbasize);
	if (rc)
		return rc;

	for (i = 0; supported[i]; i++)
		if (lbasize == supported[i])
			break;

	if (supported[i]) {
		*current_lbasize = lbasize;
		return 0;
	} else {
		return -EINVAL;
	}
}
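
/*
 * Example (illustrative sketch): callers pass a zero-terminated table of
 * permitted LBA sizes and the show path brackets the current selection,
 * e.g. "512 [4096]".  The "ns" object here is hypothetical.
 *
 *	static const unsigned long ns_lbasize_supported[] = { 512, 4096, 0 };
 *
 *	// show:
 *	len = nd_sector_size_show(ns->lbasize, ns_lbasize_supported, buf);
 *	// store, with device_lock() held:
 *	rc = nd_sector_size_store(dev, buf, &ns->lbasize,
 *			ns_lbasize_supported);
 */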

void __nd_iostat_start(struct bio *bio, unsigned long *start)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();

	*start = jiffies;
	part_round_stats(cpu, &disk->part0);
	part_stat_inc(cpu, &disk->part0, ios[rw]);
	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&disk->part0, rw);
	part_stat_unlock();
}
EXPORT_SYMBOL(__nd_iostat_start);

void nd_iostat_end(struct bio *bio, unsigned long start)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	unsigned long duration = jiffies - start;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();

	part_stat_add(cpu, &disk->part0, ticks[rw], duration);
	part_round_stats(cpu, &disk->part0);
	part_dec_in_flight(&disk->part0, rw);
	part_stat_unlock();
}
EXPORT_SYMBOL(nd_iostat_end);
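
/*
 * Example (illustrative sketch): a bio-based driver brackets each bio
 * with these accounting helpers.  nd_iostat_start() (in nd.h) wraps
 * __nd_iostat_start() with a blk_queue_io_stat() check and reports
 * whether accounting is enabled.
 *
 *	unsigned long start;
 *	bool do_acct;
 *
 *	do_acct = nd_iostat_start(bio, &start);
 *	// ...transfer each bio_vec...
 *	if (do_acct)
 *		nd_iostat_end(bio, start);
 *	bio_endio(bio);
 */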

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cmd, len = 0;
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct device *parent = nvdimm_bus->dev.parent;

	if (nd_desc->provider_name)
		return nd_desc->provider_name;
	else if (parent)
		return dev_name(parent);
	else
		return "unknown";
}

static ssize_t provider_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

/*
 * A device_lock()/device_unlock() cycle synchronizes against any
 * in-flight ->probe() / ->remove() of the device.
 */
static int flush_namespaces(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}

static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc;

	if (nd_desc->flush_probe) {
		rc = nd_desc->flush_probe(nd_desc);
		if (rc)
			return rc;
	}
	nd_synchronize();
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group);

static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
	dev_dbg(bb->dev, "Found a poison range (0x%llx, 0x%llx)\n",
			(u64) s * 512, (u64) num * 512);
	/* this isn't an error as the hardware will still throw an exception */
	if (badblocks_set(bb, s, num, 1))
		dev_info_once(bb->dev, "%s: failed for sector %llx\n",
				__func__, (u64) s);
}

/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb: badblocks instance to populate
 * @ns_offset: namespace offset where the error range begins (in bytes)
 * @len: number of bytes of poison to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size)
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
	const unsigned int sector_size = 512;
	sector_t start_sector, end_sector;
	u64 num_sectors;
	u32 rem;

	start_sector = div_u64(ns_offset, sector_size);
	end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
	if (rem)
		end_sector++;
	num_sectors = end_sector - start_sector;

	if (unlikely(num_sectors > (u64)INT_MAX)) {
		u64 remaining = num_sectors;
		sector_t s = start_sector;

		while (remaining) {
			int done = min_t(u64, remaining, INT_MAX);

			set_badblock(bb, s, done);
			remaining -= done;
			s += done;
		}
	} else
		set_badblock(bb, start_sector, num_sectors);
}
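
/*
 * Worked example: a poison range at namespace offset 0x300 with length
 * 0x300 covers bytes [0x300, 0x600).  With 512-byte sectors that yields
 * start_sector = 1 and end_sector = 3 (no remainder), so sectors 1 and
 * 2, both only partially poisoned, are marked bad in their entirety.
 */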

static void badblocks_populate(struct list_head *poison_list,
		struct badblocks *bb, const struct resource *res)
{
	struct nd_poison *pl;

	if (list_empty(poison_list))
		return;

	list_for_each_entry(pl, poison_list, list) {
		u64 pl_end = pl->start + pl->length - 1;

		/* Discard intervals with no intersection */
		if (pl_end < res->start)
			continue;
		if (pl->start > res->end)
			continue;
		/* Deal with any overlap after start of the namespace */
		if (pl->start >= res->start) {
			u64 start = pl->start;
			u64 len;

			if (pl_end <= res->end)
				len = pl->length;
			else
				len = res->start + resource_size(res)
					- pl->start;
			__add_badblock_range(bb, start - res->start, len);
			continue;
		}
		/* Deal with overlap for poison starting before the namespace */
		if (pl->start < res->start) {
			u64 len;

			if (pl_end < res->end)
				len = pl->start + pl->length - res->start;
			else
				len = resource_size(res);
			__add_badblock_range(bb, 0, len);
		}
	}
}

/**
 * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
 * @nd_region: parent region of the range to interrogate
 * @bb: badblocks instance to populate
 * @res: resource range to consider
 *
 * The poison list generated during bus initialization may contain
 * multiple, possibly overlapping physical address ranges.  Compare each
 * of these ranges to the resource range currently being initialized,
 * and add badblocks entries for all matching sub-ranges.
 */
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct resource *res)
{
	struct nvdimm_bus *nvdimm_bus;
	struct list_head *poison_list;

	if (!is_nd_pmem(&nd_region->dev)) {
		dev_WARN_ONCE(&nd_region->dev, 1,
				"%s only valid for pmem regions\n", __func__);
		return;
	}
	nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	poison_list = &nvdimm_bus->poison_list;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	badblocks_populate(poison_list, bb, res);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
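
/*
 * Example (illustrative sketch, not part of this file): the pmem driver
 * populates its gendisk badblocks from the namespace range at attach
 * time; the "pmem" fields and padding variables are schematic.
 *
 *	struct resource res = {
 *		.start = nsio->res.start + start_pad,
 *		.end = nsio->res.end - end_trunc,
 *	};
 *
 *	if (devm_init_badblocks(dev, &pmem->bb))
 *		return -ENOMEM;
 *	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
 */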

static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length,
		gfp_t flags)
{
	struct nd_poison *pl;

	pl = kzalloc(sizeof(*pl), flags);
	if (!pl)
		return -ENOMEM;

	pl->start = addr;
	pl->length = length;
	list_add_tail(&pl->list, &nvdimm_bus->poison_list);

	return 0;
}

static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	struct nd_poison *pl;

	if (list_empty(&nvdimm_bus->poison_list))
		return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);

	/*
	 * There is a chance this is a duplicate, check for those first.
	 * This will be the common case as ARS_STATUS returns all known
	 * errors in the SPA space, and we can't query it per region
	 */
	list_for_each_entry(pl, &nvdimm_bus->poison_list, list)
		if (pl->start == addr) {
			/* If length has changed, update this list entry */
			if (pl->length != length)
				pl->length = length;
			return 0;
		}

	/*
	 * If not a duplicate or a simple length update, add the entry as is,
	 * as any overlapping ranges will get resolved when the list is consumed
	 * and converted to badblocks
	 */
	return add_poison(nvdimm_bus, addr, length, GFP_KERNEL);
}

int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	int rc;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	rc = bus_add_poison(nvdimm_bus, addr, length);
	nvdimm_bus_unlock(&nvdimm_bus->dev);

	return rc;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);
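
/*
 * Example (illustrative sketch): a bus provider feeding Address Range
 * Scrub (ARS) results into the shared poison list; the "ars_status"
 * record layout shown here is schematic.
 *
 *	for (i = 0; i < ars_status->num_records; i++) {
 *		rc = nvdimm_bus_add_poison(nvdimm_bus,
 *				ars_status->records[i].err_address,
 *				ars_status->records[i].length);
 *		if (rc)
 *			return rc;
 *	}
 */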

void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
		phys_addr_t start, unsigned int len)
{
	struct list_head *poison_list = &nvdimm_bus->poison_list;
	u64 clr_end = start + len - 1;
	struct nd_poison *pl, *next;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	WARN_ON_ONCE(list_empty(poison_list));

	/*
	 * [start, clr_end] is the poison interval being cleared.
	 * [pl->start, pl_end] is the poison_list entry we're comparing
	 * the above interval against.  The poison list entry may need
	 * to be modified (update either start or length), deleted, or
	 * split into two based on the overlap characteristics
	 */

	list_for_each_entry_safe(pl, next, poison_list, list) {
		u64 pl_end = pl->start + pl->length - 1;

		/* Skip intervals with no intersection */
		if (pl_end < start)
			continue;
		if (pl->start > clr_end)
			continue;
		/* Delete completely overlapped poison entries */
		if ((pl->start >= start) && (pl_end <= clr_end)) {
			list_del(&pl->list);
			kfree(pl);
			continue;
		}
		/* Adjust start point of partially cleared entries */
		if ((start <= pl->start) && (clr_end > pl->start)) {
			pl->length -= clr_end - pl->start + 1;
			pl->start = clr_end + 1;
			continue;
		}
		/* Adjust pl->length for partial clearing at the tail end */
		if ((pl->start < start) && (pl_end <= clr_end)) {
			/* pl->start remains the same */
			pl->length = start - pl->start;
			continue;
		}
		/*
		 * If clearing in the middle of an entry, we split it into
		 * two by modifying the current entry to represent one half of
		 * the split, and adding a new entry for the second half.
		 */
		if ((pl->start < start) && (pl_end > clr_end)) {
			u64 new_start = clr_end + 1;
			u64 new_len = pl_end - new_start + 1;

			/* Add new entry covering the right half */
			add_poison(nvdimm_bus, new_start, new_len, GFP_NOIO);
			/* Adjust this entry to cover the left half */
			pl->length = start - pl->start;
			continue;
		}
	}
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_clear_from_poison_list);
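
/*
 * Worked example for the split case above: given an entry covering
 * [0x1000, 0x4fff] (start 0x1000, length 0x4000), clearing the interval
 * [0x2000, 0x2fff] trims the original entry to [0x1000, 0x1fff] and
 * adds a new entry for [0x3000, 0x4fff].
 */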

#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	struct blk_integrity bi;

	if (meta_size == 0)
		return 0;

	memset(&bi, 0, sizeof(bi));

	bi.tuple_size = meta_size;
	bi.tag_size = meta_size;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, 1);

	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#endif
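
/*
 * Example (illustrative sketch, not part of this file): the BTT driver
 * registers its per-sector metadata as an integrity tag area during
 * disk setup; the names below follow drivers/nvdimm/btt.c but are
 * schematic here.
 *
 *	rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
 *	if (rc)
 *		goto out_cleanup_disk;
 */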

static __init int libnvdimm_init(void)
{
	int rc;

	rc = nvdimm_bus_init();
	if (rc)
		return rc;
	rc = nvdimm_init();
	if (rc)
		goto err_dimm;
	rc = nd_region_init();
	if (rc)
		goto err_region;
	return 0;
 err_region:
	nvdimm_exit();
 err_dimm:
	nvdimm_bus_exit();
	return rc;
}

static __exit void libnvdimm_exit(void)
{
	WARN_ON(!list_empty(&nvdimm_bus_list));
	nd_region_exit();
	nvdimm_exit();
	nvdimm_bus_exit();
	nd_region_devs_exit();
	nvdimm_devs_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);