1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Core registration and callback routines for MTD
4 * drivers and users.
5 *
6 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
7 * Copyright © 2006 Red Hat UK Limited
8 */
9
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/ptrace.h>
13 #include <linux/seq_file.h>
14 #include <linux/string.h>
15 #include <linux/timer.h>
16 #include <linux/major.h>
17 #include <linux/fs.h>
18 #include <linux/err.h>
19 #include <linux/ioctl.h>
20 #include <linux/init.h>
21 #include <linux/of.h>
22 #include <linux/proc_fs.h>
23 #include <linux/idr.h>
24 #include <linux/backing-dev.h>
25 #include <linux/gfp.h>
26 #include <linux/slab.h>
27 #include <linux/reboot.h>
28 #include <linux/leds.h>
29 #include <linux/debugfs.h>
30 #include <linux/nvmem-provider.h>
31
32 #include <linux/mtd/mtd.h>
33 #include <linux/mtd/partitions.h>
34
35 #include "mtdcore.h"
36
37 struct backing_dev_info *mtd_bdi;
38
39 #ifdef CONFIG_PM_SLEEP
40
41 static int mtd_cls_suspend(struct device *dev)
42 {
43 struct mtd_info *mtd = dev_get_drvdata(dev);
44
45 return mtd ? mtd_suspend(mtd) : 0;
46 }
47
48 static int mtd_cls_resume(struct device *dev)
49 {
50 struct mtd_info *mtd = dev_get_drvdata(dev);
51
52 if (mtd)
53 mtd_resume(mtd);
54 return 0;
55 }
56
57 static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
58 #define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
59 #else
60 #define MTD_CLS_PM_OPS NULL
61 #endif
62
63 static struct class mtd_class = {
64 .name = "mtd",
65 .owner = THIS_MODULE,
66 .pm = MTD_CLS_PM_OPS,
67 };
68
69 static DEFINE_IDR(mtd_idr);
70
71 /* These are exported solely for the purpose of mtd_blkdevs.c. You
72 should not use them for _anything_ else */
73 DEFINE_MUTEX(mtd_table_mutex);
74 EXPORT_SYMBOL_GPL(mtd_table_mutex);
75
76 struct mtd_info *__mtd_next_device(int i)
77 {
78 return idr_get_next(&mtd_idr, &i);
79 }
80 EXPORT_SYMBOL_GPL(__mtd_next_device);
81
82 static LIST_HEAD(mtd_notifiers);
83
84
85 #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
86
87 /* REVISIT once MTD uses the driver model better, whoever allocates
88 * the mtd_info will probably want to use the release() hook...
89 */
90 static void mtd_release(struct device *dev)
91 {
92 struct mtd_info *mtd = dev_get_drvdata(dev);
93 dev_t index = MTD_DEVT(mtd->index);
94
95 /* remove /dev/mtdXro node */
96 device_destroy(&mtd_class, index + 1);
97 }
98
99 static ssize_t mtd_type_show(struct device *dev,
100 struct device_attribute *attr, char *buf)
101 {
102 struct mtd_info *mtd = dev_get_drvdata(dev);
103 char *type;
104
105 switch (mtd->type) {
106 case MTD_ABSENT:
107 type = "absent";
108 break;
109 case MTD_RAM:
110 type = "ram";
111 break;
112 case MTD_ROM:
113 type = "rom";
114 break;
115 case MTD_NORFLASH:
116 type = "nor";
117 break;
118 case MTD_NANDFLASH:
119 type = "nand";
120 break;
121 case MTD_DATAFLASH:
122 type = "dataflash";
123 break;
124 case MTD_UBIVOLUME:
125 type = "ubi";
126 break;
127 case MTD_MLCNANDFLASH:
128 type = "mlc-nand";
129 break;
130 default:
131 type = "unknown";
132 }
133
134 return snprintf(buf, PAGE_SIZE, "%s\n", type);
135 }
136 static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
137
138 static ssize_t mtd_flags_show(struct device *dev,
139 struct device_attribute *attr, char *buf)
140 {
141 struct mtd_info *mtd = dev_get_drvdata(dev);
142
143 return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
144 }
145 static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
146
147 static ssize_t mtd_size_show(struct device *dev,
148 struct device_attribute *attr, char *buf)
149 {
150 struct mtd_info *mtd = dev_get_drvdata(dev);
151
152 return snprintf(buf, PAGE_SIZE, "%llu\n",
153 (unsigned long long)mtd->size);
154 }
155 static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
156
157 static ssize_t mtd_erasesize_show(struct device *dev,
158 struct device_attribute *attr, char *buf)
159 {
160 struct mtd_info *mtd = dev_get_drvdata(dev);
161
162 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
163 }
164 static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
165
166 static ssize_t mtd_writesize_show(struct device *dev,
167 struct device_attribute *attr, char *buf)
168 {
169 struct mtd_info *mtd = dev_get_drvdata(dev);
170
171 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
172 }
173 static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
174
175 static ssize_t mtd_subpagesize_show(struct device *dev,
176 struct device_attribute *attr, char *buf)
177 {
178 struct mtd_info *mtd = dev_get_drvdata(dev);
179 unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
180
181 return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
182 }
183 static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
184
185 static ssize_t mtd_oobsize_show(struct device *dev,
186 struct device_attribute *attr, char *buf)
187 {
188 struct mtd_info *mtd = dev_get_drvdata(dev);
189
190 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
191 }
192 static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
193
194 static ssize_t mtd_oobavail_show(struct device *dev,
195 struct device_attribute *attr, char *buf)
196 {
197 struct mtd_info *mtd = dev_get_drvdata(dev);
198
199 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->oobavail);
200 }
201 static DEVICE_ATTR(oobavail, S_IRUGO, mtd_oobavail_show, NULL);
202
203 static ssize_t mtd_numeraseregions_show(struct device *dev,
204 struct device_attribute *attr, char *buf)
205 {
206 struct mtd_info *mtd = dev_get_drvdata(dev);
207
208 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
209 }
210 static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
211 NULL);
212
213 static ssize_t mtd_name_show(struct device *dev,
214 struct device_attribute *attr, char *buf)
215 {
216 struct mtd_info *mtd = dev_get_drvdata(dev);
217
218 return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
219 }
220 static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
221
222 static ssize_t mtd_ecc_strength_show(struct device *dev,
223 struct device_attribute *attr, char *buf)
224 {
225 struct mtd_info *mtd = dev_get_drvdata(dev);
226
227 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
228 }
229 static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
230
231 static ssize_t mtd_bitflip_threshold_show(struct device *dev,
232 struct device_attribute *attr,
233 char *buf)
234 {
235 struct mtd_info *mtd = dev_get_drvdata(dev);
236
237 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
238 }
239
240 static ssize_t mtd_bitflip_threshold_store(struct device *dev,
241 struct device_attribute *attr,
242 const char *buf, size_t count)
243 {
244 struct mtd_info *mtd = dev_get_drvdata(dev);
245 unsigned int bitflip_threshold;
246 int retval;
247
248 retval = kstrtouint(buf, 0, &bitflip_threshold);
249 if (retval)
250 return retval;
251
252 mtd->bitflip_threshold = bitflip_threshold;
253 return count;
254 }
255 static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
256 mtd_bitflip_threshold_show,
257 mtd_bitflip_threshold_store);
258
259 static ssize_t mtd_ecc_step_size_show(struct device *dev,
260 struct device_attribute *attr, char *buf)
261 {
262 struct mtd_info *mtd = dev_get_drvdata(dev);
263
264 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
265
266 }
267 static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
268
269 static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
270 struct device_attribute *attr, char *buf)
271 {
272 struct mtd_info *mtd = dev_get_drvdata(dev);
273 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
274
275 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
276 }
277 static DEVICE_ATTR(corrected_bits, S_IRUGO,
278 mtd_ecc_stats_corrected_show, NULL);
279
280 static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
281 struct device_attribute *attr, char *buf)
282 {
283 struct mtd_info *mtd = dev_get_drvdata(dev);
284 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
285
286 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
287 }
288 static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);
289
290 static ssize_t mtd_badblocks_show(struct device *dev,
291 struct device_attribute *attr, char *buf)
292 {
293 struct mtd_info *mtd = dev_get_drvdata(dev);
294 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
295
296 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
297 }
298 static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);
299
300 static ssize_t mtd_bbtblocks_show(struct device *dev,
301 struct device_attribute *attr, char *buf)
302 {
303 struct mtd_info *mtd = dev_get_drvdata(dev);
304 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
305
306 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
307 }
308 static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);
309
310 static struct attribute *mtd_attrs[] = {
311 &dev_attr_type.attr,
312 &dev_attr_flags.attr,
313 &dev_attr_size.attr,
314 &dev_attr_erasesize.attr,
315 &dev_attr_writesize.attr,
316 &dev_attr_subpagesize.attr,
317 &dev_attr_oobsize.attr,
318 &dev_attr_oobavail.attr,
319 &dev_attr_numeraseregions.attr,
320 &dev_attr_name.attr,
321 &dev_attr_ecc_strength.attr,
322 &dev_attr_ecc_step_size.attr,
323 &dev_attr_corrected_bits.attr,
324 &dev_attr_ecc_failures.attr,
325 &dev_attr_bad_blocks.attr,
326 &dev_attr_bbt_blocks.attr,
327 &dev_attr_bitflip_threshold.attr,
328 NULL,
329 };
330 ATTRIBUTE_GROUPS(mtd);
331
332 static const struct device_type mtd_devtype = {
333 .name = "mtd",
334 .groups = mtd_groups,
335 .release = mtd_release,
336 };
337
338 static int mtd_partid_debug_show(struct seq_file *s, void *p)
339 {
340 struct mtd_info *mtd = s->private;
341
342 seq_printf(s, "%s\n", mtd->dbg.partid);
343
344 return 0;
345 }
346
347 DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug);
348
349 static int mtd_partname_debug_show(struct seq_file *s, void *p)
350 {
351 struct mtd_info *mtd = s->private;
352
353 seq_printf(s, "%s\n", mtd->dbg.partname);
354
355 return 0;
356 }
357
358 DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug);
359
360 static struct dentry *dfs_dir_mtd;
361
362 static void mtd_debugfs_populate(struct mtd_info *mtd)
363 {
364 struct device *dev = &mtd->dev;
365 struct dentry *root;
366
367 if (IS_ERR_OR_NULL(dfs_dir_mtd))
368 return;
369
370 root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
371 mtd->dbg.dfs_dir = root;
372
373 if (mtd->dbg.partid)
374 debugfs_create_file("partid", 0400, root, mtd,
375 &mtd_partid_debug_fops);
376
377 if (mtd->dbg.partname)
378 debugfs_create_file("partname", 0400, root, mtd,
379 &mtd_partname_debug_fops);
380 }
381
382 #ifndef CONFIG_MMU
383 unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
384 {
385 switch (mtd->type) {
386 case MTD_RAM:
387 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
388 NOMMU_MAP_READ | NOMMU_MAP_WRITE;
389 case MTD_ROM:
390 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
391 NOMMU_MAP_READ;
392 default:
393 return NOMMU_MAP_COPY;
394 }
395 }
396 EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
397 #endif
398
399 static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
400 void *cmd)
401 {
402 struct mtd_info *mtd;
403
404 mtd = container_of(n, struct mtd_info, reboot_notifier);
405 mtd->_reboot(mtd);
406
407 return NOTIFY_DONE;
408 }
409
410 /**
411 * mtd_wunit_to_pairing_info - get pairing information of a wunit
412 * @mtd: pointer to new MTD device info structure
413 * @wunit: write unit we are interested in
414 * @info: returned pairing information
415 *
416 * Retrieve pairing information associated to the wunit.
417 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
418 * paired together, and where programming a page may influence the page it is
419 * paired with.
420 * The notion of page is replaced by the term wunit (write-unit) to stay
421 * consistent with the ->writesize field.
422 *
423 * The @wunit argument can be extracted from an absolute offset using
424 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
425 * to @wunit.
426 *
427 * From the pairing info the MTD user can find all the wunits paired with
428 * @wunit using the following loop:
429 *
430 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
431 * info.pair = i;
432 * mtd_pairing_info_to_wunit(mtd, &info);
433 * ...
434 * }
435 */
436 int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
437 struct mtd_pairing_info *info)
438 {
439 struct mtd_info *master = mtd_get_master(mtd);
440 int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
441
442 if (wunit < 0 || wunit >= npairs)
443 return -EINVAL;
444
445 if (master->pairing && master->pairing->get_info)
446 return master->pairing->get_info(master, wunit, info);
447
448 info->group = 0;
449 info->pair = wunit;
450
451 return 0;
452 }
453 EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
454
455 /**
456 * mtd_pairing_info_to_wunit - get wunit from pairing information
457 * @mtd: pointer to new MTD device info structure
458 * @info: pairing information struct
459 *
460 * Returns a non-negative number representing the wunit associated to the info
461 * struct, or a negative error code.
462 *
463 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
464 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
465 * doc).
466 *
467 * It can also be used to only program the first page of each pair (i.e.
468 * page attached to group 0), which allows one to use an MLC NAND in
469 * software-emulated SLC mode:
470 *
471 * info.group = 0;
472 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
473 * for (info.pair = 0; info.pair < npairs; info.pair++) {
474 * wunit = mtd_pairing_info_to_wunit(mtd, &info);
475 * mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
476 * mtd->writesize, &retlen, buf + (i * mtd->writesize));
477 * }
478 */
479 int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
480 const struct mtd_pairing_info *info)
481 {
482 struct mtd_info *master = mtd_get_master(mtd);
483 int ngroups = mtd_pairing_groups(master);
484 int npairs = mtd_wunit_per_eb(master) / ngroups;
485
486 if (!info || info->pair < 0 || info->pair >= npairs ||
487 info->group < 0 || info->group >= ngroups)
488 return -EINVAL;
489
490 if (master->pairing && master->pairing->get_wunit)
491 return master->pairing->get_wunit(master, info);
492
493 return info->pair;
494 }
495 EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
496
497 /**
498 * mtd_pairing_groups - get the number of pairing groups
499 * @mtd: pointer to new MTD device info structure
500 *
501 * Returns the number of pairing groups.
502 *
503 * This number is usually equal to the number of bits exposed by a single
504 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
505 * to iterate over all pages of a given pair.
506 */
507 int mtd_pairing_groups(struct mtd_info *mtd)
508 {
509 struct mtd_info *master = mtd_get_master(mtd);
510
511 if (!master->pairing || !master->pairing->ngroups)
512 return 1;
513
514 return master->pairing->ngroups;
515 }
516 EXPORT_SYMBOL_GPL(mtd_pairing_groups);
517
518 static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
519 void *val, size_t bytes)
520 {
521 struct mtd_info *mtd = priv;
522 size_t retlen;
523 int err;
524
525 err = mtd_read(mtd, offset, bytes, &retlen, val);
526 if (err && err != -EUCLEAN)
527 return err;
528
529 return retlen == bytes ? 0 : -EIO;
530 }
531
532 static int mtd_nvmem_add(struct mtd_info *mtd)
533 {
534 struct nvmem_config config = {};
535
536 config.id = -1;
537 config.dev = &mtd->dev;
538 config.name = dev_name(&mtd->dev);
539 config.owner = THIS_MODULE;
540 config.reg_read = mtd_nvmem_reg_read;
541 config.size = mtd->size;
542 config.word_size = 1;
543 config.stride = 1;
544 config.read_only = true;
545 config.root_only = true;
546 config.no_of_node = true;
547 config.priv = mtd;
548
549 mtd->nvmem = nvmem_register(&config);
550 if (IS_ERR(mtd->nvmem)) {
551 /* Just ignore if there is no NVMEM support in the kernel */
552 if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
553 mtd->nvmem = NULL;
554 } else {
555 dev_err(&mtd->dev, "Failed to register NVMEM device\n");
556 return PTR_ERR(mtd->nvmem);
557 }
558 }
559
560 return 0;
561 }
562
563 /**
564 * add_mtd_device - register an MTD device
565 * @mtd: pointer to new MTD device info structure
566 *
567 * Add a device to the list of MTD devices present in the system, and
568 * notify each currently active MTD 'user' of its arrival. Returns
569 * zero on success or non-zero on failure.
570 */
571
572 int add_mtd_device(struct mtd_info *mtd)
573 {
574 struct mtd_info *master = mtd_get_master(mtd);
575 struct mtd_notifier *not;
576 int i, error;
577
578 /*
579 * May occur, for instance, on buggy drivers which call
580 * mtd_device_parse_register() multiple times on the same master MTD,
581 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
582 */
583 if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
584 return -EEXIST;
585
586 BUG_ON(mtd->writesize == 0);
587
588 /*
589 * MTD drivers should implement ->_{write,read}() or
590 * ->_{write,read}_oob(), but not both.
591 */
592 if (WARN_ON((mtd->_write && mtd->_write_oob) ||
593 (mtd->_read && mtd->_read_oob)))
594 return -EINVAL;
595
596 if (WARN_ON((!mtd->erasesize || !master->_erase) &&
597 !(mtd->flags & MTD_NO_ERASE)))
598 return -EINVAL;
599
600 /*
601 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
602 * master is an MLC NAND and has a proper pairing scheme defined.
603 * We also reject masters that implement ->_writev() for now, because
604 * NAND controller drivers don't implement this hook, and adding the
605 * SLC -> MLC address/length conversion to this path is useless if we
606 * don't have a user.
607 */
608 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
609 (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
610 !master->pairing || master->_writev))
611 return -EINVAL;
612
613 mutex_lock(&mtd_table_mutex);
614
615 i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
616 if (i < 0) {
617 error = i;
618 goto fail_locked;
619 }
620
621 mtd->index = i;
622 mtd->usecount = 0;
623
624 /* default value if not set by driver */
625 if (mtd->bitflip_threshold == 0)
626 mtd->bitflip_threshold = mtd->ecc_strength;
627
628 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
629 int ngroups = mtd_pairing_groups(master);
630
631 mtd->erasesize /= ngroups;
632 mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
633 mtd->erasesize;
634 }
635
636 if (is_power_of_2(mtd->erasesize))
637 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
638 else
639 mtd->erasesize_shift = 0;
640
641 if (is_power_of_2(mtd->writesize))
642 mtd->writesize_shift = ffs(mtd->writesize) - 1;
643 else
644 mtd->writesize_shift = 0;
645
646 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
647 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
648
649 /* Some chips always power up locked. Unlock them now */
650 if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
651 error = mtd_unlock(mtd, 0, mtd->size);
652 if (error && error != -EOPNOTSUPP)
653 printk(KERN_WARNING
654 "%s: unlock failed, writes may not work\n",
655 mtd->name);
656 /* Ignore unlock failures? */
657 error = 0;
658 }
659
660 /* Caller should have set dev.parent to match the
661 * physical device, if appropriate.
662 */
663 mtd->dev.type = &mtd_devtype;
664 mtd->dev.class = &mtd_class;
665 mtd->dev.devt = MTD_DEVT(i);
666 dev_set_name(&mtd->dev, "mtd%d", i);
667 dev_set_drvdata(&mtd->dev, mtd);
668 of_node_get(mtd_get_of_node(mtd));
669 error = device_register(&mtd->dev);
670 if (error) {
671 put_device(&mtd->dev);
672 goto fail_added;
673 }
674
675 /* Add the nvmem provider */
676 error = mtd_nvmem_add(mtd);
677 if (error)
678 goto fail_nvmem_add;
679
680 mtd_debugfs_populate(mtd);
681
682 device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
683 "mtd%dro", i);
684
685 pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
686 /* No need to get a refcount on the module containing
687 the notifier, since we hold the mtd_table_mutex */
688 list_for_each_entry(not, &mtd_notifiers, list)
689 not->add(mtd);
690
691 mutex_unlock(&mtd_table_mutex);
692 /* We _know_ we aren't being removed, because
693 our caller is still holding us here. So none
694 of this try_ nonsense, and no bitching about it
695 either. :) */
696 __module_get(THIS_MODULE);
697 return 0;
698
699 fail_nvmem_add:
700 device_unregister(&mtd->dev);
701 fail_added:
702 of_node_put(mtd_get_of_node(mtd));
703 idr_remove(&mtd_idr, i);
704 fail_locked:
705 mutex_unlock(&mtd_table_mutex);
706 return error;
707 }
708
709 /**
710 * del_mtd_device - unregister an MTD device
711 * @mtd: pointer to MTD device info structure
712 *
713 * Remove a device from the list of MTD devices present in the system,
714 * and notify each currently active MTD 'user' of its departure.
715 * Returns zero on success or a negative error code on failure, e.g. when
716 * the requested device does not appear to be present in the list.
717 */
718
719 int del_mtd_device(struct mtd_info *mtd)
720 {
721 int ret;
722 struct mtd_notifier *not;
723
724 mutex_lock(&mtd_table_mutex);
725
726 if (idr_find(&mtd_idr, mtd->index) != mtd) {
727 ret = -ENODEV;
728 goto out_error;
729 }
730
731 /* No need to get a refcount on the module containing
732 the notifier, since we hold the mtd_table_mutex */
733 list_for_each_entry(not, &mtd_notifiers, list)
734 not->remove(mtd);
735
736 if (mtd->usecount) {
737 printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
738 mtd->index, mtd->name, mtd->usecount);
739 ret = -EBUSY;
740 } else {
741 debugfs_remove_recursive(mtd->dbg.dfs_dir);
742
743 /* Try to remove the NVMEM provider */
744 if (mtd->nvmem)
745 nvmem_unregister(mtd->nvmem);
746
747 device_unregister(&mtd->dev);
748
749 idr_remove(&mtd_idr, mtd->index);
750 of_node_put(mtd_get_of_node(mtd));
751
752 module_put(THIS_MODULE);
753 ret = 0;
754 }
755
756 out_error:
757 mutex_unlock(&mtd_table_mutex);
758 return ret;
759 }
760
761 /*
762 * Set a few defaults based on the parent device, if not provided by the
763 * driver.
764 */
765 static void mtd_set_dev_defaults(struct mtd_info *mtd)
766 {
767 if (mtd->dev.parent) {
768 if (!mtd->owner && mtd->dev.parent->driver)
769 mtd->owner = mtd->dev.parent->driver->owner;
770 if (!mtd->name)
771 mtd->name = dev_name(mtd->dev.parent);
772 } else {
773 pr_debug("mtd device won't show a device symlink in sysfs\n");
774 }
775
776 INIT_LIST_HEAD(&mtd->partitions);
777 mutex_init(&mtd->master.partitions_lock);
778 }
779
780 /**
781 * mtd_device_parse_register - parse partitions and register an MTD device.
782 *
783 * @mtd: the MTD device to register
784 * @types: the list of MTD partition probes to try, see
785 * 'parse_mtd_partitions()' for more information
786 * @parser_data: MTD partition parser-specific data
787 * @parts: fallback partition information to register, if parsing fails;
788 * only valid if %nr_parts > %0
789 * @nr_parts: the number of partitions in parts, if zero then the full
790 * MTD device is registered if no partition info is found
791 *
792 * This function aggregates MTD partitions parsing (done by
793 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
794 * basically follows the most common pattern found in many MTD drivers:
795 *
796 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
797 * registered first.
798 * * Then it tries to probe partitions on MTD device @mtd using parsers
799 * specified in @types (if @types is %NULL, then the default list of parsers
800 * is used, see 'parse_mtd_partitions()' for more information). If none are
801 * found, this function falls back to the information specified in
802 * @parts/@nr_parts.
803 * * If no partitions were found this function just registers the MTD device
804 * @mtd and exits.
805 *
806 * Returns zero in case of success and a negative error code in case of failure.
807 */
808 int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
809 struct mtd_part_parser_data *parser_data,
810 const struct mtd_partition *parts,
811 int nr_parts)
812 {
813 int ret;
814
815 mtd_set_dev_defaults(mtd);
816
817 if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
818 ret = add_mtd_device(mtd);
819 if (ret)
820 return ret;
821 }
822
823 /* Prefer parsed partitions over driver-provided fallback */
824 ret = parse_mtd_partitions(mtd, types, parser_data);
825 if (ret == -EPROBE_DEFER)
826 goto out;
827
828 if (ret > 0)
829 ret = 0;
830 else if (nr_parts)
831 ret = add_mtd_partitions(mtd, parts, nr_parts);
832 else if (!device_is_registered(&mtd->dev))
833 ret = add_mtd_device(mtd);
834 else
835 ret = 0;
836
837 if (ret)
838 goto out;
839
840 /*
841 * FIXME: some drivers unfortunately call this function more than once.
842 * So we have to check if we've already assigned the reboot notifier.
843 *
844 * Generally, we can make multiple calls work for most cases, but it
845 * does cause problems with parse_mtd_partitions() above (e.g.,
846 * cmdlineparts will register partitions more than once).
847 */
848 WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
849 "MTD already registered\n");
850 if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
851 mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
852 register_reboot_notifier(&mtd->reboot_notifier);
853 }
854
855 out:
856 if (ret && device_is_registered(&mtd->dev))
857 del_mtd_device(mtd);
858
859 return ret;
860 }
861 EXPORT_SYMBOL_GPL(mtd_device_parse_register);
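
/*
 * Illustrative sketch of the most common caller pattern (the "foo_*" names
 * are hypothetical, not part of this file): a flash driver fills in its
 * mtd_info and lets this helper run the partition parsers, falling back to
 * a static partition table when parsing finds nothing.
 *
 *	static const struct mtd_partition foo_parts[] = {
 *		{ .name = "boot", .offset = 0, .size = SZ_1M },
 *		{ .name = "data", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct mtd_info *mtd = foo_init_chip(pdev);
 *
 *		if (IS_ERR(mtd))
 *			return PTR_ERR(mtd);
 *		// NULL types => default parser list (e.g. cmdlinepart, ofpart)
 *		return mtd_device_parse_register(mtd, NULL, NULL,
 *						 foo_parts,
 *						 ARRAY_SIZE(foo_parts));
 *	}
 */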
862
863 /**
864 * mtd_device_unregister - unregister an existing MTD device.
865 *
866 * @master: the MTD device to unregister. This will unregister both the master
867 * and any partitions if registered.
868 */
869 int mtd_device_unregister(struct mtd_info *master)
870 {
871 int err;
872
873 if (master->_reboot)
874 unregister_reboot_notifier(&master->reboot_notifier);
875
876 err = del_mtd_partitions(master);
877 if (err)
878 return err;
879
880 if (!device_is_registered(&master->dev))
881 return 0;
882
883 return del_mtd_device(master);
884 }
885 EXPORT_SYMBOL_GPL(mtd_device_unregister);
886
887 /**
888 * register_mtd_user - register a 'user' of MTD devices.
889 * @new: pointer to notifier info structure
890 *
891 * Registers a pair of callback functions to be called upon addition
892 * or removal of MTD devices. Causes the 'add' callback to be immediately
893 * invoked for each MTD device currently present in the system.
894 */
895 void register_mtd_user(struct mtd_notifier *new)
896 {
897 struct mtd_info *mtd;
898
899 mutex_lock(&mtd_table_mutex);
900
901 list_add(&new->list, &mtd_notifiers);
902
903 __module_get(THIS_MODULE);
904
905 mtd_for_each_device(mtd)
906 new->add(mtd);
907
908 mutex_unlock(&mtd_table_mutex);
909 }
910 EXPORT_SYMBOL_GPL(register_mtd_user);
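
/*
 * Illustrative sketch of a minimal MTD 'user' (hypothetical "foo_*" names):
 * the add/remove callbacks run under mtd_table_mutex, once per existing
 * device at registration time and then on every later add/remove event.
 *
 *	static void foo_add_mtd(struct mtd_info *mtd)
 *	{
 *		pr_info("foo: saw mtd%d (%s)\n", mtd->index, mtd->name);
 *	}
 *
 *	static void foo_remove_mtd(struct mtd_info *mtd)
 *	{
 *		pr_info("foo: mtd%d going away\n", mtd->index);
 *	}
 *
 *	static struct mtd_notifier foo_notifier = {
 *		.add = foo_add_mtd,
 *		.remove = foo_remove_mtd,
 *	};
 *
 *	register_mtd_user(&foo_notifier);
 */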
911
912 /**
913 * unregister_mtd_user - unregister a 'user' of MTD devices.
914 * @old: pointer to notifier info structure
915 *
916 * Removes a callback function pair from the list of 'users' to be
917 * notified upon addition or removal of MTD devices. Causes the
918 * 'remove' callback to be immediately invoked for each MTD device
919 * currently present in the system.
920 */
921 int unregister_mtd_user(struct mtd_notifier *old)
922 {
923 struct mtd_info *mtd;
924
925 mutex_lock(&mtd_table_mutex);
926
927 module_put(THIS_MODULE);
928
929 mtd_for_each_device(mtd)
930 old->remove(mtd);
931
932 list_del(&old->list);
933 mutex_unlock(&mtd_table_mutex);
934 return 0;
935 }
936 EXPORT_SYMBOL_GPL(unregister_mtd_user);
937
938 /**
939 * get_mtd_device - obtain a validated handle for an MTD device
940 * @mtd: last known address of the required MTD device
941 * @num: internal device number of the required MTD device
942 *
943 * Given a number and NULL address, return the num'th entry in the device
944 * table, if any. Given an address and num == -1, search the device table
945 * for a device with that address and return if it's still present. Given
946 * both, return the num'th driver only if its address matches. Return
947 * error code if not.
948 */
949 struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
950 {
951 struct mtd_info *ret = NULL, *other;
952 int err = -ENODEV;
953
954 mutex_lock(&mtd_table_mutex);
955
956 if (num == -1) {
957 mtd_for_each_device(other) {
958 if (other == mtd) {
959 ret = mtd;
960 break;
961 }
962 }
963 } else if (num >= 0) {
964 ret = idr_find(&mtd_idr, num);
965 if (mtd && mtd != ret)
966 ret = NULL;
967 }
968
969 if (!ret) {
970 ret = ERR_PTR(err);
971 goto out;
972 }
973
974 err = __get_mtd_device(ret);
975 if (err)
976 ret = ERR_PTR(err);
977 out:
978 mutex_unlock(&mtd_table_mutex);
979 return ret;
980 }
981 EXPORT_SYMBOL_GPL(get_mtd_device);
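
/*
 * Illustrative usage sketch (hypothetical caller): a handle obtained here
 * pins the backing module and bumps the use count, so every successful
 * get must be balanced by put_mtd_device().
 *
 *	struct mtd_info *mtd = get_mtd_device(NULL, 0);	// open mtd0
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	// ... use mtd_read()/mtd_write() on it ...
 *	put_mtd_device(mtd);
 */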
982
983
984 int __get_mtd_device(struct mtd_info *mtd)
985 {
986 struct mtd_info *master = mtd_get_master(mtd);
987 int err;
988
989 if (!try_module_get(master->owner))
990 return -ENODEV;
991
992 if (master->_get_device) {
993 err = master->_get_device(mtd);
994
995 if (err) {
996 module_put(master->owner);
997 return err;
998 }
999 }
1000
1001 master->usecount++;
1002
1003 while (mtd->parent) {
1004 mtd->usecount++;
1005 mtd = mtd->parent;
1006 }
1007
1008 return 0;
1009 }
1010 EXPORT_SYMBOL_GPL(__get_mtd_device);
1011
1012 /**
1013 * get_mtd_device_nm - obtain a validated handle for an MTD device by
1014 * device name
1015 * @name: MTD device name to open
1016 *
1017 * This function returns MTD device description structure in case of
1018 * success and an error code in case of failure.
1019 */
1020 struct mtd_info *get_mtd_device_nm(const char *name)
1021 {
1022 int err = -ENODEV;
1023 struct mtd_info *mtd = NULL, *other;
1024
1025 mutex_lock(&mtd_table_mutex);
1026
1027 mtd_for_each_device(other) {
1028 if (!strcmp(name, other->name)) {
1029 mtd = other;
1030 break;
1031 }
1032 }
1033
1034 if (!mtd)
1035 goto out_unlock;
1036
1037 err = __get_mtd_device(mtd);
1038 if (err)
1039 goto out_unlock;
1040
1041 mutex_unlock(&mtd_table_mutex);
1042 return mtd;
1043
1044 out_unlock:
1045 mutex_unlock(&mtd_table_mutex);
1046 return ERR_PTR(err);
1047 }
1048 EXPORT_SYMBOL_GPL(get_mtd_device_nm);
1049
1050 void put_mtd_device(struct mtd_info *mtd)
1051 {
1052 mutex_lock(&mtd_table_mutex);
1053 __put_mtd_device(mtd);
1054 mutex_unlock(&mtd_table_mutex);
1055
1056 }
1057 EXPORT_SYMBOL_GPL(put_mtd_device);
1058
1059 void __put_mtd_device(struct mtd_info *mtd)
1060 {
1061 struct mtd_info *master = mtd_get_master(mtd);
1062
1063 while (mtd->parent) {
1064 --mtd->usecount;
1065 BUG_ON(mtd->usecount < 0);
1066 mtd = mtd->parent;
1067 }
1068
1069 master->usecount--;
1070
1071 if (master->_put_device)
1072 master->_put_device(master);
1073
1074 module_put(master->owner);
1075 }
1076 EXPORT_SYMBOL_GPL(__put_mtd_device);
1077
1078 /*
1079 * Erase is a synchronous operation. Device drivers are expected to return a
1080 * negative error code if the operation failed and update instr->fail_addr
1081 * to point to the portion that was not properly erased.
1082 */
1083 int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
1084 {
1085 struct mtd_info *master = mtd_get_master(mtd);
1086 u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
1087 struct erase_info adjinstr;
1088 int ret;
1089
1090 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
1091 adjinstr = *instr;
1092
1093 if (!mtd->erasesize || !master->_erase)
1094 return -ENOTSUPP;
1095
1096 if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
1097 return -EINVAL;
1098 if (!(mtd->flags & MTD_WRITEABLE))
1099 return -EROFS;
1100
1101 if (!instr->len)
1102 return 0;
1103
1104 ledtrig_mtd_activity();
1105
1106 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1107 adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
1108 master->erasesize;
1109 adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
1110 master->erasesize) -
1111 adjinstr.addr;
1112 }
1113
1114 adjinstr.addr += mst_ofs;
1115
1116 ret = master->_erase(master, &adjinstr);
1117
1118 if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
1119 instr->fail_addr = adjinstr.fail_addr - mst_ofs;
1120 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1121 instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
1122 master);
1123 instr->fail_addr *= mtd->erasesize;
1124 }
1125 }
1126
1127 return ret;
1128 }
1129 EXPORT_SYMBOL_GPL(mtd_erase);
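
/*
 * Illustrative sketch of erasing one eraseblock (hypothetical caller):
 * addr/len are expressed in the partition's own offsets and must be
 * eraseblock-aligned for most devices.
 *
 *	struct erase_info ei = {
 *		.addr = eb * mtd->erasesize,
 *		.len  = mtd->erasesize,
 *	};
 *	int err = mtd_erase(mtd, &ei);
 *
 *	if (err)
 *		pr_err("erase failed at %llx\n",
 *		       (unsigned long long)ei.fail_addr);
 */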
1130
1131 /*
1132 * This stuff is for eXecute-In-Place (XIP). phys is optional and may be set to NULL.
1133 */
1134 int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1135 void **virt, resource_size_t *phys)
1136 {
1137 struct mtd_info *master = mtd_get_master(mtd);
1138
1139 *retlen = 0;
1140 *virt = NULL;
1141 if (phys)
1142 *phys = 0;
1143 if (!master->_point)
1144 return -EOPNOTSUPP;
1145 if (from < 0 || from >= mtd->size || len > mtd->size - from)
1146 return -EINVAL;
1147 if (!len)
1148 return 0;
1149
1150 from = mtd_get_master_ofs(mtd, from);
1151 return master->_point(master, from, len, retlen, virt, phys);
1152 }
1153 EXPORT_SYMBOL_GPL(mtd_point);
1154
1155 /* We probably shouldn't allow XIP if the unpoint isn't a NULL */
1156 int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1157 {
1158 struct mtd_info *master = mtd_get_master(mtd);
1159
1160 if (!master->_unpoint)
1161 return -EOPNOTSUPP;
1162 if (from < 0 || from >= mtd->size || len > mtd->size - from)
1163 return -EINVAL;
1164 if (!len)
1165 return 0;
1166 return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
1167 }
1168 EXPORT_SYMBOL_GPL(mtd_unpoint);
1169
1170 /*
1171 * Allow NOMMU mmap() to directly map the device (if not NULL)
1172 * - return the address to which the offset maps
1173 * - return -ENOSYS to indicate refusal to do the mapping
1174 */
1175 unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
1176 unsigned long offset, unsigned long flags)
1177 {
1178 size_t retlen;
1179 void *virt;
1180 int ret;
1181
1182 ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
1183 if (ret)
1184 return ret;
1185 if (retlen != len) {
1186 mtd_unpoint(mtd, offset, retlen);
1187 return -ENOSYS;
1188 }
1189 return (unsigned long)virt;
1190 }
1191 EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
1192
1193 static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
1194 const struct mtd_ecc_stats *old_stats)
1195 {
1196 struct mtd_ecc_stats diff;
1197
1198 if (master == mtd)
1199 return;
1200
1201 diff = master->ecc_stats;
1202 diff.failed -= old_stats->failed;
1203 diff.corrected -= old_stats->corrected;
1204
1205 while (mtd->parent) {
1206 mtd->ecc_stats.failed += diff.failed;
1207 mtd->ecc_stats.corrected += diff.corrected;
1208 mtd = mtd->parent;
1209 }
1210 }
1211
1212 int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1213 u_char *buf)
1214 {
1215 struct mtd_oob_ops ops = {
1216 .len = len,
1217 .datbuf = buf,
1218 };
1219 int ret;
1220
1221 ret = mtd_read_oob(mtd, from, &ops);
1222 *retlen = ops.retlen;
1223
1224 return ret;
1225 }
1226 EXPORT_SYMBOL_GPL(mtd_read);
1227
1228 int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1229 const u_char *buf)
1230 {
1231 struct mtd_oob_ops ops = {
1232 .len = len,
1233 .datbuf = (u8 *)buf,
1234 };
1235 int ret;
1236
1237 ret = mtd_write_oob(mtd, to, &ops);
1238 *retlen = ops.retlen;
1239
1240 return ret;
1241 }
1242 EXPORT_SYMBOL_GPL(mtd_write);
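
/*
 * Illustrative write-then-read sketch (hypothetical caller, buffer names
 * assumed): both helpers are thin wrappers around the OOB paths above, and
 * retlen reports how much data was actually transferred even on error.
 *
 *	size_t retlen;
 *	int err;
 *
 *	err = mtd_write(mtd, ofs, mtd->writesize, &retlen, wbuf);
 *	if (err || retlen != mtd->writesize)
 *		goto fail;
 *
 *	err = mtd_read(mtd, ofs, mtd->writesize, &retlen, rbuf);
 *	if (mtd_is_bitflip(err))	// -EUCLEAN: corrected, data still valid
 *		err = 0;
 */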
1243
1244 /*
1245 * In black-box flight-recorder-like scenarios we want to make successful writes
1246 * in interrupt context. panic_write() is only intended to be called when it is
1247 * known the kernel is about to panic and we need the write to succeed. Since
1248 * the kernel is not going to be running for much longer, this function can
1249 * break locks and delay to ensure the write succeeds (but not sleep).
1250 */
1251 int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1252 const u_char *buf)
1253 {
1254 struct mtd_info *master = mtd_get_master(mtd);
1255
1256 *retlen = 0;
1257 if (!master->_panic_write)
1258 return -EOPNOTSUPP;
1259 if (to < 0 || to >= mtd->size || len > mtd->size - to)
1260 return -EINVAL;
1261 if (!(mtd->flags & MTD_WRITEABLE))
1262 return -EROFS;
1263 if (!len)
1264 return 0;
1265 if (!master->oops_panic_write)
1266 master->oops_panic_write = true;
1267
1268 return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
1269 retlen, buf);
1270 }
1271 EXPORT_SYMBOL_GPL(mtd_panic_write);
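
/*
 * Illustrative sketch of a panic-time caller (hypothetical, modelled on an
 * mtdoops-style dumper): it runs while the kernel is dying, so no locking
 * and no sleeping, just a best-effort write of the crash record.
 *
 *	size_t retlen;
 *
 *	if (mtd_panic_write(mtd, record_ofs, record_len, &retlen, record))
 *		pr_emerg("crash record dump failed\n");
 */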
1272
1273 static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
1274 struct mtd_oob_ops *ops)
1275 {
1276 /*
1277 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
1278 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
1279 * this case.
1280 */
1281 if (!ops->datbuf)
1282 ops->len = 0;
1283
1284 if (!ops->oobbuf)
1285 ops->ooblen = 0;
1286
1287 if (offs < 0 || offs + ops->len > mtd->size)
1288 return -EINVAL;
1289
1290 if (ops->ooblen) {
1291 size_t maxooblen;
1292
1293 if (ops->ooboffs >= mtd_oobavail(mtd, ops))
1294 return -EINVAL;
1295
1296 maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
1297 mtd_div_by_ws(offs, mtd)) *
1298 mtd_oobavail(mtd, ops)) - ops->ooboffs;
1299 if (ops->ooblen > maxooblen)
1300 return -EINVAL;
1301 }
1302
1303 return 0;
1304 }
1305
1306 static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
1307 struct mtd_oob_ops *ops)
1308 {
1309 struct mtd_info *master = mtd_get_master(mtd);
1310 int ret;
1311
1312 from = mtd_get_master_ofs(mtd, from);
1313 if (master->_read_oob)
1314 ret = master->_read_oob(master, from, ops);
1315 else
1316 ret = master->_read(master, from, ops->len, &ops->retlen,
1317 ops->datbuf);
1318
1319 return ret;
1320 }
1321
1322 static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
1323 struct mtd_oob_ops *ops)
1324 {
1325 struct mtd_info *master = mtd_get_master(mtd);
1326 int ret;
1327
1328 to = mtd_get_master_ofs(mtd, to);
1329 if (master->_write_oob)
1330 ret = master->_write_oob(master, to, ops);
1331 else
1332 ret = master->_write(master, to, ops->len, &ops->retlen,
1333 ops->datbuf);
1334
1335 return ret;
1336 }
1337
1338 static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
1339 struct mtd_oob_ops *ops)
1340 {
1341 struct mtd_info *master = mtd_get_master(mtd);
1342 int ngroups = mtd_pairing_groups(master);
1343 int npairs = mtd_wunit_per_eb(master) / ngroups;
1344 struct mtd_oob_ops adjops = *ops;
1345 unsigned int wunit, oobavail;
1346 struct mtd_pairing_info info;
1347 int max_bitflips = 0;
1348 u32 ebofs, pageofs;
1349 loff_t base, pos;
1350
1351 ebofs = mtd_mod_by_eb(start, mtd);
1352 base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
1353 info.group = 0;
1354 info.pair = mtd_div_by_ws(ebofs, mtd);
1355 pageofs = mtd_mod_by_ws(ebofs, mtd);
1356 oobavail = mtd_oobavail(mtd, ops);
1357
1358 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
1359 int ret;
1360
1361 if (info.pair >= npairs) {
1362 info.pair = 0;
1363 base += master->erasesize;
1364 }
1365
1366 wunit = mtd_pairing_info_to_wunit(master, &info);
1367 pos = mtd_wunit_to_offset(mtd, base, wunit);
1368
1369 adjops.len = ops->len - ops->retlen;
1370 if (adjops.len > mtd->writesize - pageofs)
1371 adjops.len = mtd->writesize - pageofs;
1372
1373 adjops.ooblen = ops->ooblen - ops->oobretlen;
1374 if (adjops.ooblen > oobavail - adjops.ooboffs)
1375 adjops.ooblen = oobavail - adjops.ooboffs;
1376
1377 if (read) {
1378 ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
1379 if (ret > 0)
1380 max_bitflips = max(max_bitflips, ret);
1381 } else {
1382 ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
1383 }
1384
1385 if (ret < 0)
1386 return ret;
1387
1388 max_bitflips = max(max_bitflips, ret);
1389 ops->retlen += adjops.retlen;
1390 ops->oobretlen += adjops.oobretlen;
1391 adjops.datbuf += adjops.retlen;
1392 adjops.oobbuf += adjops.oobretlen;
1393 adjops.ooboffs = 0;
1394 pageofs = 0;
1395 info.pair++;
1396 }
1397
1398 return max_bitflips;
1399 }
1400
1401 int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1402 {
1403 struct mtd_info *master = mtd_get_master(mtd);
1404 struct mtd_ecc_stats old_stats = master->ecc_stats;
1405 int ret_code;
1406
1407 ops->retlen = ops->oobretlen = 0;
1408
1409 ret_code = mtd_check_oob_ops(mtd, from, ops);
1410 if (ret_code)
1411 return ret_code;
1412
1413 ledtrig_mtd_activity();
1414
1415 /* Check the validity of a potential fallback on mtd->_read */
1416 if (!master->_read_oob && (!master->_read || ops->oobbuf))
1417 return -EOPNOTSUPP;
1418
1419 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1420 ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
1421 else
1422 ret_code = mtd_read_oob_std(mtd, from, ops);
1423
1424 mtd_update_ecc_stats(mtd, master, &old_stats);
1425
1426 /*
1427 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
1428 * similar to mtd->_read(), returning a non-negative integer
1429 * representing max bitflips. In other cases, mtd->_read_oob() may
1430 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
1431 */
1432 if (unlikely(ret_code < 0))
1433 return ret_code;
1434 if (mtd->ecc_strength == 0)
1435 return 0; /* device lacks ecc */
1436 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
1437 }
1438 EXPORT_SYMBOL_GPL(mtd_read_oob);
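
/*
 * Illustrative sketch of a combined page + OOB read (hypothetical caller,
 * databuf/oobbuf assumed): the caller describes both buffers in one
 * mtd_oob_ops and checks retlen/oobretlen afterwards.
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_AUTO_OOB,
 *		.datbuf	= databuf,
 *		.len	= mtd->writesize,
 *		.oobbuf	= oobbuf,
 *		.ooblen	= mtd->oobavail,
 *	};
 *	int err = mtd_read_oob(mtd, page_ofs, &ops);
 */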
1439
1440 int mtd_write_oob(struct mtd_info *mtd, loff_t to,
1441 struct mtd_oob_ops *ops)
1442 {
1443 struct mtd_info *master = mtd_get_master(mtd);
1444 int ret;
1445
1446 ops->retlen = ops->oobretlen = 0;
1447
1448 if (!(mtd->flags & MTD_WRITEABLE))
1449 return -EROFS;
1450
1451 ret = mtd_check_oob_ops(mtd, to, ops);
1452 if (ret)
1453 return ret;
1454
1455 ledtrig_mtd_activity();
1456
1457 /* Check the validity of a potential fallback on mtd->_write */
1458 if (!master->_write_oob && (!master->_write || ops->oobbuf))
1459 return -EOPNOTSUPP;
1460
1461 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1462 return mtd_io_emulated_slc(mtd, to, false, ops);
1463
1464 return mtd_write_oob_std(mtd, to, ops);
1465 }
1466 EXPORT_SYMBOL_GPL(mtd_write_oob);
1467
1468 /**
1469 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
1470 * @mtd: MTD device structure
1471 * @section: ECC section. Depending on the layout you may have all the ECC
1472 * bytes stored in a single contiguous section, or one section
1473 * per ECC chunk (and sometimes several sections for a single
1474 * ECC chunk)
1475 * @oobecc: OOB region struct filled with the appropriate ECC position
1476 * information
1477 *
1478 * This function returns ECC section information in the OOB area. If you want
1479 * to get all the ECC bytes information, then you should call
1480 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
1481 *
1482 * Returns zero on success, a negative error code otherwise.
1483 */
1484 int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1485 struct mtd_oob_region *oobecc)
1486 {
1487 struct mtd_info *master = mtd_get_master(mtd);
1488
1489 memset(oobecc, 0, sizeof(*oobecc));
1490
1491 if (!master || section < 0)
1492 return -EINVAL;
1493
1494 if (!master->ooblayout || !master->ooblayout->ecc)
1495 return -ENOTSUPP;
1496
1497 return master->ooblayout->ecc(master, section, oobecc);
1498 }
1499 EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
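
/*
 * Illustrative sketch of walking every ECC region of the layout
 * (hypothetical caller), following the call pattern described above:
 *
 *	struct mtd_oob_region region;
 *	int section = 0;
 *
 *	while (!mtd_ooblayout_ecc(mtd, section++, &region))
 *		pr_info("ECC bytes at %u, len %u\n",
 *			region.offset, region.length);
 */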
1500
1501 /**
1502 * mtd_ooblayout_free - Get the OOB region definition of a specific free
1503 * section
1504 * @mtd: MTD device structure
1505 * @section: Free section you are interested in. Depending on the layout
1506 * you may have all the free bytes stored in a single contiguous
1507 * section, or one section per ECC chunk plus an extra section
1508 * for the remaining bytes (or other funky layout).
1509 * @oobfree: OOB region struct filled with the appropriate free position
1510 * information
1511 *
1512 * This function returns free bytes position in the OOB area. If you want
1513 * to get all the free bytes information, then you should call
1514 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
1515 *
1516 * Returns zero on success, a negative error code otherwise.
1517 */
1518 int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1519 struct mtd_oob_region *oobfree)
1520 {
1521 struct mtd_info *master = mtd_get_master(mtd);
1522
1523 memset(oobfree, 0, sizeof(*oobfree));
1524
1525 if (!master || section < 0)
1526 return -EINVAL;
1527
1528 if (!master->ooblayout || !master->ooblayout->free)
1529 return -ENOTSUPP;
1530
1531 return master->ooblayout->free(master, section, oobfree);
1532 }
1533 EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1534
1535 /**
1536 * mtd_ooblayout_find_region - Find the region attached to a specific byte
1537 * @mtd: mtd info structure
1538 * @byte: the byte we are searching for
1539 * @sectionp: pointer where the section id will be stored
1540 * @oobregion: used to retrieve the ECC position
1541 * @iter: iterator function. Should be either mtd_ooblayout_free or
1542 * mtd_ooblayout_ecc depending on the region type you're searching for
1543 *
1544 * This function returns the section id and oobregion information of a
1545 * specific byte. For example, say you want to know where the 4th ECC byte is
1546 * stored, you'll use:
1547 *
1548 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1549 *
1550 * Returns zero on success, a negative error code otherwise.
1551 */
1552 static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1553 int *sectionp, struct mtd_oob_region *oobregion,
1554 int (*iter)(struct mtd_info *,
1555 int section,
1556 struct mtd_oob_region *oobregion))
1557 {
1558 int pos = 0, ret, section = 0;
1559
1560 memset(oobregion, 0, sizeof(*oobregion));
1561
1562 while (1) {
1563 ret = iter(mtd, section, oobregion);
1564 if (ret)
1565 return ret;
1566
1567 if (pos + oobregion->length > byte)
1568 break;
1569
1570 pos += oobregion->length;
1571 section++;
1572 }
1573
1574 /*
1575 * Adjust region info to make it start at the beginning of the
1576 * requested byte.
1577 */
1578 oobregion->offset += byte - pos;
1579 oobregion->length -= byte - pos;
1580 *sectionp = section;
1581
1582 return 0;
1583 }
1584
1585 /**
1586 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1587 * ECC byte
1588 * @mtd: mtd info structure
1589 * @eccbyte: the byte we are searching for
1590 * @sectionp: pointer where the section id will be stored
1591 * @oobregion: OOB region information
1592 *
1593 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1594 * byte.
1595 *
1596 * Returns zero on success, a negative error code otherwise.
1597 */
1598 int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1599 int *section,
1600 struct mtd_oob_region *oobregion)
1601 {
1602 return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1603 mtd_ooblayout_ecc);
1604 }
1605 EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1606
1607 /**
1608 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1609 * @mtd: mtd info structure
1610 * @buf: destination buffer to store OOB bytes
1611 * @oobbuf: OOB buffer
1612 * @start: first byte to retrieve
1613 * @nbytes: number of bytes to retrieve
1614 * @iter: section iterator
1615 *
1616 * Extract bytes attached to a specific category (ECC or free)
1617 * from the OOB buffer and copy them into buf.
1618 *
1619 * Returns zero on success, a negative error code otherwise.
1620 */
1621 static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1622 const u8 *oobbuf, int start, int nbytes,
1623 int (*iter)(struct mtd_info *,
1624 int section,
1625 struct mtd_oob_region *oobregion))
1626 {
1627 struct mtd_oob_region oobregion;
1628 int section, ret;
1629
1630 ret = mtd_ooblayout_find_region(mtd, start, &section,
1631 &oobregion, iter);
1632
1633 while (!ret) {
1634 int cnt;
1635
1636 cnt = min_t(int, nbytes, oobregion.length);
1637 memcpy(buf, oobbuf + oobregion.offset, cnt);
1638 buf += cnt;
1639 nbytes -= cnt;
1640
1641 if (!nbytes)
1642 break;
1643
1644 ret = iter(mtd, ++section, &oobregion);
1645 }
1646
1647 return ret;
1648 }
1649
1650 /**
1651 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1652 * @mtd: mtd info structure
1653 * @buf: source buffer to get OOB bytes from
1654 * @oobbuf: OOB buffer
1655 * @start: first OOB byte to set
1656 * @nbytes: number of OOB bytes to set
1657 * @iter: section iterator
1658 *
1659 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1660 * is selected by passing the appropriate iterator.
1661 *
1662 * Returns zero on success, a negative error code otherwise.
1663 */
1664 static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1665 u8 *oobbuf, int start, int nbytes,
1666 int (*iter)(struct mtd_info *,
1667 int section,
1668 struct mtd_oob_region *oobregion))
1669 {
1670 struct mtd_oob_region oobregion;
1671 int section, ret;
1672
1673 ret = mtd_ooblayout_find_region(mtd, start, &section,
1674 &oobregion, iter);
1675
1676 while (!ret) {
1677 int cnt;
1678
1679 cnt = min_t(int, nbytes, oobregion.length);
1680 memcpy(oobbuf + oobregion.offset, buf, cnt);
1681 buf += cnt;
1682 nbytes -= cnt;
1683
1684 if (!nbytes)
1685 break;
1686
1687 ret = iter(mtd, ++section, &oobregion);
1688 }
1689
1690 return ret;
1691 }
1692
1693 /**
1694 * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
1695 * @mtd: mtd info structure
1696 * @iter: category iterator
1697 *
1698 * Count the number of bytes in a given category.
1699 *
1700 * Returns a positive value on success, a negative error code otherwise.
1701 */
1702 static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1703 int (*iter)(struct mtd_info *,
1704 int section,
1705 struct mtd_oob_region *oobregion))
1706 {
1707 struct mtd_oob_region oobregion;
1708 int section = 0, ret, nbytes = 0;
1709
1710 while (1) {
1711 ret = iter(mtd, section++, &oobregion);
1712 if (ret) {
1713 if (ret == -ERANGE)
1714 ret = nbytes;
1715 break;
1716 }
1717
1718 nbytes += oobregion.length;
1719 }
1720
1721 return ret;
1722 }
1723
1724 /**
1725 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
1726 * @mtd: mtd info structure
1727 * @eccbuf: destination buffer to store ECC bytes
1728 * @oobbuf: OOB buffer
1729 * @start: first ECC byte to retrieve
1730 * @nbytes: number of ECC bytes to retrieve
1731 *
1732 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
1733 *
1734 * Returns zero on success, a negative error code otherwise.
1735 */
1736 int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
1737 const u8 *oobbuf, int start, int nbytes)
1738 {
1739 return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1740 mtd_ooblayout_ecc);
1741 }
1742 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
1743
1744 /**
1745 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
1746 * @mtd: mtd info structure
1747 * @eccbuf: source buffer to get ECC bytes from
1748 * @oobbuf: OOB buffer
1749 * @start: first ECC byte to set
1750 * @nbytes: number of ECC bytes to set
1751 *
1752 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
1753 *
1754 * Returns zero on success, a negative error code otherwise.
1755 */
1756 int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
1757 u8 *oobbuf, int start, int nbytes)
1758 {
1759 return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1760 mtd_ooblayout_ecc);
1761 }
1762 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
1763
1764 /**
1765 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
1766 * @mtd: mtd info structure
1767  * @databuf: destination buffer to store data bytes
1768  * @oobbuf: OOB buffer
1769  * @start: first data byte to retrieve
1770  * @nbytes: number of data bytes to retrieve
1771 *
1772 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
1773 *
1774 * Returns zero on success, a negative error code otherwise.
1775 */
1776 int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
1777 const u8 *oobbuf, int start, int nbytes)
1778 {
1779 return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
1780 mtd_ooblayout_free);
1781 }
1782 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
1783
1784 /**
1785 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
1786 * @mtd: mtd info structure
1787 * @databuf: source buffer to get data bytes from
1788 * @oobbuf: OOB buffer
1789  * @start: first data byte to set
1790  * @nbytes: number of data bytes to set
1791 *
1792 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
1793 *
1794 * Returns zero on success, a negative error code otherwise.
1795 */
1796 int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
1797 u8 *oobbuf, int start, int nbytes)
1798 {
1799 return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
1800 mtd_ooblayout_free);
1801 }
1802 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
1803
1804 /**
1805 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
1806 * @mtd: mtd info structure
1807 *
1808  * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
1809  *
1810  * Returns the free byte count on success, a negative error code otherwise.
1811 */
1812 int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
1813 {
1814 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
1815 }
1816 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
1817
1818 /**
1819 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
1820 * @mtd: mtd info structure
1821 *
1822  * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
1823  *
1824  * Returns the ECC byte count on success, a negative error code otherwise.
1825 */
1826 int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
1827 {
1828 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
1829 }
1830 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
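
/*
 * A hedged usage sketch (not part of the original file; the function
 * name is hypothetical): read one page's OOB area in raw mode and
 * gather all of its free ("data") bytes into a contiguous buffer,
 * combining the count and extract helpers above. @databuf is assumed
 * to hold at least mtd_ooblayout_count_freebytes(mtd) bytes.
 */
static int __maybe_unused example_read_oob_free(struct mtd_info *mtd,
						loff_t from, u8 *databuf)
{
	struct mtd_oob_ops ops = { };
	int nfree, ret;
	u8 *oobbuf;

	nfree = mtd_ooblayout_count_freebytes(mtd);
	if (nfree <= 0)
		return nfree ? nfree : -ENODATA;

	oobbuf = kmalloc(mtd->oobsize, GFP_KERNEL);
	if (!oobbuf)
		return -ENOMEM;

	ops.mode = MTD_OPS_RAW;
	ops.ooblen = mtd->oobsize;
	ops.oobbuf = oobbuf;
	ret = mtd_read_oob(mtd, from, &ops);
	if (!ret)
		ret = mtd_ooblayout_get_databytes(mtd, databuf, oobbuf,
						  0, nfree);

	kfree(oobbuf);
	return ret;
}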
1831
1832 /*
1833 * Method to access the protection register area, present in some flash
1834 * devices. The user data is one time programmable but the factory data is read
1835 * only.
1836 */
1837 int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1838 struct otp_info *buf)
1839 {
1840 struct mtd_info *master = mtd_get_master(mtd);
1841
1842 if (!master->_get_fact_prot_info)
1843 return -EOPNOTSUPP;
1844 if (!len)
1845 return 0;
1846 return master->_get_fact_prot_info(master, len, retlen, buf);
1847 }
1848 EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
1849
1850 int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
1851 size_t *retlen, u_char *buf)
1852 {
1853 struct mtd_info *master = mtd_get_master(mtd);
1854
1855 *retlen = 0;
1856 if (!master->_read_fact_prot_reg)
1857 return -EOPNOTSUPP;
1858 if (!len)
1859 return 0;
1860 return master->_read_fact_prot_reg(master, from, len, retlen, buf);
1861 }
1862 EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
1863
1864 int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1865 struct otp_info *buf)
1866 {
1867 struct mtd_info *master = mtd_get_master(mtd);
1868
1869 if (!master->_get_user_prot_info)
1870 return -EOPNOTSUPP;
1871 if (!len)
1872 return 0;
1873 return master->_get_user_prot_info(master, len, retlen, buf);
1874 }
1875 EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
1876
1877 int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
1878 size_t *retlen, u_char *buf)
1879 {
1880 struct mtd_info *master = mtd_get_master(mtd);
1881
1882 *retlen = 0;
1883 if (!master->_read_user_prot_reg)
1884 return -EOPNOTSUPP;
1885 if (!len)
1886 return 0;
1887 return master->_read_user_prot_reg(master, from, len, retlen, buf);
1888 }
1889 EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
1890
1891 int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
1892 size_t *retlen, u_char *buf)
1893 {
1894 struct mtd_info *master = mtd_get_master(mtd);
1895 int ret;
1896
1897 *retlen = 0;
1898 if (!master->_write_user_prot_reg)
1899 return -EOPNOTSUPP;
1900 if (!len)
1901 return 0;
1902 ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
1903 if (ret)
1904 return ret;
1905
1906 /*
1907 	 * If no data could be written at all, there is no space left in the
1908 	 * OTP area and we must return -ENOSPC.
1909 */
1910 return (*retlen) ? 0 : -ENOSPC;
1911 }
1912 EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
1913
1914 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
1915 {
1916 struct mtd_info *master = mtd_get_master(mtd);
1917
1918 if (!master->_lock_user_prot_reg)
1919 return -EOPNOTSUPP;
1920 if (!len)
1921 return 0;
1922 return master->_lock_user_prot_reg(master, from, len);
1923 }
1924 EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
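
/*
 * A hedged usage sketch (not part of the original file; the function
 * name and the 16-region cap are arbitrary choices for illustration):
 * enumerate the user OTP regions and read from the start of the first
 * one, chaining mtd_get_user_prot_info() and mtd_read_user_prot_reg().
 */
static int __maybe_unused example_read_first_user_otp(struct mtd_info *mtd,
						      u_char *buf, size_t buflen)
{
	struct otp_info info[16];
	size_t retlen = 0;
	int ret;

	ret = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, info);
	if (ret)
		return ret;
	if (retlen < sizeof(info[0]))
		return -ENODEV;	/* no user OTP region reported */

	return mtd_read_user_prot_reg(mtd, info[0].start,
				      min_t(size_t, buflen, info[0].length),
				      &retlen, buf);
}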
1925
1926 /* Chip-supported device locking */
1927 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1928 {
1929 struct mtd_info *master = mtd_get_master(mtd);
1930
1931 if (!master->_lock)
1932 return -EOPNOTSUPP;
1933 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
1934 return -EINVAL;
1935 if (!len)
1936 return 0;
1937
1938 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1939 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
1940 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
1941 }
1942
1943 return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
1944 }
1945 EXPORT_SYMBOL_GPL(mtd_lock);
1946
1947 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1948 {
1949 struct mtd_info *master = mtd_get_master(mtd);
1950
1951 if (!master->_unlock)
1952 return -EOPNOTSUPP;
1953 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
1954 return -EINVAL;
1955 if (!len)
1956 return 0;
1957
1958 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1959 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
1960 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
1961 }
1962
1963 return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
1964 }
1965 EXPORT_SYMBOL_GPL(mtd_unlock);
1966
1967 int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1968 {
1969 struct mtd_info *master = mtd_get_master(mtd);
1970
1971 if (!master->_is_locked)
1972 return -EOPNOTSUPP;
1973 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
1974 return -EINVAL;
1975 if (!len)
1976 return 0;
1977
1978 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1979 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
1980 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
1981 }
1982
1983 return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
1984 }
1985 EXPORT_SYMBOL_GPL(mtd_is_locked);
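
/*
 * Worked example (hypothetical sizes) of the MTD_SLC_ON_MLC_EMULATION
 * conversion performed by mtd_lock()/mtd_unlock()/mtd_is_locked()
 * above: if the emulated SLC partition advertises a 128 KiB erasesize
 * while the master's physical erase block is 256 KiB, then for
 * ofs = 384 KiB:
 *
 *   mtd_div_by_eb(384 KiB, mtd) = 384 KiB / 128 KiB = 3
 *   ofs = 3 * master->erasesize = 3 * 256 KiB = 768 KiB
 *
 * i.e. the range is re-expressed in whole physical erase blocks of the
 * master device before being handed to the master's callbacks.
 */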
1986
1987 int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
1988 {
1989 struct mtd_info *master = mtd_get_master(mtd);
1990
1991 if (ofs < 0 || ofs >= mtd->size)
1992 return -EINVAL;
1993 if (!master->_block_isreserved)
1994 return 0;
1995
1996 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1997 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
1998
1999 return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2000 }
2001 EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2002
2003 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2004 {
2005 struct mtd_info *master = mtd_get_master(mtd);
2006
2007 if (ofs < 0 || ofs >= mtd->size)
2008 return -EINVAL;
2009 if (!master->_block_isbad)
2010 return 0;
2011
2012 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2013 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2014
2015 return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2016 }
2017 EXPORT_SYMBOL_GPL(mtd_block_isbad);
2018
2019 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2020 {
2021 struct mtd_info *master = mtd_get_master(mtd);
2022 int ret;
2023
2024 if (!master->_block_markbad)
2025 return -EOPNOTSUPP;
2026 if (ofs < 0 || ofs >= mtd->size)
2027 return -EINVAL;
2028 if (!(mtd->flags & MTD_WRITEABLE))
2029 return -EROFS;
2030
2031 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2032 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2033
2034 ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
2035 if (ret)
2036 return ret;
2037
2038 while (mtd->parent) {
2039 mtd->ecc_stats.badblocks++;
2040 mtd = mtd->parent;
2041 }
2042
2043 return 0;
2044 }
2045 EXPORT_SYMBOL_GPL(mtd_block_markbad);
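
/*
 * A hedged usage sketch (not part of the original file; the function
 * name is hypothetical): walk a device erase block by erase block and
 * count how many blocks are already marked bad, using
 * mtd_block_isbad() above.
 */
static int __maybe_unused example_count_bad_blocks(struct mtd_info *mtd)
{
	loff_t ofs;
	int bad = 0, ret;

	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
		ret = mtd_block_isbad(mtd, ofs);
		if (ret < 0)
			return ret;
		if (ret)
			bad++;
	}

	return bad;
}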
2046
2047 /*
2048 * default_mtd_writev - the default writev method
2049 * @mtd: mtd device description object pointer
2050 * @vecs: the vectors to write
2051 * @count: count of vectors in @vecs
2052 * @to: the MTD device offset to write to
2053 * @retlen: on exit contains the count of bytes written to the MTD device.
2054 *
2055 * This function returns zero in case of success and a negative error code in
2056 * case of failure.
2057 */
2058 static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2059 unsigned long count, loff_t to, size_t *retlen)
2060 {
2061 unsigned long i;
2062 size_t totlen = 0, thislen;
2063 int ret = 0;
2064
2065 for (i = 0; i < count; i++) {
2066 if (!vecs[i].iov_len)
2067 continue;
2068 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2069 vecs[i].iov_base);
2070 totlen += thislen;
2071 if (ret || thislen != vecs[i].iov_len)
2072 break;
2073 to += vecs[i].iov_len;
2074 }
2075 *retlen = totlen;
2076 return ret;
2077 }
2078
2079 /*
2080 * mtd_writev - the vector-based MTD write method
2081 * @mtd: mtd device description object pointer
2082 * @vecs: the vectors to write
2083 * @count: count of vectors in @vecs
2084 * @to: the MTD device offset to write to
2085 * @retlen: on exit contains the count of bytes written to the MTD device.
2086 *
2087 * This function returns zero in case of success and a negative error code in
2088 * case of failure.
2089 */
2090 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2091 unsigned long count, loff_t to, size_t *retlen)
2092 {
2093 struct mtd_info *master = mtd_get_master(mtd);
2094
2095 *retlen = 0;
2096 if (!(mtd->flags & MTD_WRITEABLE))
2097 return -EROFS;
2098
2099 if (!master->_writev)
2100 return default_mtd_writev(mtd, vecs, count, to, retlen);
2101
2102 return master->_writev(master, vecs, count,
2103 mtd_get_master_ofs(mtd, to), retlen);
2104 }
2105 EXPORT_SYMBOL_GPL(mtd_writev);
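
/*
 * A hedged usage sketch (not part of the original file; the function
 * name is hypothetical): write a header and a payload that live in
 * separate buffers as a single vectored request via mtd_writev().
 */
static int __maybe_unused example_writev(struct mtd_info *mtd, loff_t to,
					 void *hdr, size_t hdrlen,
					 void *payload, size_t paylen)
{
	struct kvec vecs[2] = {
		{ .iov_base = hdr,     .iov_len = hdrlen },
		{ .iov_base = payload, .iov_len = paylen },
	};
	size_t retlen = 0;
	int ret;

	ret = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
	if (!ret && retlen != hdrlen + paylen)
		ret = -EIO;	/* short write */

	return ret;
}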
2106
2107 /**
2108 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
2109 * @mtd: mtd device description object pointer
2110 * @size: a pointer to the ideal or maximum size of the allocation, points
2111 * to the actual allocation size on success.
2112 *
2113 * This routine attempts to allocate a contiguous kernel buffer up to
2114 * the specified size, backing off the size of the request exponentially
2115 * until the request succeeds or until the allocation size falls below
2116 * the system page size. This attempts to make sure it does not adversely
2117 * impact system performance, so when allocating more than one page, we
2118 * ask the memory allocator to avoid re-trying, swapping, writing back
2119 * or performing I/O.
2120 *
2121 * Note, this function also makes sure that the allocated buffer is aligned to
2122 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
2123 *
2124  * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
2125 * to handle smaller (i.e. degraded) buffer allocations under low- or
2126 * fragmented-memory situations where such reduced allocations, from a
2127 * requested ideal, are allowed.
2128 *
2129 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
2130 */
2131 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2132 {
2133 gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2134 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2135 void *kbuf;
2136
2137 *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2138
2139 while (*size > min_alloc) {
2140 kbuf = kmalloc(*size, flags);
2141 if (kbuf)
2142 return kbuf;
2143
2144 *size >>= 1;
2145 *size = ALIGN(*size, mtd->writesize);
2146 }
2147
2148 /*
2149 * For the last resort allocation allow 'kmalloc()' to do all sorts of
2150 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
2151 */
2152 return kmalloc(*size, GFP_KERNEL);
2153 }
2154 EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
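
/*
 * A hedged usage sketch (not part of the original file; the function
 * name is hypothetical, and -EUCLEAN is treated as fatal only for
 * brevity): request an ideal buffer size, accept whatever smaller
 * writesize-aligned buffer mtd_kmalloc_up_to() could provide, and read
 * the requested range in chunks of that size.
 */
static int __maybe_unused example_chunked_read(struct mtd_info *mtd, loff_t from,
					       size_t total, u_char *dst)
{
	size_t chunk = total;
	size_t retlen = 0, done = 0;
	void *buf;
	int ret = 0;

	buf = mtd_kmalloc_up_to(mtd, &chunk);
	if (!buf)
		return -ENOMEM;

	while (done < total) {
		size_t len = min_t(size_t, chunk, total - done);

		ret = mtd_read(mtd, from + done, len, &retlen, buf);
		if (ret)
			break;

		memcpy(dst + done, buf, retlen);
		done += retlen;
		if (!retlen)
			break;	/* defensive: avoid spinning on a zero-length read */
	}

	kfree(buf);
	return ret;
}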
2155
2156 #ifdef CONFIG_PROC_FS
2157
2158 /*====================================================================*/
2159 /* Support for /proc/mtd */
2160
2161 static int mtd_proc_show(struct seq_file *m, void *v)
2162 {
2163 struct mtd_info *mtd;
2164
2165 seq_puts(m, "dev: size erasesize name\n");
2166 mutex_lock(&mtd_table_mutex);
2167 mtd_for_each_device(mtd) {
2168 seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2169 mtd->index, (unsigned long long)mtd->size,
2170 mtd->erasesize, mtd->name);
2171 }
2172 mutex_unlock(&mtd_table_mutex);
2173 return 0;
2174 }
2175 #endif /* CONFIG_PROC_FS */
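
/*
 * For illustration only (device names and sizes below are hypothetical),
 * the resulting /proc/mtd output produced by mtd_proc_show() looks like:
 *
 *   dev: size erasesize name
 *   mtd0: 00100000 00020000 "bootloader"
 *   mtd1: 0ff00000 00020000 "rootfs"
 *
 * with sizes and erase sizes printed as 8-digit hexadecimal values.
 */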
2176
2177 /*====================================================================*/
2178 /* Init code */
2179
2180 static struct backing_dev_info * __init mtd_bdi_init(char *name)
2181 {
2182 struct backing_dev_info *bdi;
2183 int ret;
2184
2185 bdi = bdi_alloc(NUMA_NO_NODE);
2186 if (!bdi)
2187 return ERR_PTR(-ENOMEM);
2188 bdi->ra_pages = 0;
2189 bdi->io_pages = 0;
2190
2191 /*
2192 	 * We append a '-0' suffix to the name to keep the same name format as
2193 	 * before. Since this is called only once, we get a unique name.
2194 */
2195 ret = bdi_register(bdi, "%.28s-0", name);
2196 if (ret)
2197 bdi_put(bdi);
2198
2199 return ret ? ERR_PTR(ret) : bdi;
2200 }
2201
2202 static struct proc_dir_entry *proc_mtd;
2203
2204 static int __init init_mtd(void)
2205 {
2206 int ret;
2207
2208 ret = class_register(&mtd_class);
2209 if (ret)
2210 goto err_reg;
2211
2212 mtd_bdi = mtd_bdi_init("mtd");
2213 if (IS_ERR(mtd_bdi)) {
2214 ret = PTR_ERR(mtd_bdi);
2215 goto err_bdi;
2216 }
2217
2218 proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2219
2220 ret = init_mtdchar();
2221 if (ret)
2222 goto out_procfs;
2223
2224 dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2225
2226 return 0;
2227
2228 out_procfs:
2229 if (proc_mtd)
2230 remove_proc_entry("mtd", NULL);
2231 bdi_put(mtd_bdi);
2232 err_bdi:
2233 class_unregister(&mtd_class);
2234 err_reg:
2235 pr_err("Error registering mtd class or bdi: %d\n", ret);
2236 return ret;
2237 }
2238
2239 static void __exit cleanup_mtd(void)
2240 {
2241 debugfs_remove_recursive(dfs_dir_mtd);
2242 cleanup_mtdchar();
2243 if (proc_mtd)
2244 remove_proc_entry("mtd", NULL);
2245 class_unregister(&mtd_class);
2246 bdi_put(mtd_bdi);
2247 idr_destroy(&mtd_idr);
2248 }
2249
2250 module_init(init_mtd);
2251 module_exit(cleanup_mtd);
2252
2253 MODULE_LICENSE("GPL");
2254 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2255 MODULE_DESCRIPTION("Core MTD registration and access routines");
2256