1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Core registration and callback routines for MTD
4  * drivers and users.
5  *
6  * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
7  * Copyright © 2006      Red Hat UK Limited
8  */
9 
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/ptrace.h>
13 #include <linux/seq_file.h>
14 #include <linux/string.h>
15 #include <linux/timer.h>
16 #include <linux/major.h>
17 #include <linux/fs.h>
18 #include <linux/err.h>
19 #include <linux/ioctl.h>
20 #include <linux/init.h>
21 #include <linux/of.h>
22 #include <linux/proc_fs.h>
23 #include <linux/idr.h>
24 #include <linux/backing-dev.h>
25 #include <linux/gfp.h>
26 #include <linux/slab.h>
27 #include <linux/reboot.h>
28 #include <linux/leds.h>
29 #include <linux/debugfs.h>
30 #include <linux/nvmem-provider.h>
31 
32 #include <linux/mtd/mtd.h>
33 #include <linux/mtd/partitions.h>
34 
35 #include "mtdcore.h"
36 
37 struct backing_dev_info *mtd_bdi;
38 
39 #ifdef CONFIG_PM_SLEEP
40 
41 static int mtd_cls_suspend(struct device *dev)
42 {
43 	struct mtd_info *mtd = dev_get_drvdata(dev);
44 
45 	return mtd ? mtd_suspend(mtd) : 0;
46 }
47 
48 static int mtd_cls_resume(struct device *dev)
49 {
50 	struct mtd_info *mtd = dev_get_drvdata(dev);
51 
52 	if (mtd)
53 		mtd_resume(mtd);
54 	return 0;
55 }
56 
57 static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
58 #define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
59 #else
60 #define MTD_CLS_PM_OPS NULL
61 #endif
62 
63 static struct class mtd_class = {
64 	.name = "mtd",
65 	.owner = THIS_MODULE,
66 	.pm = MTD_CLS_PM_OPS,
67 };
68 
69 static DEFINE_IDR(mtd_idr);
70 
71 /* These are exported solely for the purpose of mtd_blkdevs.c. You
72    should not use them for _anything_ else */
73 DEFINE_MUTEX(mtd_table_mutex);
74 EXPORT_SYMBOL_GPL(mtd_table_mutex);
75 
76 struct mtd_info *__mtd_next_device(int i)
77 {
78 	return idr_get_next(&mtd_idr, &i);
79 }
80 EXPORT_SYMBOL_GPL(__mtd_next_device);
81 
82 static LIST_HEAD(mtd_notifiers);
83 
84 
85 #define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
86 
87 /* REVISIT once MTD uses the driver model better, whoever allocates
88  * the mtd_info will probably want to use the release() hook...
89  */
90 static void mtd_release(struct device *dev)
91 {
92 	struct mtd_info *mtd = dev_get_drvdata(dev);
93 	dev_t index = MTD_DEVT(mtd->index);
94 
95 	/* remove /dev/mtdXro node */
96 	device_destroy(&mtd_class, index + 1);
97 }
98 
99 #define MTD_DEVICE_ATTR_RO(name) \
100 static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
101 
102 #define MTD_DEVICE_ATTR_RW(name) \
103 static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
104 
105 static ssize_t mtd_type_show(struct device *dev,
106 		struct device_attribute *attr, char *buf)
107 {
108 	struct mtd_info *mtd = dev_get_drvdata(dev);
109 	char *type;
110 
111 	switch (mtd->type) {
112 	case MTD_ABSENT:
113 		type = "absent";
114 		break;
115 	case MTD_RAM:
116 		type = "ram";
117 		break;
118 	case MTD_ROM:
119 		type = "rom";
120 		break;
121 	case MTD_NORFLASH:
122 		type = "nor";
123 		break;
124 	case MTD_NANDFLASH:
125 		type = "nand";
126 		break;
127 	case MTD_DATAFLASH:
128 		type = "dataflash";
129 		break;
130 	case MTD_UBIVOLUME:
131 		type = "ubi";
132 		break;
133 	case MTD_MLCNANDFLASH:
134 		type = "mlc-nand";
135 		break;
136 	default:
137 		type = "unknown";
138 	}
139 
140 	return sysfs_emit(buf, "%s\n", type);
141 }
142 MTD_DEVICE_ATTR_RO(type);
143 
144 static ssize_t mtd_flags_show(struct device *dev,
145 		struct device_attribute *attr, char *buf)
146 {
147 	struct mtd_info *mtd = dev_get_drvdata(dev);
148 
149 	return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
150 }
151 MTD_DEVICE_ATTR_RO(flags);
152 
153 static ssize_t mtd_size_show(struct device *dev,
154 		struct device_attribute *attr, char *buf)
155 {
156 	struct mtd_info *mtd = dev_get_drvdata(dev);
157 
158 	return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
159 }
160 MTD_DEVICE_ATTR_RO(size);
161 
162 static ssize_t mtd_erasesize_show(struct device *dev,
163 		struct device_attribute *attr, char *buf)
164 {
165 	struct mtd_info *mtd = dev_get_drvdata(dev);
166 
167 	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
168 }
169 MTD_DEVICE_ATTR_RO(erasesize);
170 
171 static ssize_t mtd_writesize_show(struct device *dev,
172 		struct device_attribute *attr, char *buf)
173 {
174 	struct mtd_info *mtd = dev_get_drvdata(dev);
175 
176 	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
177 }
178 MTD_DEVICE_ATTR_RO(writesize);
179 
180 static ssize_t mtd_subpagesize_show(struct device *dev,
181 		struct device_attribute *attr, char *buf)
182 {
183 	struct mtd_info *mtd = dev_get_drvdata(dev);
184 	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
185 
186 	return sysfs_emit(buf, "%u\n", subpagesize);
187 }
188 MTD_DEVICE_ATTR_RO(subpagesize);
189 
190 static ssize_t mtd_oobsize_show(struct device *dev,
191 		struct device_attribute *attr, char *buf)
192 {
193 	struct mtd_info *mtd = dev_get_drvdata(dev);
194 
195 	return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
196 }
197 MTD_DEVICE_ATTR_RO(oobsize);
198 
199 static ssize_t mtd_oobavail_show(struct device *dev,
200 				 struct device_attribute *attr, char *buf)
201 {
202 	struct mtd_info *mtd = dev_get_drvdata(dev);
203 
204 	return sysfs_emit(buf, "%u\n", mtd->oobavail);
205 }
206 MTD_DEVICE_ATTR_RO(oobavail);
207 
208 static ssize_t mtd_numeraseregions_show(struct device *dev,
209 		struct device_attribute *attr, char *buf)
210 {
211 	struct mtd_info *mtd = dev_get_drvdata(dev);
212 
213 	return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
214 }
215 MTD_DEVICE_ATTR_RO(numeraseregions);
216 
217 static ssize_t mtd_name_show(struct device *dev,
218 		struct device_attribute *attr, char *buf)
219 {
220 	struct mtd_info *mtd = dev_get_drvdata(dev);
221 
222 	return sysfs_emit(buf, "%s\n", mtd->name);
223 }
224 MTD_DEVICE_ATTR_RO(name);
225 
226 static ssize_t mtd_ecc_strength_show(struct device *dev,
227 				     struct device_attribute *attr, char *buf)
228 {
229 	struct mtd_info *mtd = dev_get_drvdata(dev);
230 
231 	return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
232 }
233 MTD_DEVICE_ATTR_RO(ecc_strength);
234 
235 static ssize_t mtd_bitflip_threshold_show(struct device *dev,
236 					  struct device_attribute *attr,
237 					  char *buf)
238 {
239 	struct mtd_info *mtd = dev_get_drvdata(dev);
240 
241 	return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
242 }
243 
244 static ssize_t mtd_bitflip_threshold_store(struct device *dev,
245 					   struct device_attribute *attr,
246 					   const char *buf, size_t count)
247 {
248 	struct mtd_info *mtd = dev_get_drvdata(dev);
249 	unsigned int bitflip_threshold;
250 	int retval;
251 
252 	retval = kstrtouint(buf, 0, &bitflip_threshold);
253 	if (retval)
254 		return retval;
255 
256 	mtd->bitflip_threshold = bitflip_threshold;
257 	return count;
258 }
259 MTD_DEVICE_ATTR_RW(bitflip_threshold);
260 
261 static ssize_t mtd_ecc_step_size_show(struct device *dev,
262 		struct device_attribute *attr, char *buf)
263 {
264 	struct mtd_info *mtd = dev_get_drvdata(dev);
265 
266 	return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
267 
268 }
269 MTD_DEVICE_ATTR_RO(ecc_step_size);
270 
271 static ssize_t mtd_corrected_bits_show(struct device *dev,
272 		struct device_attribute *attr, char *buf)
273 {
274 	struct mtd_info *mtd = dev_get_drvdata(dev);
275 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
276 
277 	return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
278 }
279 MTD_DEVICE_ATTR_RO(corrected_bits);	/* ecc stats corrected */
280 
281 static ssize_t mtd_ecc_failures_show(struct device *dev,
282 		struct device_attribute *attr, char *buf)
283 {
284 	struct mtd_info *mtd = dev_get_drvdata(dev);
285 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
286 
287 	return sysfs_emit(buf, "%u\n", ecc_stats->failed);
288 }
289 MTD_DEVICE_ATTR_RO(ecc_failures);	/* ecc stats errors */
290 
291 static ssize_t mtd_bad_blocks_show(struct device *dev,
292 		struct device_attribute *attr, char *buf)
293 {
294 	struct mtd_info *mtd = dev_get_drvdata(dev);
295 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
296 
297 	return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
298 }
299 MTD_DEVICE_ATTR_RO(bad_blocks);
300 
301 static ssize_t mtd_bbt_blocks_show(struct device *dev,
302 		struct device_attribute *attr, char *buf)
303 {
304 	struct mtd_info *mtd = dev_get_drvdata(dev);
305 	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
306 
307 	return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
308 }
309 MTD_DEVICE_ATTR_RO(bbt_blocks);
310 
311 static struct attribute *mtd_attrs[] = {
312 	&dev_attr_type.attr,
313 	&dev_attr_flags.attr,
314 	&dev_attr_size.attr,
315 	&dev_attr_erasesize.attr,
316 	&dev_attr_writesize.attr,
317 	&dev_attr_subpagesize.attr,
318 	&dev_attr_oobsize.attr,
319 	&dev_attr_oobavail.attr,
320 	&dev_attr_numeraseregions.attr,
321 	&dev_attr_name.attr,
322 	&dev_attr_ecc_strength.attr,
323 	&dev_attr_ecc_step_size.attr,
324 	&dev_attr_corrected_bits.attr,
325 	&dev_attr_ecc_failures.attr,
326 	&dev_attr_bad_blocks.attr,
327 	&dev_attr_bbt_blocks.attr,
328 	&dev_attr_bitflip_threshold.attr,
329 	NULL,
330 };
331 ATTRIBUTE_GROUPS(mtd);
332 
333 static const struct device_type mtd_devtype = {
334 	.name		= "mtd",
335 	.groups		= mtd_groups,
336 	.release	= mtd_release,
337 };
338 
339 static int mtd_partid_debug_show(struct seq_file *s, void *p)
340 {
341 	struct mtd_info *mtd = s->private;
342 
343 	seq_printf(s, "%s\n", mtd->dbg.partid);
344 
345 	return 0;
346 }
347 
348 DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug);
349 
350 static int mtd_partname_debug_show(struct seq_file *s, void *p)
351 {
352 	struct mtd_info *mtd = s->private;
353 
354 	seq_printf(s, "%s\n", mtd->dbg.partname);
355 
356 	return 0;
357 }
358 
359 DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug);
360 
361 static struct dentry *dfs_dir_mtd;
362 
363 static void mtd_debugfs_populate(struct mtd_info *mtd)
364 {
365 	struct mtd_info *master = mtd_get_master(mtd);
366 	struct device *dev = &mtd->dev;
367 	struct dentry *root;
368 
369 	if (IS_ERR_OR_NULL(dfs_dir_mtd))
370 		return;
371 
372 	root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
373 	mtd->dbg.dfs_dir = root;
374 
375 	if (master->dbg.partid)
376 		debugfs_create_file("partid", 0400, root, master,
377 				    &mtd_partid_debug_fops);
378 
379 	if (master->dbg.partname)
380 		debugfs_create_file("partname", 0400, root, master,
381 				    &mtd_partname_debug_fops);
382 }
383 
384 #ifndef CONFIG_MMU
385 unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
386 {
387 	switch (mtd->type) {
388 	case MTD_RAM:
389 		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
390 			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
391 	case MTD_ROM:
392 		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
393 			NOMMU_MAP_READ;
394 	default:
395 		return NOMMU_MAP_COPY;
396 	}
397 }
398 EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
399 #endif
400 
401 static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
402 			       void *cmd)
403 {
404 	struct mtd_info *mtd;
405 
406 	mtd = container_of(n, struct mtd_info, reboot_notifier);
407 	mtd->_reboot(mtd);
408 
409 	return NOTIFY_DONE;
410 }
411 
412 /**
413  * mtd_wunit_to_pairing_info - get pairing information of a wunit
414  * @mtd: pointer to new MTD device info structure
415  * @wunit: write unit we are interested in
416  * @info: returned pairing information
417  *
418  * Retrieve pairing information associated to the wunit.
419  * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
420  * paired together, and where programming a page may influence the page it is
421  * paired with.
422  * The notion of page is replaced by the term wunit (write-unit) to stay
423  * consistent with the ->writesize field.
424  *
425  * The @wunit argument can be extracted from an absolute offset using
426  * mtd_offset_to_wunit(). @info is filled with the pairing information attached
427  * to @wunit.
428  *
429  * From the pairing info the MTD user can find all the wunits paired with
430  * @wunit using the following loop:
431  *
432  * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
433  *	info.pair = i;
434  *	mtd_pairing_info_to_wunit(mtd, &info);
435  *	...
436  * }
437  */
438 int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
439 			      struct mtd_pairing_info *info)
440 {
441 	struct mtd_info *master = mtd_get_master(mtd);
442 	int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
443 
444 	if (wunit < 0 || wunit >= npairs)
445 		return -EINVAL;
446 
447 	if (master->pairing && master->pairing->get_info)
448 		return master->pairing->get_info(master, wunit, info);
449 
450 	info->group = 0;
451 	info->pair = wunit;
452 
453 	return 0;
454 }
455 EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
456 
457 /**
458  * mtd_pairing_info_to_wunit - get wunit from pairing information
459  * @mtd: pointer to new MTD device info structure
460  * @info: pairing information struct
461  *
462  * Returns a positive number representing the wunit associated to the info
463  * struct, or a negative error code.
464  *
465  * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
466  * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
467  * doc).
468  *
469  * It can also be used to only program the first page of each pair (i.e.
470  * page attached to group 0), which allows one to use an MLC NAND in
471  * software-emulated SLC mode:
472  *
473  * info.group = 0;
474  * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
475  * for (info.pair = 0; info.pair < npairs; info.pair++) {
476  *	wunit = mtd_pairing_info_to_wunit(mtd, &info);
477  *	mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
478  *		  mtd->writesize, &retlen, buf + (i * mtd->writesize));
479  * }
480  */
481 int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
482 			      const struct mtd_pairing_info *info)
483 {
484 	struct mtd_info *master = mtd_get_master(mtd);
485 	int ngroups = mtd_pairing_groups(master);
486 	int npairs = mtd_wunit_per_eb(master) / ngroups;
487 
488 	if (!info || info->pair < 0 || info->pair >= npairs ||
489 	    info->group < 0 || info->group >= ngroups)
490 		return -EINVAL;
491 
492 	if (master->pairing && master->pairing->get_wunit)
493 		return mtd->pairing->get_wunit(master, info);
494 
495 	return info->pair;
496 }
497 EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
498 
499 /**
500  * mtd_pairing_groups - get the number of pairing groups
501  * @mtd: pointer to new MTD device info structure
502  *
503  * Returns the number of pairing groups.
504  *
505  * This number is usually equal to the number of bits exposed by a single
506  * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
507  * to iterate over all pages of a given pair.
508  */
509 int mtd_pairing_groups(struct mtd_info *mtd)
510 {
511 	struct mtd_info *master = mtd_get_master(mtd);
512 
513 	if (!master->pairing || !master->pairing->ngroups)
514 		return 1;
515 
516 	return master->pairing->ngroups;
517 }
518 EXPORT_SYMBOL_GPL(mtd_pairing_groups);
519 
520 static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
521 			      void *val, size_t bytes)
522 {
523 	struct mtd_info *mtd = priv;
524 	size_t retlen;
525 	int err;
526 
527 	err = mtd_read(mtd, offset, bytes, &retlen, val);
528 	if (err && err != -EUCLEAN)
529 		return err;
530 
531 	return retlen == bytes ? 0 : -EIO;
532 }
533 
534 static int mtd_nvmem_add(struct mtd_info *mtd)
535 {
536 	struct device_node *node = mtd_get_of_node(mtd);
537 	struct nvmem_config config = {};
538 
539 	config.id = -1;
540 	config.dev = &mtd->dev;
541 	config.name = dev_name(&mtd->dev);
542 	config.owner = THIS_MODULE;
543 	config.reg_read = mtd_nvmem_reg_read;
544 	config.size = mtd->size;
545 	config.word_size = 1;
546 	config.stride = 1;
547 	config.read_only = true;
548 	config.root_only = true;
549 	config.ignore_wp = true;
550 	config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
551 	config.priv = mtd;
552 
553 	mtd->nvmem = nvmem_register(&config);
554 	if (IS_ERR(mtd->nvmem)) {
555 		/* Just ignore if there is no NVMEM support in the kernel */
556 		if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
557 			mtd->nvmem = NULL;
558 		} else {
559 			dev_err(&mtd->dev, "Failed to register NVMEM device\n");
560 			return PTR_ERR(mtd->nvmem);
561 		}
562 	}
563 
564 	return 0;
565 }
566 
567 /**
568  *	add_mtd_device - register an MTD device
569  *	@mtd: pointer to new MTD device info structure
570  *
571  *	Add a device to the list of MTD devices present in the system, and
572  *	notify each currently active MTD 'user' of its arrival. Returns
573  *	zero on success or non-zero on failure.
574  */
575 
576 int add_mtd_device(struct mtd_info *mtd)
577 {
578 	struct mtd_info *master = mtd_get_master(mtd);
579 	struct mtd_notifier *not;
580 	int i, error;
581 
582 	/*
583 	 * May occur, for instance, on buggy drivers which call
584 	 * mtd_device_parse_register() multiple times on the same master MTD,
585 	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
586 	 */
587 	if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
588 		return -EEXIST;
589 
590 	BUG_ON(mtd->writesize == 0);
591 
592 	/*
593 	 * MTD drivers should implement ->_{write,read}() or
594 	 * ->_{write,read}_oob(), but not both.
595 	 */
596 	if (WARN_ON((mtd->_write && mtd->_write_oob) ||
597 		    (mtd->_read && mtd->_read_oob)))
598 		return -EINVAL;
599 
600 	if (WARN_ON((!mtd->erasesize || !master->_erase) &&
601 		    !(mtd->flags & MTD_NO_ERASE)))
602 		return -EINVAL;
603 
604 	/*
605 	 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
606 	 * master is an MLC NAND and has a proper pairing scheme defined.
607 	 * We also reject masters that implement ->_writev() for now, because
608 	 * NAND controller drivers don't implement this hook, and adding the
609 	 * SLC -> MLC address/length conversion to this path is useless if we
610 	 * don't have a user.
611 	 */
612 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
613 	    (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
614 	     !master->pairing || master->_writev))
615 		return -EINVAL;
616 
617 	mutex_lock(&mtd_table_mutex);
618 
619 	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
620 	if (i < 0) {
621 		error = i;
622 		goto fail_locked;
623 	}
624 
625 	mtd->index = i;
626 	mtd->usecount = 0;
627 
628 	/* default value if not set by driver */
629 	if (mtd->bitflip_threshold == 0)
630 		mtd->bitflip_threshold = mtd->ecc_strength;
631 
632 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
633 		int ngroups = mtd_pairing_groups(master);
634 
635 		mtd->erasesize /= ngroups;
636 		mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
637 			    mtd->erasesize;
638 	}
639 
640 	if (is_power_of_2(mtd->erasesize))
641 		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
642 	else
643 		mtd->erasesize_shift = 0;
644 
645 	if (is_power_of_2(mtd->writesize))
646 		mtd->writesize_shift = ffs(mtd->writesize) - 1;
647 	else
648 		mtd->writesize_shift = 0;
649 
650 	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
651 	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
652 
653 	/* Some chips always power up locked. Unlock them now */
654 	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
655 		error = mtd_unlock(mtd, 0, mtd->size);
656 		if (error && error != -EOPNOTSUPP)
657 			printk(KERN_WARNING
658 			       "%s: unlock failed, writes may not work\n",
659 			       mtd->name);
660 		/* Ignore unlock failures? */
661 		error = 0;
662 	}
663 
664 	/* Caller should have set dev.parent to match the
665 	 * physical device, if appropriate.
666 	 */
667 	mtd->dev.type = &mtd_devtype;
668 	mtd->dev.class = &mtd_class;
669 	mtd->dev.devt = MTD_DEVT(i);
670 	dev_set_name(&mtd->dev, "mtd%d", i);
671 	dev_set_drvdata(&mtd->dev, mtd);
672 	of_node_get(mtd_get_of_node(mtd));
673 	error = device_register(&mtd->dev);
674 	if (error) {
675 		put_device(&mtd->dev);
676 		goto fail_added;
677 	}
678 
679 	/* Add the nvmem provider */
680 	error = mtd_nvmem_add(mtd);
681 	if (error)
682 		goto fail_nvmem_add;
683 
684 	mtd_debugfs_populate(mtd);
685 
686 	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
687 		      "mtd%dro", i);
688 
689 	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
690 	/* No need to get a refcount on the module containing
691 	   the notifier, since we hold the mtd_table_mutex */
692 	list_for_each_entry(not, &mtd_notifiers, list)
693 		not->add(mtd);
694 
695 	mutex_unlock(&mtd_table_mutex);
696 	/* We _know_ we aren't being removed, because
697 	   our caller is still holding us here. So none
698 	   of this try_ nonsense, and no bitching about it
699 	   either. :) */
700 	__module_get(THIS_MODULE);
701 	return 0;
702 
703 fail_nvmem_add:
704 	device_unregister(&mtd->dev);
705 fail_added:
706 	of_node_put(mtd_get_of_node(mtd));
707 	idr_remove(&mtd_idr, i);
708 fail_locked:
709 	mutex_unlock(&mtd_table_mutex);
710 	return error;
711 }
712 
713 /**
714  *	del_mtd_device - unregister an MTD device
715  *	@mtd: pointer to MTD device info structure
716  *
717  *	Remove a device from the list of MTD devices present in the system,
718  *	and notify each currently active MTD 'user' of its departure.
719  *	Returns zero on success or a negative error code on failure: -ENODEV if
720  *	the requested device is not present in the list, or -EBUSY if it is still in use.
721  */
722 
723 int del_mtd_device(struct mtd_info *mtd)
724 {
725 	int ret;
726 	struct mtd_notifier *not;
727 
728 	mutex_lock(&mtd_table_mutex);
729 
730 	if (idr_find(&mtd_idr, mtd->index) != mtd) {
731 		ret = -ENODEV;
732 		goto out_error;
733 	}
734 
735 	/* No need to get a refcount on the module containing
736 		the notifier, since we hold the mtd_table_mutex */
737 	list_for_each_entry(not, &mtd_notifiers, list)
738 		not->remove(mtd);
739 
740 	if (mtd->usecount) {
741 		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
742 		       mtd->index, mtd->name, mtd->usecount);
743 		ret = -EBUSY;
744 	} else {
745 		debugfs_remove_recursive(mtd->dbg.dfs_dir);
746 
747 		/* Try to remove the NVMEM provider */
748 		if (mtd->nvmem)
749 			nvmem_unregister(mtd->nvmem);
750 
751 		device_unregister(&mtd->dev);
752 
753 		idr_remove(&mtd_idr, mtd->index);
754 		of_node_put(mtd_get_of_node(mtd));
755 
756 		module_put(THIS_MODULE);
757 		ret = 0;
758 	}
759 
760 out_error:
761 	mutex_unlock(&mtd_table_mutex);
762 	return ret;
763 }
764 
765 /*
766  * Set a few defaults based on the parent devices, if not provided by the
767  * driver
768  */
769 static void mtd_set_dev_defaults(struct mtd_info *mtd)
770 {
771 	if (mtd->dev.parent) {
772 		if (!mtd->owner && mtd->dev.parent->driver)
773 			mtd->owner = mtd->dev.parent->driver->owner;
774 		if (!mtd->name)
775 			mtd->name = dev_name(mtd->dev.parent);
776 	} else {
777 		pr_debug("mtd device won't show a device symlink in sysfs\n");
778 	}
779 
780 	INIT_LIST_HEAD(&mtd->partitions);
781 	mutex_init(&mtd->master.partitions_lock);
782 	mutex_init(&mtd->master.chrdev_lock);
783 }
784 
785 static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
786 {
787 	struct otp_info *info;
788 	ssize_t size = 0;
789 	unsigned int i;
790 	size_t retlen;
791 	int ret;
792 
793 	info = kmalloc(PAGE_SIZE, GFP_KERNEL);
794 	if (!info)
795 		return -ENOMEM;
796 
797 	if (is_user)
798 		ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
799 	else
800 		ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
801 	if (ret)
802 		goto err;
803 
804 	for (i = 0; i < retlen / sizeof(*info); i++)
805 		size += info[i].length;
806 
807 	kfree(info);
808 	return size;
809 
810 err:
811 	kfree(info);
812 
813 	/* ENODATA means there is no OTP region. */
814 	return ret == -ENODATA ? 0 : ret;
815 }
816 
817 static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
818 						   const char *compatible,
819 						   int size,
820 						   nvmem_reg_read_t reg_read)
821 {
822 	struct nvmem_device *nvmem = NULL;
823 	struct nvmem_config config = {};
824 	struct device_node *np;
825 
826 	/* DT binding is optional */
827 	np = of_get_compatible_child(mtd->dev.of_node, compatible);
828 
829 	/* OTP nvmem will be registered on the physical device */
830 	config.dev = mtd->dev.parent;
831 	config.name = compatible;
832 	config.id = NVMEM_DEVID_AUTO;
833 	config.owner = THIS_MODULE;
834 	config.type = NVMEM_TYPE_OTP;
835 	config.root_only = true;
836 	config.ignore_wp = true;
837 	config.reg_read = reg_read;
838 	config.size = size;
839 	config.of_node = np;
840 	config.priv = mtd;
841 
842 	nvmem = nvmem_register(&config);
843 	/* Just ignore if there is no NVMEM support in the kernel */
844 	if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
845 		nvmem = NULL;
846 
847 	of_node_put(np);
848 
849 	return nvmem;
850 }
851 
852 static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
853 				       void *val, size_t bytes)
854 {
855 	struct mtd_info *mtd = priv;
856 	size_t retlen;
857 	int ret;
858 
859 	ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
860 	if (ret)
861 		return ret;
862 
863 	return retlen == bytes ? 0 : -EIO;
864 }
865 
866 static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
867 				       void *val, size_t bytes)
868 {
869 	struct mtd_info *mtd = priv;
870 	size_t retlen;
871 	int ret;
872 
873 	ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
874 	if (ret)
875 		return ret;
876 
877 	return retlen == bytes ? 0 : -EIO;
878 }
879 
880 static int mtd_otp_nvmem_add(struct mtd_info *mtd)
881 {
882 	struct device *dev = mtd->dev.parent;
883 	struct nvmem_device *nvmem;
884 	ssize_t size;
885 	int err;
886 
887 	if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
888 		size = mtd_otp_size(mtd, true);
889 		if (size < 0)
890 			return size;
891 
892 		if (size > 0) {
893 			nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
894 						       mtd_nvmem_user_otp_reg_read);
895 			if (IS_ERR(nvmem)) {
896 				dev_err(dev, "Failed to register OTP NVMEM device\n");
897 				return PTR_ERR(nvmem);
898 			}
899 			mtd->otp_user_nvmem = nvmem;
900 		}
901 	}
902 
903 	if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
904 		size = mtd_otp_size(mtd, false);
905 		if (size < 0) {
906 			err = size;
907 			goto err;
908 		}
909 
910 		if (size > 0) {
911 			nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
912 						       mtd_nvmem_fact_otp_reg_read);
913 			if (IS_ERR(nvmem)) {
914 				dev_err(dev, "Failed to register OTP NVMEM device\n");
915 				err = PTR_ERR(nvmem);
916 				goto err;
917 			}
918 			mtd->otp_factory_nvmem = nvmem;
919 		}
920 	}
921 
922 	return 0;
923 
924 err:
925 	if (mtd->otp_user_nvmem)
926 		nvmem_unregister(mtd->otp_user_nvmem);
927 	return err;
928 }
929 
930 /**
931  * mtd_device_parse_register - parse partitions and register an MTD device.
932  *
933  * @mtd: the MTD device to register
934  * @types: the list of MTD partition probes to try, see
935  *         'parse_mtd_partitions()' for more information
936  * @parser_data: MTD partition parser-specific data
937  * @parts: fallback partition information to register, if parsing fails;
938  *         only valid if %nr_parts > %0
939  * @nr_parts: the number of partitions in parts, if zero then the full
940  *            MTD device is registered if no partition info is found
941  *
942  * This function aggregates MTD partitions parsing (done by
943  * 'parse_mtd_partitions()') and MTD device and partitions registering. It
944  * basically follows the most common pattern found in many MTD drivers:
945  *
946  * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
947  *   registered first.
948  * * Then it tries to probe partitions on MTD device @mtd using parsers
949  *   specified in @types (if @types is %NULL, then the default list of parsers
950  *   is used, see 'parse_mtd_partitions()' for more information). If none are
951  *   found, this function tries to fall back to information specified in
952  *   @parts/@nr_parts.
953  * * If no partitions were found this function just registers the MTD device
954  *   @mtd and exits.
955  *
956  * Returns zero in case of success and a negative error code in case of failure.
957  */
958 int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
959 			      struct mtd_part_parser_data *parser_data,
960 			      const struct mtd_partition *parts,
961 			      int nr_parts)
962 {
963 	int ret;
964 
965 	mtd_set_dev_defaults(mtd);
966 
967 	ret = mtd_otp_nvmem_add(mtd);
968 	if (ret)
969 		goto out;
970 
971 	if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
972 		ret = add_mtd_device(mtd);
973 		if (ret)
974 			goto out;
975 	}
976 
977 	/* Prefer parsed partitions over driver-provided fallback */
978 	ret = parse_mtd_partitions(mtd, types, parser_data);
979 	if (ret == -EPROBE_DEFER)
980 		goto out;
981 
982 	if (ret > 0)
983 		ret = 0;
984 	else if (nr_parts)
985 		ret = add_mtd_partitions(mtd, parts, nr_parts);
986 	else if (!device_is_registered(&mtd->dev))
987 		ret = add_mtd_device(mtd);
988 	else
989 		ret = 0;
990 
991 	if (ret)
992 		goto out;
993 
994 	/*
995 	 * FIXME: some drivers unfortunately call this function more than once.
996 	 * So we have to check if we've already assigned the reboot notifier.
997 	 *
998 	 * Generally, we can make multiple calls work for most cases, but it
999 	 * does cause problems with parse_mtd_partitions() above (e.g.,
1000 	 * cmdlineparts will register partitions more than once).
1001 	 */
1002 	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
1003 		  "MTD already registered\n");
1004 	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
1005 		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
1006 		register_reboot_notifier(&mtd->reboot_notifier);
1007 	}
1008 
1009 out:
1010 	if (ret) {
1011 		nvmem_unregister(mtd->otp_user_nvmem);
1012 		nvmem_unregister(mtd->otp_factory_nvmem);
1013 	}
1014 
1015 	if (ret && device_is_registered(&mtd->dev))
1016 		del_mtd_device(mtd);
1017 
1018 	return ret;
1019 }
1020 EXPORT_SYMBOL_GPL(mtd_device_parse_register);
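
/*
 * Editor's sketch (not part of the original file): the usual way a flash
 * driver hands its device to this function from probe(). The names my_probe
 * and my_chip are hypothetical.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct mtd_info *mtd = &my_chip.mtd;
 *
 *		mtd->dev.parent = &pdev->dev;
 *		(fill in name, size, erasesize, writesize, _read/_write/_erase)
 *
 *		return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
 *	}
 *
 * Passing NULL @types selects the default partition parsers, and NULL/0 for
 * @parts/@nr_parts means there is no fallback partition table.
 */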
1021 
1022 /**
1023  * mtd_device_unregister - unregister an existing MTD device.
1024  *
1025  * @master: the MTD device to unregister.  This will unregister both the master
1026  *          and any partitions if registered.
1027  */
1028 int mtd_device_unregister(struct mtd_info *master)
1029 {
1030 	int err;
1031 
1032 	if (master->_reboot)
1033 		unregister_reboot_notifier(&master->reboot_notifier);
1034 
1035 	if (master->otp_user_nvmem)
1036 		nvmem_unregister(master->otp_user_nvmem);
1037 
1038 	if (master->otp_factory_nvmem)
1039 		nvmem_unregister(master->otp_factory_nvmem);
1040 
1041 	err = del_mtd_partitions(master);
1042 	if (err)
1043 		return err;
1044 
1045 	if (!device_is_registered(&master->dev))
1046 		return 0;
1047 
1048 	return del_mtd_device(master);
1049 }
1050 EXPORT_SYMBOL_GPL(mtd_device_unregister);
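
/*
 * Editor's sketch (hypothetical names): the matching teardown path, called
 * from the driver's remove() routine once nothing uses the device anymore.
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		WARN_ON(mtd_device_unregister(&my_chip.mtd));
 *		return 0;
 *	}
 */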
1051 
1052 /**
1053  *	register_mtd_user - register a 'user' of MTD devices.
1054  *	@new: pointer to notifier info structure
1055  *
1056  *	Registers a pair of callback functions to be called upon addition
1057  *	or removal of MTD devices. Causes the 'add' callback to be immediately
1058  *	invoked for each MTD device currently present in the system.
1059  */
1060 void register_mtd_user (struct mtd_notifier *new)
1061 {
1062 	struct mtd_info *mtd;
1063 
1064 	mutex_lock(&mtd_table_mutex);
1065 
1066 	list_add(&new->list, &mtd_notifiers);
1067 
1068 	__module_get(THIS_MODULE);
1069 
1070 	mtd_for_each_device(mtd)
1071 		new->add(mtd);
1072 
1073 	mutex_unlock(&mtd_table_mutex);
1074 }
1075 EXPORT_SYMBOL_GPL(register_mtd_user);
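
/*
 * Editor's sketch (hypothetical names) of a minimal MTD 'user'. The add()
 * callback runs under mtd_table_mutex, once per device already present at
 * registration time and then for every later add_mtd_device() call.
 *
 *	static void my_mtd_add(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
 *	}
 *
 *	static void my_mtd_remove(struct mtd_info *mtd) { }
 *
 *	static struct mtd_notifier my_notifier = {
 *		.add	= my_mtd_add,
 *		.remove	= my_mtd_remove,
 *	};
 *
 *	register_mtd_user(&my_notifier);
 */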
1076 
1077 /**
1078  *	unregister_mtd_user - unregister a 'user' of MTD devices.
1079  *	@old: pointer to notifier info structure
1080  *
1081  *	Removes a callback function pair from the list of 'users' to be
1082  *	notified upon addition or removal of MTD devices. Causes the
1083  *	'remove' callback to be immediately invoked for each MTD device
1084  *	currently present in the system.
1085  */
1086 int unregister_mtd_user (struct mtd_notifier *old)
1087 {
1088 	struct mtd_info *mtd;
1089 
1090 	mutex_lock(&mtd_table_mutex);
1091 
1092 	module_put(THIS_MODULE);
1093 
1094 	mtd_for_each_device(mtd)
1095 		old->remove(mtd);
1096 
1097 	list_del(&old->list);
1098 	mutex_unlock(&mtd_table_mutex);
1099 	return 0;
1100 }
1101 EXPORT_SYMBOL_GPL(unregister_mtd_user);
1102 
1103 /**
1104  *	get_mtd_device - obtain a validated handle for an MTD device
1105  *	@mtd: last known address of the required MTD device
1106  *	@num: internal device number of the required MTD device
1107  *
1108  *	Given a number and NULL address, return the num'th entry in the device
1109  *	table, if any.	Given an address and num == -1, search the device table
1110  *	for a device with that address and return if it's still present. Given
1111  *	both, return the num'th driver only if its address matches. Return
1112  *	error code if not.
1113  */
1114 struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
1115 {
1116 	struct mtd_info *ret = NULL, *other;
1117 	int err = -ENODEV;
1118 
1119 	mutex_lock(&mtd_table_mutex);
1120 
1121 	if (num == -1) {
1122 		mtd_for_each_device(other) {
1123 			if (other == mtd) {
1124 				ret = mtd;
1125 				break;
1126 			}
1127 		}
1128 	} else if (num >= 0) {
1129 		ret = idr_find(&mtd_idr, num);
1130 		if (mtd && mtd != ret)
1131 			ret = NULL;
1132 	}
1133 
1134 	if (!ret) {
1135 		ret = ERR_PTR(err);
1136 		goto out;
1137 	}
1138 
1139 	err = __get_mtd_device(ret);
1140 	if (err)
1141 		ret = ERR_PTR(err);
1142 out:
1143 	mutex_unlock(&mtd_table_mutex);
1144 	return ret;
1145 }
1146 EXPORT_SYMBOL_GPL(get_mtd_device);
1147 
1148 
1149 int __get_mtd_device(struct mtd_info *mtd)
1150 {
1151 	struct mtd_info *master = mtd_get_master(mtd);
1152 	int err;
1153 
1154 	if (!try_module_get(master->owner))
1155 		return -ENODEV;
1156 
1157 	if (master->_get_device) {
1158 		err = master->_get_device(mtd);
1159 
1160 		if (err) {
1161 			module_put(master->owner);
1162 			return err;
1163 		}
1164 	}
1165 
1166 	master->usecount++;
1167 
1168 	while (mtd->parent) {
1169 		mtd->usecount++;
1170 		mtd = mtd->parent;
1171 	}
1172 
1173 	return 0;
1174 }
1175 EXPORT_SYMBOL_GPL(__get_mtd_device);
1176 
1177 /**
1178  *	get_mtd_device_nm - obtain a validated handle for an MTD device by
1179  *	device name
1180  *	@name: MTD device name to open
1181  *
1182  * 	This function returns the MTD device description structure in case of
1183  * 	success and an error pointer (via ERR_PTR()) in case of failure.
1184  */
1185 struct mtd_info *get_mtd_device_nm(const char *name)
1186 {
1187 	int err = -ENODEV;
1188 	struct mtd_info *mtd = NULL, *other;
1189 
1190 	mutex_lock(&mtd_table_mutex);
1191 
1192 	mtd_for_each_device(other) {
1193 		if (!strcmp(name, other->name)) {
1194 			mtd = other;
1195 			break;
1196 		}
1197 	}
1198 
1199 	if (!mtd)
1200 		goto out_unlock;
1201 
1202 	err = __get_mtd_device(mtd);
1203 	if (err)
1204 		goto out_unlock;
1205 
1206 	mutex_unlock(&mtd_table_mutex);
1207 	return mtd;
1208 
1209 out_unlock:
1210 	mutex_unlock(&mtd_table_mutex);
1211 	return ERR_PTR(err);
1212 }
1213 EXPORT_SYMBOL_GPL(get_mtd_device_nm);
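
/*
 * Editor's sketch (hypothetical partition name): both lookup helpers return
 * an ERR_PTR() on failure and take a reference that must be dropped with
 * put_mtd_device() when the caller is done.
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("u-boot-env");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	(... use the device ...)
 *	put_mtd_device(mtd);
 */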
1214 
1215 void put_mtd_device(struct mtd_info *mtd)
1216 {
1217 	mutex_lock(&mtd_table_mutex);
1218 	__put_mtd_device(mtd);
1219 	mutex_unlock(&mtd_table_mutex);
1220 
1221 }
1222 EXPORT_SYMBOL_GPL(put_mtd_device);
1223 
1224 void __put_mtd_device(struct mtd_info *mtd)
1225 {
1226 	struct mtd_info *master = mtd_get_master(mtd);
1227 
1228 	while (mtd->parent) {
1229 		--mtd->usecount;
1230 		BUG_ON(mtd->usecount < 0);
1231 		mtd = mtd->parent;
1232 	}
1233 
1234 	master->usecount--;
1235 
1236 	if (master->_put_device)
1237 		master->_put_device(master);
1238 
1239 	module_put(master->owner);
1240 }
1241 EXPORT_SYMBOL_GPL(__put_mtd_device);
1242 
1243 /*
1244  * Erase is a synchronous operation. Device drivers are expected to return a
1245  * negative error code if the operation failed and update instr->fail_addr
1246  * to point to the portion that was not properly erased.
1247  */
1248 int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
1249 {
1250 	struct mtd_info *master = mtd_get_master(mtd);
1251 	u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
1252 	struct erase_info adjinstr;
1253 	int ret;
1254 
1255 	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
1256 	adjinstr = *instr;
1257 
1258 	if (!mtd->erasesize || !master->_erase)
1259 		return -ENOTSUPP;
1260 
1261 	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
1262 		return -EINVAL;
1263 	if (!(mtd->flags & MTD_WRITEABLE))
1264 		return -EROFS;
1265 
1266 	if (!instr->len)
1267 		return 0;
1268 
1269 	ledtrig_mtd_activity();
1270 
1271 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1272 		adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
1273 				master->erasesize;
1274 		adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
1275 				master->erasesize) -
1276 			       adjinstr.addr;
1277 	}
1278 
1279 	adjinstr.addr += mst_ofs;
1280 
1281 	ret = master->_erase(master, &adjinstr);
1282 
1283 	if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
1284 		instr->fail_addr = adjinstr.fail_addr - mst_ofs;
1285 		if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1286 			instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
1287 							 master);
1288 			instr->fail_addr *= mtd->erasesize;
1289 		}
1290 	}
1291 
1292 	return ret;
1293 }
1294 EXPORT_SYMBOL_GPL(mtd_erase);
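
/*
 * Editor's sketch (hypothetical variables): erasing one eraseblock at an
 * eraseblock-aligned offset 'ofs'. On failure, fail_addr identifies the
 * portion that was not properly erased, when the driver knows it.
 *
 *	struct erase_info ei = { .addr = ofs, .len = mtd->erasesize };
 *	int err = mtd_erase(mtd, &ei);
 *
 *	if (err)
 *		pr_err("erase failed around 0x%llx\n",
 *		       (unsigned long long)ei.fail_addr);
 */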
1295 
1296 /*
1297  * This stuff is for eXecute-In-Place (XIP). phys is optional and may be set to NULL.
1298  */
1299 int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1300 	      void **virt, resource_size_t *phys)
1301 {
1302 	struct mtd_info *master = mtd_get_master(mtd);
1303 
1304 	*retlen = 0;
1305 	*virt = NULL;
1306 	if (phys)
1307 		*phys = 0;
1308 	if (!master->_point)
1309 		return -EOPNOTSUPP;
1310 	if (from < 0 || from >= mtd->size || len > mtd->size - from)
1311 		return -EINVAL;
1312 	if (!len)
1313 		return 0;
1314 
1315 	from = mtd_get_master_ofs(mtd, from);
1316 	return master->_point(master, from, len, retlen, virt, phys);
1317 }
1318 EXPORT_SYMBOL_GPL(mtd_point);
1319 
1320 /* We probably shouldn't allow XIP if the unpoint isn't a NULL */
1321 int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1322 {
1323 	struct mtd_info *master = mtd_get_master(mtd);
1324 
1325 	if (!master->_unpoint)
1326 		return -EOPNOTSUPP;
1327 	if (from < 0 || from >= mtd->size || len > mtd->size - from)
1328 		return -EINVAL;
1329 	if (!len)
1330 		return 0;
1331 	return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
1332 }
1333 EXPORT_SYMBOL_GPL(mtd_unpoint);
1334 
1335 /*
1336  * Allow NOMMU mmap() to directly map the device (if not NULL)
1337  * - return the address to which the offset maps
1338  * - return -ENOSYS to indicate refusal to do the mapping
1339  */
1340 unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
1341 				    unsigned long offset, unsigned long flags)
1342 {
1343 	size_t retlen;
1344 	void *virt;
1345 	int ret;
1346 
1347 	ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
1348 	if (ret)
1349 		return ret;
1350 	if (retlen != len) {
1351 		mtd_unpoint(mtd, offset, retlen);
1352 		return -ENOSYS;
1353 	}
1354 	return (unsigned long)virt;
1355 }
1356 EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
1357 
1358 static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
1359 				 const struct mtd_ecc_stats *old_stats)
1360 {
1361 	struct mtd_ecc_stats diff;
1362 
1363 	if (master == mtd)
1364 		return;
1365 
1366 	diff = master->ecc_stats;
1367 	diff.failed -= old_stats->failed;
1368 	diff.corrected -= old_stats->corrected;
1369 
1370 	while (mtd->parent) {
1371 		mtd->ecc_stats.failed += diff.failed;
1372 		mtd->ecc_stats.corrected += diff.corrected;
1373 		mtd = mtd->parent;
1374 	}
1375 }
1376 
1377 int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1378 	     u_char *buf)
1379 {
1380 	struct mtd_oob_ops ops = {
1381 		.len = len,
1382 		.datbuf = buf,
1383 	};
1384 	int ret;
1385 
1386 	ret = mtd_read_oob(mtd, from, &ops);
1387 	*retlen = ops.retlen;
1388 
1389 	return ret;
1390 }
1391 EXPORT_SYMBOL_GPL(mtd_read);
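
/*
 * Editor's sketch (hypothetical variables): a plain data read. -EUCLEAN
 * reports corrected bitflips at or above mtd->bitflip_threshold; the data
 * in 'buf' is still valid in that case.
 *
 *	size_t retlen;
 *	int err = mtd_read(mtd, ofs, mtd->writesize, &retlen, buf);
 *
 *	if (err && err != -EUCLEAN)
 *		return err;
 */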
1392 
1393 int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1394 	      const u_char *buf)
1395 {
1396 	struct mtd_oob_ops ops = {
1397 		.len = len,
1398 		.datbuf = (u8 *)buf,
1399 	};
1400 	int ret;
1401 
1402 	ret = mtd_write_oob(mtd, to, &ops);
1403 	*retlen = ops.retlen;
1404 
1405 	return ret;
1406 }
1407 EXPORT_SYMBOL_GPL(mtd_write);
1408 
1409 /*
1410  * In blackbox flight recorder like scenarios we want to make successful writes
1411  * in interrupt context. panic_write() is only intended to be called when it's
1412  * known the kernel is about to panic and we need the write to succeed. Since
1413  * the kernel is not going to be running for much longer, this function can
1414  * break locks and delay to ensure the write succeeds (but not sleep).
1415  */
1416 int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1417 		    const u_char *buf)
1418 {
1419 	struct mtd_info *master = mtd_get_master(mtd);
1420 
1421 	*retlen = 0;
1422 	if (!master->_panic_write)
1423 		return -EOPNOTSUPP;
1424 	if (to < 0 || to >= mtd->size || len > mtd->size - to)
1425 		return -EINVAL;
1426 	if (!(mtd->flags & MTD_WRITEABLE))
1427 		return -EROFS;
1428 	if (!len)
1429 		return 0;
1430 	if (!master->oops_panic_write)
1431 		master->oops_panic_write = true;
1432 
1433 	return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
1434 				    retlen, buf);
1435 }
1436 EXPORT_SYMBOL_GPL(mtd_panic_write);
1437 
1438 static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
1439 			     struct mtd_oob_ops *ops)
1440 {
1441 	/*
1442 	 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
1443 	 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
1444 	 *  this case.
1445 	 */
1446 	if (!ops->datbuf)
1447 		ops->len = 0;
1448 
1449 	if (!ops->oobbuf)
1450 		ops->ooblen = 0;
1451 
1452 	if (offs < 0 || offs + ops->len > mtd->size)
1453 		return -EINVAL;
1454 
1455 	if (ops->ooblen) {
1456 		size_t maxooblen;
1457 
1458 		if (ops->ooboffs >= mtd_oobavail(mtd, ops))
1459 			return -EINVAL;
1460 
1461 		maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
1462 				      mtd_div_by_ws(offs, mtd)) *
1463 			     mtd_oobavail(mtd, ops)) - ops->ooboffs;
1464 		if (ops->ooblen > maxooblen)
1465 			return -EINVAL;
1466 	}
1467 
1468 	return 0;
1469 }
1470 
1471 static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
1472 			    struct mtd_oob_ops *ops)
1473 {
1474 	struct mtd_info *master = mtd_get_master(mtd);
1475 	int ret;
1476 
1477 	from = mtd_get_master_ofs(mtd, from);
1478 	if (master->_read_oob)
1479 		ret = master->_read_oob(master, from, ops);
1480 	else
1481 		ret = master->_read(master, from, ops->len, &ops->retlen,
1482 				    ops->datbuf);
1483 
1484 	return ret;
1485 }
1486 
1487 static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
1488 			     struct mtd_oob_ops *ops)
1489 {
1490 	struct mtd_info *master = mtd_get_master(mtd);
1491 	int ret;
1492 
1493 	to = mtd_get_master_ofs(mtd, to);
1494 	if (master->_write_oob)
1495 		ret = master->_write_oob(master, to, ops);
1496 	else
1497 		ret = master->_write(master, to, ops->len, &ops->retlen,
1498 				     ops->datbuf);
1499 
1500 	return ret;
1501 }
1502 
1503 static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
1504 			       struct mtd_oob_ops *ops)
1505 {
1506 	struct mtd_info *master = mtd_get_master(mtd);
1507 	int ngroups = mtd_pairing_groups(master);
1508 	int npairs = mtd_wunit_per_eb(master) / ngroups;
1509 	struct mtd_oob_ops adjops = *ops;
1510 	unsigned int wunit, oobavail;
1511 	struct mtd_pairing_info info;
1512 	int max_bitflips = 0;
1513 	u32 ebofs, pageofs;
1514 	loff_t base, pos;
1515 
1516 	ebofs = mtd_mod_by_eb(start, mtd);
1517 	base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
1518 	info.group = 0;
1519 	info.pair = mtd_div_by_ws(ebofs, mtd);
1520 	pageofs = mtd_mod_by_ws(ebofs, mtd);
1521 	oobavail = mtd_oobavail(mtd, ops);
1522 
1523 	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
1524 		int ret;
1525 
1526 		if (info.pair >= npairs) {
1527 			info.pair = 0;
1528 			base += master->erasesize;
1529 		}
1530 
1531 		wunit = mtd_pairing_info_to_wunit(master, &info);
1532 		pos = mtd_wunit_to_offset(mtd, base, wunit);
1533 
1534 		adjops.len = ops->len - ops->retlen;
1535 		if (adjops.len > mtd->writesize - pageofs)
1536 			adjops.len = mtd->writesize - pageofs;
1537 
1538 		adjops.ooblen = ops->ooblen - ops->oobretlen;
1539 		if (adjops.ooblen > oobavail - adjops.ooboffs)
1540 			adjops.ooblen = oobavail - adjops.ooboffs;
1541 
1542 		if (read) {
1543 			ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
1544 			if (ret > 0)
1545 				max_bitflips = max(max_bitflips, ret);
1546 		} else {
1547 			ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
1548 		}
1549 
1550 		if (ret < 0)
1551 			return ret;
1552 
1553 		max_bitflips = max(max_bitflips, ret);
1554 		ops->retlen += adjops.retlen;
1555 		ops->oobretlen += adjops.oobretlen;
1556 		adjops.datbuf += adjops.retlen;
1557 		adjops.oobbuf += adjops.oobretlen;
1558 		adjops.ooboffs = 0;
1559 		pageofs = 0;
1560 		info.pair++;
1561 	}
1562 
1563 	return max_bitflips;
1564 }
1565 
1566 int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1567 {
1568 	struct mtd_info *master = mtd_get_master(mtd);
1569 	struct mtd_ecc_stats old_stats = master->ecc_stats;
1570 	int ret_code;
1571 
1572 	ops->retlen = ops->oobretlen = 0;
1573 
1574 	ret_code = mtd_check_oob_ops(mtd, from, ops);
1575 	if (ret_code)
1576 		return ret_code;
1577 
1578 	ledtrig_mtd_activity();
1579 
1580 	/* Check the validity of a potential fallback on mtd->_read */
1581 	if (!master->_read_oob && (!master->_read || ops->oobbuf))
1582 		return -EOPNOTSUPP;
1583 
1584 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1585 		ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
1586 	else
1587 		ret_code = mtd_read_oob_std(mtd, from, ops);
1588 
1589 	mtd_update_ecc_stats(mtd, master, &old_stats);
1590 
1591 	/*
1592 	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
1593 	 * similar to mtd->_read(), returning a non-negative integer
1594 	 * representing max bitflips. In other cases, mtd->_read_oob() may
1595 	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
1596 	 */
1597 	if (unlikely(ret_code < 0))
1598 		return ret_code;
1599 	if (mtd->ecc_strength == 0)
1600 		return 0;	/* device lacks ecc */
1601 	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
1602 }
1603 EXPORT_SYMBOL_GPL(mtd_read_oob);
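
/*
 * Editor's sketch (hypothetical variables): reading the available OOB bytes
 * of one page with the OOB autoplacement mode defined in mtd-abi.h.
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_AUTO_OOB,
 *		.ooblen	= mtd->oobavail,
 *		.oobbuf	= oob_buf,
 *	};
 *
 *	err = mtd_read_oob(mtd, page_offs, &ops);
 */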
1604 
1605 int mtd_write_oob(struct mtd_info *mtd, loff_t to,
1606 				struct mtd_oob_ops *ops)
1607 {
1608 	struct mtd_info *master = mtd_get_master(mtd);
1609 	int ret;
1610 
1611 	ops->retlen = ops->oobretlen = 0;
1612 
1613 	if (!(mtd->flags & MTD_WRITEABLE))
1614 		return -EROFS;
1615 
1616 	ret = mtd_check_oob_ops(mtd, to, ops);
1617 	if (ret)
1618 		return ret;
1619 
1620 	ledtrig_mtd_activity();
1621 
1622 	/* Check the validity of a potential fallback on mtd->_write */
1623 	if (!master->_write_oob && (!master->_write || ops->oobbuf))
1624 		return -EOPNOTSUPP;
1625 
1626 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1627 		return mtd_io_emulated_slc(mtd, to, false, ops);
1628 
1629 	return mtd_write_oob_std(mtd, to, ops);
1630 }
1631 EXPORT_SYMBOL_GPL(mtd_write_oob);
1632 
1633 /**
1634  * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
1635  * @mtd: MTD device structure
1636  * @section: ECC section. Depending on the layout you may have all the ECC
1637  *	     bytes stored in a single contiguous section, or one section
1638  *	     per ECC chunk (and sometimes several sections for a single
1639  *	     ECC chunk)
1640  * @oobecc: OOB region struct filled with the appropriate ECC position
1641  *	    information
1642  *
1643  * This function returns ECC section information in the OOB area. If you want
1644  * to get all the ECC bytes information, then you should call
1645  * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
1646  *
1647  * Returns zero on success, a negative error code otherwise.
1648  */
1649 int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1650 		      struct mtd_oob_region *oobecc)
1651 {
1652 	struct mtd_info *master = mtd_get_master(mtd);
1653 
1654 	memset(oobecc, 0, sizeof(*oobecc));
1655 
1656 	if (!master || section < 0)
1657 		return -EINVAL;
1658 
1659 	if (!master->ooblayout || !master->ooblayout->ecc)
1660 		return -ENOTSUPP;
1661 
1662 	return master->ooblayout->ecc(master, section, oobecc);
1663 }
1664 EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
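
/*
 * Editor's sketch (hypothetical variables): the iteration described above,
 * walking every ECC region of the layout until -ERANGE ends the loop.
 *
 *	struct mtd_oob_region region;
 *	int section = 0;
 *
 *	while (!mtd_ooblayout_ecc(mtd, section++, &region))
 *		pr_info("ECC bytes at OOB offset %u, length %u\n",
 *			region.offset, region.length);
 */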
1665 
1666 /**
1667  * mtd_ooblayout_free - Get the OOB region definition of a specific free
1668  *			section
1669  * @mtd: MTD device structure
1670  * @section: Free section you are interested in. Depending on the layout
1671  *	     you may have all the free bytes stored in a single contiguous
1672  *	     section, or one section per ECC chunk plus an extra section
1673  *	     for the remaining bytes (or other funky layout).
1674  * @oobfree: OOB region struct filled with the appropriate free position
1675  *	     information
1676  *
1677  * This function returns free bytes position in the OOB area. If you want
1678  * to get all the free bytes information, then you should call
1679  * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
1680  *
1681  * Returns zero on success, a negative error code otherwise.
1682  */
1683 int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1684 		       struct mtd_oob_region *oobfree)
1685 {
1686 	struct mtd_info *master = mtd_get_master(mtd);
1687 
1688 	memset(oobfree, 0, sizeof(*oobfree));
1689 
1690 	if (!master || section < 0)
1691 		return -EINVAL;
1692 
1693 	if (!master->ooblayout || !master->ooblayout->free)
1694 		return -ENOTSUPP;
1695 
1696 	return master->ooblayout->free(master, section, oobfree);
1697 }
1698 EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1699 
1700 /**
1701  * mtd_ooblayout_find_region - Find the region attached to a specific byte
1702  * @mtd: mtd info structure
1703  * @byte: the byte we are searching for
1704  * @sectionp: pointer where the section id will be stored
1705  * @oobregion: used to retrieve the ECC position
1706  * @iter: iterator function. Should be either mtd_ooblayout_free or
1707  *	  mtd_ooblayout_ecc depending on the region type you're searching for
1708  *
1709  * This function returns the section id and oobregion information of a
1710  * specific byte. For example, say you want to know where the 4th ECC byte is
1711  * stored, you'll use:
1712  *
1713  * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1714  *
1715  * Returns zero on success, a negative error code otherwise.
1716  */
1717 static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1718 				int *sectionp, struct mtd_oob_region *oobregion,
1719 				int (*iter)(struct mtd_info *,
1720 					    int section,
1721 					    struct mtd_oob_region *oobregion))
1722 {
1723 	int pos = 0, ret, section = 0;
1724 
1725 	memset(oobregion, 0, sizeof(*oobregion));
1726 
1727 	while (1) {
1728 		ret = iter(mtd, section, oobregion);
1729 		if (ret)
1730 			return ret;
1731 
1732 		if (pos + oobregion->length > byte)
1733 			break;
1734 
1735 		pos += oobregion->length;
1736 		section++;
1737 	}
1738 
1739 	/*
1740 	 * Adjust the region info so that it starts at the requested
1741 	 * 'byte' position.
1742 	 */
1743 	oobregion->offset += byte - pos;
1744 	oobregion->length -= byte - pos;
1745 	*sectionp = section;
1746 
1747 	return 0;
1748 }
1749 
1750 /**
1751  * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1752  *				  ECC byte
1753  * @mtd: mtd info structure
1754  * @eccbyte: the byte we are searching for
1755  * @section: pointer where the section id will be stored
1756  * @oobregion: OOB region information
1757  *
1758  * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1759  * byte.
1760  *
1761  * Returns zero on success, a negative error code otherwise.
1762  */
1763 int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1764 				 int *section,
1765 				 struct mtd_oob_region *oobregion)
1766 {
1767 	return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1768 					 mtd_ooblayout_ecc);
1769 }
1770 EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
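
/*
 * Illustrative sketch fleshing out the example from the kernel-doc above:
 * locate the OOB position of the 4th ECC byte with
 * mtd_ooblayout_find_eccregion(). 'locate_fourth_ecc_byte' is a
 * hypothetical helper.
 */
static int __maybe_unused locate_fourth_ecc_byte(struct mtd_info *mtd)
{
	struct mtd_oob_region oobregion;
	int section, ret;

	ret = mtd_ooblayout_find_eccregion(mtd, 3, &section, &oobregion);
	if (ret)
		return ret;

	/* oobregion.offset now points at ECC byte 3 within the OOB area */
	pr_info("ECC byte 3: section %d, OOB offset %u\n",
		section, oobregion.offset);
	return 0;
}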
1771 
1772 /**
1773  * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1774  * @mtd: mtd info structure
1775  * @buf: destination buffer to store OOB bytes
1776  * @oobbuf: OOB buffer
1777  * @start: first byte to retrieve
1778  * @nbytes: number of bytes to retrieve
1779  * @iter: section iterator
1780  *
1781  * Extract bytes attached to a specific category (ECC or free)
1782  * from the OOB buffer and copy them into buf.
1783  *
1784  * Returns zero on success, a negative error code otherwise.
1785  */
1786 static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1787 				const u8 *oobbuf, int start, int nbytes,
1788 				int (*iter)(struct mtd_info *,
1789 					    int section,
1790 					    struct mtd_oob_region *oobregion))
1791 {
1792 	struct mtd_oob_region oobregion;
1793 	int section, ret;
1794 
1795 	ret = mtd_ooblayout_find_region(mtd, start, &section,
1796 					&oobregion, iter);
1797 
1798 	while (!ret) {
1799 		int cnt;
1800 
1801 		cnt = min_t(int, nbytes, oobregion.length);
1802 		memcpy(buf, oobbuf + oobregion.offset, cnt);
1803 		buf += cnt;
1804 		nbytes -= cnt;
1805 
1806 		if (!nbytes)
1807 			break;
1808 
1809 		ret = iter(mtd, ++section, &oobregion);
1810 	}
1811 
1812 	return ret;
1813 }
1814 
1815 /**
1816  * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1817  * @mtd: mtd info structure
1818  * @buf: source buffer to get OOB bytes from
1819  * @oobbuf: OOB buffer
1820  * @start: first OOB byte to set
1821  * @nbytes: number of OOB bytes to set
1822  * @iter: section iterator
1823  *
1824  * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1825  * is selected by passing the appropriate iterator.
1826  *
1827  * Returns zero on success, a negative error code otherwise.
1828  */
1829 static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1830 				u8 *oobbuf, int start, int nbytes,
1831 				int (*iter)(struct mtd_info *,
1832 					    int section,
1833 					    struct mtd_oob_region *oobregion))
1834 {
1835 	struct mtd_oob_region oobregion;
1836 	int section, ret;
1837 
1838 	ret = mtd_ooblayout_find_region(mtd, start, &section,
1839 					&oobregion, iter);
1840 
1841 	while (!ret) {
1842 		int cnt;
1843 
1844 		cnt = min_t(int, nbytes, oobregion.length);
1845 		memcpy(oobbuf + oobregion.offset, buf, cnt);
1846 		buf += cnt;
1847 		nbytes -= cnt;
1848 
1849 		if (!nbytes)
1850 			break;
1851 
1852 		ret = iter(mtd, ++section, &oobregion);
1853 	}
1854 
1855 	return ret;
1856 }
1857 
1858 /**
1859  * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
1860  * @mtd: mtd info structure
1861  * @iter: category iterator
1862  *
1863  * Count the number of bytes in a given category.
1864  *
1865  * Returns a positive value on success, a negative error code otherwise.
1866  */
1867 static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1868 				int (*iter)(struct mtd_info *,
1869 					    int section,
1870 					    struct mtd_oob_region *oobregion))
1871 {
1872 	struct mtd_oob_region oobregion;
1873 	int section = 0, ret, nbytes = 0;
1874 
1875 	while (1) {
1876 		ret = iter(mtd, section++, &oobregion);
1877 		if (ret) {
1878 			if (ret == -ERANGE)
1879 				ret = nbytes;
1880 			break;
1881 		}
1882 
1883 		nbytes += oobregion.length;
1884 	}
1885 
1886 	return ret;
1887 }
1888 
1889 /**
1890  * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
1891  * @mtd: mtd info structure
1892  * @eccbuf: destination buffer to store ECC bytes
1893  * @oobbuf: OOB buffer
1894  * @start: first ECC byte to retrieve
1895  * @nbytes: number of ECC bytes to retrieve
1896  *
1897  * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
1898  *
1899  * Returns zero on success, a negative error code otherwise.
1900  */
1901 int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
1902 			       const u8 *oobbuf, int start, int nbytes)
1903 {
1904 	return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1905 				       mtd_ooblayout_ecc);
1906 }
1907 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
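
/*
 * Illustrative sketch: given a raw OOB buffer (e.g. obtained with
 * mtd_read_oob() in MTD_OPS_RAW mode), gather all ECC bytes into a
 * caller-provided buffer. 'extract_ecc' is a hypothetical helper and
 * assumes 'eccbuf' holds at least mtd_ooblayout_count_eccbytes(mtd) bytes.
 */
static int __maybe_unused extract_ecc(struct mtd_info *mtd, u8 *eccbuf,
				      const u8 *oobbuf)
{
	int nbytes = mtd_ooblayout_count_eccbytes(mtd);

	if (nbytes < 0)
		return nbytes;

	return mtd_ooblayout_get_eccbytes(mtd, eccbuf, oobbuf, 0, nbytes);
}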
1908 
1909 /**
1910  * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
1911  * @mtd: mtd info structure
1912  * @eccbuf: source buffer to get ECC bytes from
1913  * @oobbuf: OOB buffer
1914  * @start: first ECC byte to set
1915  * @nbytes: number of ECC bytes to set
1916  *
1917  * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
1918  *
1919  * Returns zero on success, a negative error code otherwise.
1920  */
1921 int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
1922 			       u8 *oobbuf, int start, int nbytes)
1923 {
1924 	return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1925 				       mtd_ooblayout_ecc);
1926 }
1927 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
1928 
1929 /**
1930  * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
1931  * @mtd: mtd info structure
1932  * @databuf: destination buffer to store data bytes
1933  * @oobbuf: OOB buffer
1934  * @start: first data byte to retrieve
1935  * @nbytes: number of data bytes to retrieve
1936  *
1937  * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
1938  *
1939  * Returns zero on success, a negative error code otherwise.
1940  */
1941 int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
1942 				const u8 *oobbuf, int start, int nbytes)
1943 {
1944 	return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
1945 				       mtd_ooblayout_free);
1946 }
1947 EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
1948 
1949 /**
1950  * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
1951  * @mtd: mtd info structure
1952  * @databuf: source buffer to get data bytes from
1953  * @oobbuf: OOB buffer
1954  * @start: first data byte to set
1955  * @nbytes: number of data bytes to set
1956  *
1957  * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
1958  *
1959  * Returns zero on success, a negative error code otherwise.
1960  */
1961 int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
1962 				u8 *oobbuf, int start, int nbytes)
1963 {
1964 	return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
1965 				       mtd_ooblayout_free);
1966 }
1967 EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
1968 
1969 /**
1970  * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
1971  * @mtd: mtd info structure
1972  *
1973  * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
1974  *
1975  * Returns the number of free bytes on success, a negative error code otherwise.
1976  */
1977 int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
1978 {
1979 	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
1980 }
1981 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
1982 
1983 /**
1984  * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
1985  * @mtd: mtd info structure
1986  *
1987  * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
1988  *
1989  * Returns the number of ECC bytes on success, a negative error code otherwise.
1990  */
1991 int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
1992 {
1993 	return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
1994 }
1995 EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
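
/*
 * Illustrative sketch: report how the OOB area of a device is split between
 * ECC and free bytes using the two counters above. 'report_oob_split' is a
 * hypothetical helper.
 */
static void __maybe_unused report_oob_split(struct mtd_info *mtd)
{
	int necc = mtd_ooblayout_count_eccbytes(mtd);
	int nfree = mtd_ooblayout_count_freebytes(mtd);

	if (necc < 0 || nfree < 0)
		return;

	pr_info("%s: %u OOB bytes, %d used for ECC, %d free\n",
		mtd->name, mtd->oobsize, necc, nfree);
}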
1996 
1997 /*
1998  * Method to access the protection register area, present in some flash
1999  * devices. The user data is one-time programmable but the factory data is
2000  * read-only.
2001  */
2002 int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2003 			   struct otp_info *buf)
2004 {
2005 	struct mtd_info *master = mtd_get_master(mtd);
2006 
2007 	if (!master->_get_fact_prot_info)
2008 		return -EOPNOTSUPP;
2009 	if (!len)
2010 		return 0;
2011 	return master->_get_fact_prot_info(master, len, retlen, buf);
2012 }
2013 EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
2014 
2015 int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2016 			   size_t *retlen, u_char *buf)
2017 {
2018 	struct mtd_info *master = mtd_get_master(mtd);
2019 
2020 	*retlen = 0;
2021 	if (!master->_read_fact_prot_reg)
2022 		return -EOPNOTSUPP;
2023 	if (!len)
2024 		return 0;
2025 	return master->_read_fact_prot_reg(master, from, len, retlen, buf);
2026 }
2027 EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
2028 
2029 int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2030 			   struct otp_info *buf)
2031 {
2032 	struct mtd_info *master = mtd_get_master(mtd);
2033 
2034 	if (!master->_get_user_prot_info)
2035 		return -EOPNOTSUPP;
2036 	if (!len)
2037 		return 0;
2038 	return master->_get_user_prot_info(master, len, retlen, buf);
2039 }
2040 EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
2041 
2042 int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2043 			   size_t *retlen, u_char *buf)
2044 {
2045 	struct mtd_info *master = mtd_get_master(mtd);
2046 
2047 	*retlen = 0;
2048 	if (!master->_read_user_prot_reg)
2049 		return -EOPNOTSUPP;
2050 	if (!len)
2051 		return 0;
2052 	return master->_read_user_prot_reg(master, from, len, retlen, buf);
2053 }
2054 EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
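
/*
 * Illustrative sketch: query the user OTP region layout and read the first
 * few bytes of the first user region. 'read_user_otp_start' is a
 * hypothetical helper; error handling is kept minimal.
 */
static int __maybe_unused read_user_otp_start(struct mtd_info *mtd,
					      u_char *buf, size_t len)
{
	struct otp_info info;
	size_t retlen;
	int ret;

	/* Fetch the descriptor of the first user OTP region. */
	ret = mtd_get_user_prot_info(mtd, sizeof(info), &retlen, &info);
	if (ret)
		return ret;
	if (retlen < sizeof(info))
		return -ENODATA;

	len = min_t(size_t, len, info.length);
	return mtd_read_user_prot_reg(mtd, info.start, len, &retlen, buf);
}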
2055 
2056 int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
2057 			    size_t *retlen, const u_char *buf)
2058 {
2059 	struct mtd_info *master = mtd_get_master(mtd);
2060 	int ret;
2061 
2062 	*retlen = 0;
2063 	if (!master->_write_user_prot_reg)
2064 		return -EOPNOTSUPP;
2065 	if (!len)
2066 		return 0;
2067 	ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
2068 	if (ret)
2069 		return ret;
2070 
2071 	/*
2072 	 * If no data could be written at all, we are out of OTP space and
2073 	 * must return -ENOSPC.
2074 	 */
2075 	return (*retlen) ? 0 : -ENOSPC;
2076 }
2077 EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
2078 
2079 int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2080 {
2081 	struct mtd_info *master = mtd_get_master(mtd);
2082 
2083 	if (!master->_lock_user_prot_reg)
2084 		return -EOPNOTSUPP;
2085 	if (!len)
2086 		return 0;
2087 	return master->_lock_user_prot_reg(master, from, len);
2088 }
2089 EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
2090 
2091 int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2092 {
2093 	struct mtd_info *master = mtd_get_master(mtd);
2094 
2095 	if (!master->_erase_user_prot_reg)
2096 		return -EOPNOTSUPP;
2097 	if (!len)
2098 		return 0;
2099 	return master->_erase_user_prot_reg(master, from, len);
2100 }
2101 EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
2102 
2103 /* Chip-supported device locking */
2104 int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2105 {
2106 	struct mtd_info *master = mtd_get_master(mtd);
2107 
2108 	if (!master->_lock)
2109 		return -EOPNOTSUPP;
2110 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2111 		return -EINVAL;
2112 	if (!len)
2113 		return 0;
2114 
2115 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2116 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2117 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2118 	}
2119 
2120 	return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
2121 }
2122 EXPORT_SYMBOL_GPL(mtd_lock);
2123 
2124 int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2125 {
2126 	struct mtd_info *master = mtd_get_master(mtd);
2127 
2128 	if (!master->_unlock)
2129 		return -EOPNOTSUPP;
2130 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2131 		return -EINVAL;
2132 	if (!len)
2133 		return 0;
2134 
2135 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2136 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2137 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2138 	}
2139 
2140 	return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
2141 }
2142 EXPORT_SYMBOL_GPL(mtd_unlock);
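
/*
 * Illustrative sketch: some NOR chips power up with all blocks locked, so a
 * writer may need to unlock the target range first. 'write_unlocked' is a
 * hypothetical helper that unlocks the erase block containing 'ofs' and then
 * writes into it; devices without lock support return -EOPNOTSUPP, which is
 * simply ignored here.
 */
static int __maybe_unused write_unlocked(struct mtd_info *mtd, loff_t ofs,
					 size_t len, const u_char *buf)
{
	loff_t blk = ofs - mtd_mod_by_eb(ofs, mtd);
	size_t retlen;
	int ret;

	ret = mtd_unlock(mtd, blk, mtd->erasesize);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	return mtd_write(mtd, ofs, len, &retlen, buf);
}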
2143 
2144 int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2145 {
2146 	struct mtd_info *master = mtd_get_master(mtd);
2147 
2148 	if (!master->_is_locked)
2149 		return -EOPNOTSUPP;
2150 	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2151 		return -EINVAL;
2152 	if (!len)
2153 		return 0;
2154 
2155 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2156 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2157 		len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2158 	}
2159 
2160 	return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
2161 }
2162 EXPORT_SYMBOL_GPL(mtd_is_locked);
2163 
2164 int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
2165 {
2166 	struct mtd_info *master = mtd_get_master(mtd);
2167 
2168 	if (ofs < 0 || ofs >= mtd->size)
2169 		return -EINVAL;
2170 	if (!master->_block_isreserved)
2171 		return 0;
2172 
2173 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2174 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2175 
2176 	return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2177 }
2178 EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2179 
2180 int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2181 {
2182 	struct mtd_info *master = mtd_get_master(mtd);
2183 
2184 	if (ofs < 0 || ofs >= mtd->size)
2185 		return -EINVAL;
2186 	if (!master->_block_isbad)
2187 		return 0;
2188 
2189 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2190 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2191 
2192 	return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2193 }
2194 EXPORT_SYMBOL_GPL(mtd_block_isbad);
2195 
2196 int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2197 {
2198 	struct mtd_info *master = mtd_get_master(mtd);
2199 	int ret;
2200 
2201 	if (!master->_block_markbad)
2202 		return -EOPNOTSUPP;
2203 	if (ofs < 0 || ofs >= mtd->size)
2204 		return -EINVAL;
2205 	if (!(mtd->flags & MTD_WRITEABLE))
2206 		return -EROFS;
2207 
2208 	if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2209 		ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2210 
2211 	ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
2212 	if (ret)
2213 		return ret;
2214 
2215 	while (mtd->parent) {
2216 		mtd->ecc_stats.badblocks++;
2217 		mtd = mtd->parent;
2218 	}
2219 
2220 	return 0;
2221 }
2222 EXPORT_SYMBOL_GPL(mtd_block_markbad);
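
/*
 * Illustrative sketch: walk every erase block of a device and count those
 * already marked bad, using mtd_block_isbad() above. 'count_bad_blocks' is a
 * hypothetical helper.
 */
static int __maybe_unused count_bad_blocks(struct mtd_info *mtd)
{
	loff_t ofs;
	int bad = 0, ret;

	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
		ret = mtd_block_isbad(mtd, ofs);
		if (ret < 0)
			return ret;
		if (ret)
			bad++;
	}

	return bad;
}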
2223 
2224 /*
2225  * default_mtd_writev - the default writev method
2226  * @mtd: mtd device description object pointer
2227  * @vecs: the vectors to write
2228  * @count: count of vectors in @vecs
2229  * @to: the MTD device offset to write to
2230  * @retlen: on exit contains the count of bytes written to the MTD device.
2231  *
2232  * This function returns zero in case of success and a negative error code in
2233  * case of failure.
2234  */
2235 static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2236 			      unsigned long count, loff_t to, size_t *retlen)
2237 {
2238 	unsigned long i;
2239 	size_t totlen = 0, thislen;
2240 	int ret = 0;
2241 
2242 	for (i = 0; i < count; i++) {
2243 		if (!vecs[i].iov_len)
2244 			continue;
2245 		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2246 				vecs[i].iov_base);
2247 		totlen += thislen;
2248 		if (ret || thislen != vecs[i].iov_len)
2249 			break;
2250 		to += vecs[i].iov_len;
2251 	}
2252 	*retlen = totlen;
2253 	return ret;
2254 }
2255 
2256 /*
2257  * mtd_writev - the vector-based MTD write method
2258  * @mtd: mtd device description object pointer
2259  * @vecs: the vectors to write
2260  * @count: count of vectors in @vecs
2261  * @to: the MTD device offset to write to
2262  * @retlen: on exit contains the count of bytes written to the MTD device.
2263  *
2264  * This function returns zero in case of success and a negative error code in
2265  * case of failure.
2266  */
2267 int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2268 	       unsigned long count, loff_t to, size_t *retlen)
2269 {
2270 	struct mtd_info *master = mtd_get_master(mtd);
2271 
2272 	*retlen = 0;
2273 	if (!(mtd->flags & MTD_WRITEABLE))
2274 		return -EROFS;
2275 
2276 	if (!master->_writev)
2277 		return default_mtd_writev(mtd, vecs, count, to, retlen);
2278 
2279 	return master->_writev(master, vecs, count,
2280 			       mtd_get_master_ofs(mtd, to), retlen);
2281 }
2282 EXPORT_SYMBOL_GPL(mtd_writev);
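
/*
 * Illustrative sketch: write two separate buffers (say a header and a
 * payload) in one call by describing them with a kvec array.
 * 'write_hdr_and_payload' is a hypothetical helper.
 */
static int __maybe_unused write_hdr_and_payload(struct mtd_info *mtd,
						loff_t to,
						void *hdr, size_t hdr_len,
						void *data, size_t data_len)
{
	struct kvec vecs[2] = {
		{ .iov_base = hdr,  .iov_len = hdr_len  },
		{ .iov_base = data, .iov_len = data_len },
	};
	size_t retlen;
	int ret;

	ret = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
	if (!ret && retlen != hdr_len + data_len)
		ret = -EIO;

	return ret;
}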
2283 
2284 /**
2285  * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
2286  * @mtd: mtd device description object pointer
2287  * @size: a pointer to the ideal or maximum size of the allocation, points
2288  *        to the actual allocation size on success.
2289  *
2290  * This routine attempts to allocate a contiguous kernel buffer up to
2291  * the specified size, backing off the size of the request exponentially
2292  * until the request succeeds or until the allocation size falls below
2293  * the system page size. This attempts to make sure it does not adversely
2294  * impact system performance, so when allocating more than one page, we
2295  * ask the memory allocator to avoid re-trying, swapping, writing back
2296  * or performing I/O.
2297  *
2298  * Note, this function also makes sure that the allocated buffer is aligned to
2299  * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
2300  *
2301  * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
2302  * to handle smaller (i.e. degraded) buffer allocations under low- or
2303  * fragmented-memory situations where such reduced allocations, from a
2304  * requested ideal, are allowed.
2305  *
2306  * Returns a pointer to the allocated buffer on success; otherwise, NULL.
2307  */
2308 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2309 {
2310 	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2311 	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2312 	void *kbuf;
2313 
2314 	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2315 
2316 	while (*size > min_alloc) {
2317 		kbuf = kmalloc(*size, flags);
2318 		if (kbuf)
2319 			return kbuf;
2320 
2321 		*size >>= 1;
2322 		*size = ALIGN(*size, mtd->writesize);
2323 	}
2324 
2325 	/*
2326 	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
2327 	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
2328 	 */
2329 	return kmalloc(*size, GFP_KERNEL);
2330 }
2331 EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
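
/*
 * Illustrative sketch: read a large region in chunks, letting
 * mtd_kmalloc_up_to() pick the biggest buffer the allocator will grant and
 * looping with that chunk size. 'read_in_chunks' is a hypothetical helper;
 * the data itself is discarded.
 */
static int __maybe_unused read_in_chunks(struct mtd_info *mtd, loff_t from,
					 size_t total)
{
	size_t bufsize = total;
	size_t retlen;
	void *buf;
	int ret = 0;

	buf = mtd_kmalloc_up_to(mtd, &bufsize);
	if (!buf)
		return -ENOMEM;

	while (total) {
		size_t chunk = min_t(size_t, total, bufsize);

		ret = mtd_read(mtd, from, chunk, &retlen, buf);
		if (ret)
			break;
		if (retlen != chunk) {
			ret = -EIO;
			break;
		}

		from += chunk;
		total -= chunk;
	}

	kfree(buf);
	return ret;
}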
2332 
2333 #ifdef CONFIG_PROC_FS
2334 
2335 /*====================================================================*/
2336 /* Support for /proc/mtd */
2337 
2338 static int mtd_proc_show(struct seq_file *m, void *v)
2339 {
2340 	struct mtd_info *mtd;
2341 
2342 	seq_puts(m, "dev:    size   erasesize  name\n");
2343 	mutex_lock(&mtd_table_mutex);
2344 	mtd_for_each_device(mtd) {
2345 		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2346 			   mtd->index, (unsigned long long)mtd->size,
2347 			   mtd->erasesize, mtd->name);
2348 	}
2349 	mutex_unlock(&mtd_table_mutex);
2350 	return 0;
2351 }
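
/*
 * Example of the /proc/mtd output produced by the function above (device
 * names and sizes below are purely illustrative):
 *
 *   dev:    size   erasesize  name
 *   mtd0: 01000000 00020000 "boot"
 *   mtd1: 10000000 00040000 "data"
 */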
2352 #endif /* CONFIG_PROC_FS */
2353 
2354 /*====================================================================*/
2355 /* Init code */
2356 
2357 static struct backing_dev_info * __init mtd_bdi_init(const char *name)
2358 {
2359 	struct backing_dev_info *bdi;
2360 	int ret;
2361 
2362 	bdi = bdi_alloc(NUMA_NO_NODE);
2363 	if (!bdi)
2364 		return ERR_PTR(-ENOMEM);
2365 	bdi->ra_pages = 0;
2366 	bdi->io_pages = 0;
2367 
2368 	/*
2369 	 * We append a '-0' suffix to the name to keep the name format that was
2370 	 * used previously. Since this is called only once, the name is unique.
2371 	 */
2372 	ret = bdi_register(bdi, "%.28s-0", name);
2373 	if (ret)
2374 		bdi_put(bdi);
2375 
2376 	return ret ? ERR_PTR(ret) : bdi;
2377 }
2378 
2379 static struct proc_dir_entry *proc_mtd;
2380 
2381 static int __init init_mtd(void)
2382 {
2383 	int ret;
2384 
2385 	ret = class_register(&mtd_class);
2386 	if (ret)
2387 		goto err_reg;
2388 
2389 	mtd_bdi = mtd_bdi_init("mtd");
2390 	if (IS_ERR(mtd_bdi)) {
2391 		ret = PTR_ERR(mtd_bdi);
2392 		goto err_bdi;
2393 	}
2394 
2395 	proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2396 
2397 	ret = init_mtdchar();
2398 	if (ret)
2399 		goto out_procfs;
2400 
2401 	dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2402 
2403 	return 0;
2404 
2405 out_procfs:
2406 	if (proc_mtd)
2407 		remove_proc_entry("mtd", NULL);
2408 	bdi_put(mtd_bdi);
2409 err_bdi:
2410 	class_unregister(&mtd_class);
2411 err_reg:
2412 	pr_err("Error registering mtd class or bdi: %d\n", ret);
2413 	return ret;
2414 }
2415 
2416 static void __exit cleanup_mtd(void)
2417 {
2418 	debugfs_remove_recursive(dfs_dir_mtd);
2419 	cleanup_mtdchar();
2420 	if (proc_mtd)
2421 		remove_proc_entry("mtd", NULL);
2422 	class_unregister(&mtd_class);
2423 	bdi_put(mtd_bdi);
2424 	idr_destroy(&mtd_idr);
2425 }
2426 
2427 module_init(init_mtd);
2428 module_exit(cleanup_mtd);
2429 
2430 MODULE_LICENSE("GPL");
2431 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2432 MODULE_DESCRIPTION("Core MTD registration and access routines");
2433