/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006      Red Hat UK Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

/*
 * backing device capabilities for non-mappable devices (such as NAND flash)
 * - permits private mappings, copies are taken of the data
 */
static struct backing_dev_info mtd_bdi_unmappable = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

/*
 * backing device capabilities for R/O mappable devices (such as ROM)
 * - permits private mappings, copies are taken of the data
 * - permits non-writable shared mappings
 */
static struct backing_dev_info mtd_bdi_ro_mappable = {
	.capabilities	= (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
			   BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP),
};

/*
 * backing device capabilities for writable mappable devices (such as RAM)
 * - permits private mappings, copies are taken of the data
 * - permits writable shared mappings
 */
static struct backing_dev_info mtd_bdi_rw_mappable = {
	.capabilities	= (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT |
			   BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP |
			   BDI_CAP_WRITE_MAP),
};

static int mtd_cls_suspend(struct device *dev, pm_message_t state);
static int mtd_cls_resume(struct device *dev);

static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.suspend = mtd_cls_suspend,
	.resume = mtd_cls_resume,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);


#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
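
/*
 * Each MTD device owns a pair of character device minors: MTD_DEVT(3),
 * for instance, is (MTD_CHAR_MAJOR, 6) for /dev/mtd3, and the following
 * minor (MTD_CHAR_MAJOR, 7) is used for the read-only /dev/mtd3ro node
 * created in add_mtd_device() below.
 */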

/* REVISIT once MTD uses the driver model better, whoever allocates
 * the mtd_info will probably want to use the release() hook...
 */
static void mtd_release(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	dev_t index = MTD_DEVT(mtd->index);

	/* remove /dev/mtdXro node */
	device_destroy(&mtd_class, index + 1);
}

static int mtd_cls_suspend(struct device *dev, pm_message_t state)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static ssize_t mtd_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	char *type;

	switch (mtd->type) {
	case MTD_ABSENT:
		type = "absent";
		break;
	case MTD_RAM:
		type = "ram";
		break;
	case MTD_ROM:
		type = "rom";
		break;
	case MTD_NORFLASH:
		type = "nor";
		break;
	case MTD_NANDFLASH:
		type = "nand";
		break;
	case MTD_DATAFLASH:
		type = "dataflash";
		break;
	case MTD_UBIVOLUME:
		type = "ubi";
		break;
	case MTD_MLCNANDFLASH:
		type = "mlc-nand";
		break;
	default:
		type = "unknown";
	}

	return snprintf(buf, PAGE_SIZE, "%s\n", type);
}
static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);

static ssize_t mtd_flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
}
static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);

static ssize_t mtd_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)mtd->size);
}
static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);

static ssize_t mtd_erasesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
}
static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);

static ssize_t mtd_writesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
}
static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);

static ssize_t mtd_subpagesize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;

	return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
}
static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);

static ssize_t mtd_oobsize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
}
static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);

static ssize_t mtd_numeraseregions_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
}
static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
	NULL);

static ssize_t mtd_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);

static ssize_t mtd_ecc_strength_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
}
static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);

static ssize_t mtd_bitflip_threshold_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
}

static ssize_t mtd_bitflip_threshold_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	unsigned int bitflip_threshold;
	int retval;

	retval = kstrtouint(buf, 0, &bitflip_threshold);
	if (retval)
		return retval;

	mtd->bitflip_threshold = bitflip_threshold;
	return count;
}
static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
		   mtd_bitflip_threshold_show,
		   mtd_bitflip_threshold_store);
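
/*
 * Like the other attributes above, bitflip_threshold appears under
 * /sys/class/mtd/mtdX/; unlike them it is writable, so user space can
 * tune when reads start reporting -EUCLEAN. A rough illustration (the
 * device number and values here are hypothetical):
 *
 *	# cat /sys/class/mtd/mtd0/bitflip_threshold
 *	4
 *	# echo 3 > /sys/class/mtd/mtd0/bitflip_threshold
 */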

static ssize_t mtd_ecc_step_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
}
static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);

static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
}
static DEVICE_ATTR(corrected_bits, S_IRUGO,
		   mtd_ecc_stats_corrected_show, NULL);

static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
}
static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);

static ssize_t mtd_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
}
static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);

static ssize_t mtd_bbtblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;

	return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
}
static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);

static struct attribute *mtd_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_flags.attr,
	&dev_attr_size.attr,
	&dev_attr_erasesize.attr,
	&dev_attr_writesize.attr,
	&dev_attr_subpagesize.attr,
	&dev_attr_oobsize.attr,
	&dev_attr_numeraseregions.attr,
	&dev_attr_name.attr,
	&dev_attr_ecc_strength.attr,
	&dev_attr_ecc_step_size.attr,
	&dev_attr_corrected_bits.attr,
	&dev_attr_ecc_failures.attr,
	&dev_attr_bad_blocks.attr,
	&dev_attr_bbt_blocks.attr,
	&dev_attr_bitflip_threshold.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mtd);

static struct device_type mtd_devtype = {
	.name		= "mtd",
	.groups		= mtd_groups,
	.release	= mtd_release,
};

/**
 *	add_mtd_device - register an MTD device
 *	@mtd: pointer to new MTD device info structure
 *
 *	Add a device to the list of MTD devices present in the system, and
 *	notify each currently active MTD 'user' of its arrival. Returns
 *	zero on success or 1 on failure, which currently will only happen
 *	if there is insufficient memory or a sysfs error.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_notifier *not;
	int i, error;

	if (!mtd->backing_dev_info) {
		switch (mtd->type) {
		case MTD_RAM:
			mtd->backing_dev_info = &mtd_bdi_rw_mappable;
			break;
		case MTD_ROM:
			mtd->backing_dev_info = &mtd_bdi_ro_mappable;
			break;
		default:
			mtd->backing_dev_info = &mtd_bdi_unmappable;
			break;
		}
	}

	BUG_ON(mtd->writesize == 0);
	mutex_lock(&mtd_table_mutex);

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0)
		goto fail_locked;

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
	}

	/* Caller should have set dev.parent to match the
	 * physical device.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	if (device_register(&mtd->dev) != 0)
		goto fail_added;

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);
	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_added:
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return 1;
}

/**
 *	del_mtd_device - unregister an MTD device
 *	@mtd: pointer to MTD device info structure
 *
 *	Remove a device from the list of MTD devices present in the system,
 *	and notify each currently active MTD 'user' of its departure.
 *	Returns zero on success, %-ENODEV if the requested device does not
 *	appear to be present in the list, or %-EBUSY if it is still in use.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		device_unregister(&mtd->dev);

		idr_remove(&mtd_idr, mtd->index);

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > 0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * It first tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this function tries to fall back to information specified in
 *   @parts/@nr_parts.
 * * If any partitioning info was found, this function registers the found
 *   partitions.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int err;
	struct mtd_partition *real_parts;

	err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
	if (err <= 0 && nr_parts && parts) {
		real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
				     GFP_KERNEL);
		if (!real_parts)
			err = -ENOMEM;
		else
			err = nr_parts;
	}

	if (err > 0) {
		err = add_mtd_partitions(mtd, real_parts, err);
		kfree(real_parts);
	} else if (err == 0) {
		err = add_mtd_device(mtd);
		if (err == 1)
			err = -ENODEV;
	}

	return err;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
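
/*
 * A minimal sketch of how a flash driver's probe routine might use the
 * helper above; the partition table and names here are hypothetical,
 * not part of this file:
 *
 *	static const struct mtd_partition example_parts[] = {
 *		{ .name = "boot",   .offset = 0,
 *		  .size = SZ_512K },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	ret = mtd_device_parse_register(mtd, NULL, NULL, example_parts,
 *					ARRAY_SIZE(example_parts));
 *
 * Passing NULL @types selects the default parsers (e.g. "cmdlinepart"),
 * and example_parts is used only if no parser finds partitions.
 */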

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister.  This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);

/**
 *	register_mtd_user - register a 'user' of MTD devices.
 *	@new: pointer to notifier info structure
 *
 *	Registers a pair of callback functions to be called upon addition
 *	or removal of MTD devices. Causes the 'add' callback to be immediately
 *	invoked for each MTD device currently present in the system.
 */
void register_mtd_user(struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
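
/*
 * A sketch of a typical 'user': mtdchar and mtd_blkdevs register a
 * notifier much like the hypothetical one below, whose add()/remove()
 * hooks are then called for every present and future MTD device:
 *
 *	static void example_add(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
 *	}
 *
 *	static void example_remove(struct mtd_info *mtd)
 *	{
 *		pr_info("mtd%d going away\n", mtd->index);
 *	}
 *
 *	static struct mtd_notifier example_notifier = {
 *		.add	= example_add,
 *		.remove	= example_remove,
 *	};
 *
 *	register_mtd_user(&example_notifier);
 */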

/**
 *	unregister_mtd_user - unregister a 'user' of MTD devices.
 *	@old: pointer to notifier info structure
 *
 *	Removes a callback function pair from the list of 'users' to be
 *	notified upon addition or removal of MTD devices. Causes the
 *	'remove' callback to be immediately invoked for each MTD device
 *	currently present in the system.
 */
int unregister_mtd_user(struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 *	get_mtd_device - obtain a validated handle for an MTD device
 *	@mtd: last known address of the required MTD device
 *	@num: internal device number of the required MTD device
 *
 *	Given a number and NULL address, return the num'th entry in the device
 *	table, if any. Given an address and num == -1, search the device table
 *	for a device with that address and return it if it's still present.
 *	Given both, return the num'th device only if its address matches.
 *	Return an error pointer if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);

int __get_mtd_device(struct mtd_info *mtd)
{
	int err;

	if (!try_module_get(mtd->owner))
		return -ENODEV;

	if (mtd->_get_device) {
		err = mtd->_get_device(mtd);

		if (err) {
			module_put(mtd->owner);
			return err;
		}
	}
	mtd->usecount++;
	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 *	get_mtd_device_nm - obtain a validated handle for an MTD device by
 *	device name
 *	@name: MTD device name to open
 *
 *	This function returns the MTD device description structure in case
 *	of success and an error pointer in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
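
/*
 * Typical lookup-by-name usage (a sketch; the device name is
 * hypothetical). The handle pins the owning driver module and must be
 * released with put_mtd_device():
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("rootfs");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	...
 *	put_mtd_device(mtd);
 */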

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	--mtd->usecount;
	BUG_ON(mtd->usecount < 0);

	if (mtd->_put_device)
		mtd->_put_device(mtd);

	module_put(mtd->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is an asynchronous operation.  Device drivers are supposed
 * to call instr->callback() whenever the operation completes, even
 * if it completes with a failure.
 * Callers are supposed to pass a callback function and wait for it
 * to be called before writing to the block.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	if (!instr->len) {
		instr->state = MTD_ERASE_DONE;
		mtd_erase_callback(instr);
		return 0;
	}
	return mtd->_erase(mtd, instr);
}
EXPORT_SYMBOL_GPL(mtd_erase);
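
/*
 * A sketch of the usual synchronous wrapper callers build around the
 * asynchronous interface above (function names hypothetical; mtdchar
 * uses this pattern): the callback wakes a wait queue stashed in
 * instr->priv, and the caller sleeps until the state reaches
 * MTD_ERASE_DONE or MTD_ERASE_FAILED.
 *
 *	static void example_erase_callback(struct erase_info *instr)
 *	{
 *		wake_up((wait_queue_head_t *)instr->priv);
 *	}
 *
 *	static int example_erase_sync(struct mtd_info *mtd, uint64_t ofs,
 *				      uint64_t len)
 *	{
 *		DECLARE_WAITQUEUE(wait, current);
 *		wait_queue_head_t waitq;
 *		struct erase_info instr = {
 *			.mtd      = mtd,
 *			.addr     = ofs,
 *			.len      = len,
 *			.callback = example_erase_callback,
 *			.priv     = (u_long)&waitq,
 *		};
 *		int ret;
 *
 *		init_waitqueue_head(&waitq);
 *		ret = mtd_erase(mtd, &instr);
 *		if (!ret) {
 *			set_current_state(TASK_UNINTERRUPTIBLE);
 *			add_wait_queue(&waitq, &wait);
 *			if (instr.state != MTD_ERASE_DONE &&
 *			    instr.state != MTD_ERASE_FAILED)
 *				schedule();
 *			remove_wait_queue(&waitq, &wait);
 *			set_current_state(TASK_RUNNING);
 *			ret = (instr.state == MTD_ERASE_FAILED) ? -EIO : 0;
 *		}
 *		return ret;
 *	}
 */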

/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!mtd->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_point(mtd, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_unpoint)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unpoint(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);
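
/*
 * A sketch of typical point/unpoint usage (variables hypothetical):
 * map a region for direct access, falling back to mtd_read() when the
 * device cannot provide a mapping:
 *
 *	size_t retlen;
 *	void *virt;
 *
 *	ret = mtd_point(mtd, from, len, &retlen, &virt, NULL);
 *	if (!ret) {
 *		memcpy(buf, virt, retlen);
 *		mtd_unpoint(mtd, from, retlen);
 *	} else if (ret == -EOPNOTSUPP) {
 *		ret = mtd_read(mtd, from, len, &retlen, buf);
 *	}
 */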

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -EOPNOTSUPP to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	if (!mtd->_get_unmapped_area)
		return -EOPNOTSUPP;
	if (offset >= mtd->size || len > mtd->size - offset)
		return -EINVAL;
	return mtd->_get_unmapped_area(mtd, len, offset, flags);
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	int ret_code;
	*retlen = 0;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	/*
	 * In the absence of an error, drivers return a non-negative integer
	 * representing the maximum number of bitflips that were corrected on
	 * any one ecc region (if applicable; zero otherwise).
	 */
	ret_code = mtd->_read(mtd, from, len, retlen, buf);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read);
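
/*
 * Callers are expected to treat -EUCLEAN as "data recovered, but act
 * soon", not as a hard failure; UBI, for instance, schedules the
 * affected eraseblock for scrubbing. A sketch (the helper is
 * hypothetical):
 *
 *	err = mtd_read(mtd, from, len, &retlen, buf);
 *	if (err == -EUCLEAN)
 *		example_mark_for_scrubbing(mtd, from);
 *	else if (err)
 *		return err;	// real I/O or uncorrectable ECC error
 */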

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	*retlen = 0;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);

/*
 * In blackbox flight-recorder-like scenarios we want to make successful
 * writes in interrupt context. panic_write() is only intended to be called
 * when it is known the kernel is about to panic and we need the write to
 * succeed. Since the kernel is not going to be running for much longer,
 * this function can break locks and delay to ensure the write succeeds
 * (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	*retlen = 0;
	if (!mtd->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_panic_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	int ret_code;
	ops->retlen = ops->oobretlen = 0;
	if (!mtd->_read_oob)
		return -EOPNOTSUPP;
	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	ret_code = mtd->_read_oob(mtd, from, ops);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
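
/*
 * A sketch of reading one page plus its OOB area in a single call
 * (buffer names and the page address are hypothetical; sizes come from
 * the device):
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_PLACE_OOB,
 *		.len	= mtd->writesize,
 *		.ooblen	= mtd->oobsize,
 *		.datbuf	= databuf,
 *		.oobbuf	= oobbuf,
 *	};
 *
 *	err = mtd_read_oob(mtd, page_addr, &ops);
 *	// ops.retlen / ops.oobretlen report what was actually read
 */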

/*
 * Methods to access the protection register area, present in some flash
 * devices. The user data is one-time programmable but the factory data is
 * read only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_user_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	int ret;

	*retlen = 0;
	if (!mtd->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, we are out of space in the
	 * one-time-programmable area and must return -ENOSPC.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_lock_user_prot_reg(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_lock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unlock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_is_locked(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);

int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!mtd->_block_isreserved)
		return 0;
	return mtd->_block_isreserved(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!mtd->_block_isbad)
		return 0;
	return mtd->_block_isbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	if (!mtd->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return mtd->_block_markbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
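
/*
 * A sketch of the usual scan loop built on the helpers above: walk the
 * device an eraseblock at a time and count bad blocks (the function
 * name is hypothetical):
 *
 *	static int example_count_bad(struct mtd_info *mtd)
 *	{
 *		loff_t ofs;
 *		int bad = 0;
 *
 *		for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize)
 *			if (mtd_block_isbad(mtd, ofs) > 0)
 *				bad++;
 *		return bad;
 *	}
 */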

/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			      unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
				vecs[i].iov_base);
		totlen += thislen;
		if (ret || thislen != vecs[i].iov_len)
			break;
		to += vecs[i].iov_len;
	}
	*retlen = totlen;
	return ret;
}

/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!mtd->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);
	return mtd->_writev(mtd, vecs, count, to, retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
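
/*
 * A sketch of a vectored write: gather two hypothetical buffers into
 * one sequential write at @to:
 *
 *	struct kvec vecs[2] = {
 *		{ .iov_base = hdr,  .iov_len = hdr_len  },
 *		{ .iov_base = body, .iov_len = body_len },
 *	};
 *	size_t retlen;
 *
 *	err = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
 */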

/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
 * to handle smaller (i.e. degraded) buffer allocations under low- or
 * fragmented-memory situations where such reduced allocations, from a
 * requested ideal, are allowed.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
		      __GFP_NORETRY | __GFP_NO_KSWAPD;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
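
/*
 * A sketch of the intended calling pattern: ask for a whole eraseblock,
 * accept whatever smaller (writesize-aligned) buffer is available, and
 * then work through the region in chunks of the achieved size:
 *
 *	size_t size = mtd->erasesize;
 *	void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... read/write the region 'size' bytes at a time ...
 *	kfree(buf);
 */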

#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}

static int mtd_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtd_proc_show, NULL);
}

static const struct file_operations mtd_proc_ops = {
	.open		= mtd_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS */

/*====================================================================*/
/* Init code */

static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
{
	int ret;

	ret = bdi_init(bdi);
	if (!ret)
		ret = bdi_register(bdi, NULL, "%s", name);

	if (ret)
		bdi_destroy(bdi);

	return ret;
}

static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	ret = mtd_bdi_init(&mtd_bdi_unmappable, "mtd-unmap");
	if (ret)
		goto err_bdi1;

	ret = mtd_bdi_init(&mtd_bdi_ro_mappable, "mtd-romap");
	if (ret)
		goto err_bdi2;

	ret = mtd_bdi_init(&mtd_bdi_rw_mappable, "mtd-rwmap");
	if (ret)
		goto err_bdi3;

	proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	bdi_destroy(&mtd_bdi_rw_mappable);
err_bdi3:
	bdi_destroy(&mtd_bdi_ro_mappable);
err_bdi2:
	bdi_destroy(&mtd_bdi_unmappable);
err_bdi1:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}

static void __exit cleanup_mtd(void)
{
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_destroy(&mtd_bdi_unmappable);
	bdi_destroy(&mtd_bdi_ro_mappable);
	bdi_destroy(&mtd_bdi_rw_mappable);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");