// SPDX-License-Identifier: GPL-2.0+
/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 */

#ifndef __UBOOT__
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#endif

#include <common.h>
#include <malloc.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <ubi_uboot.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>

#include "mtdcore.h"

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
#ifndef __UBOOT__
static DEFINE_MUTEX(mtd_partitions_mutex);
#else
DEFINE_MUTEX(mtd_partitions_mutex);
#endif

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *master;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
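/* The plain cast is valid only because 'mtd' is the first member of struct mtd_part. */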
#define PART(x) ((struct mtd_part *)(x))


#ifdef __UBOOT__
/* from mm/util.c */

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
#endif

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;
	int res;

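	/*
	 * Snapshot the master's ECC statistics so that any bitflips or
	 * failures caused by this read are also accounted to the partition.
	 */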
	stats = part->master->ecc_stats;
	res = part->master->_read(part->master, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->master->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->master->ecc_stats.corrected - stats.corrected;
	return res;
}

#ifndef __UBOOT__
static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = PART(mtd);

	return part->master->_point(part->master, from + part->offset, len,
				    retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);

	return part->master->_unpoint(part->master, from + part->offset, len);
}
#endif

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	struct mtd_part *part = PART(mtd);

	offset += part->offset;
	return part->master->_get_unmapped_area(part->master, len, offset,
						flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;
		else
			len = mtd->oobsize;
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = part->master->_read_oob(part->master, from + part->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_user_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_user_prot_info(part->master, len, retlen,
						 buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_fact_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_fact_prot_info(part->master, len, retlen,
						 buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_write(part->master, to + part->offset, len,
				    retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_panic_write(part->master, to + part->offset, len,
					  retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->_write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_write_user_prot_reg(part->master, from, len,
						  retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock_user_prot_reg(part->master, from, len);
}

#ifndef __UBOOT__
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_writev(part->master, vecs, count,
				     to + part->offset, retlen);
}
#endif

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;

	instr->addr += part->offset;
	ret = part->master->_erase(part->master, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}

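/*
 * When the master reports completion of an erase, translate the addresses
 * back to partition-relative values (if the erase went through a partition)
 * before invoking the caller's callback.
 */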
void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_unlock(part->master, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_is_locked(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_sync(part->master);
}

#ifndef __UBOOT__
static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_resume(part->master);
}
#endif

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	ofs += part->offset;
	return part->master->_block_isreserved(part->master, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	ofs += part->offset;
	return part->master->_block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	ofs += part->offset;
	res = part->master->_block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0) {
				err = ret;
				continue;
			}
			list_del(&slave->list);
			free_partition(slave);
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}

static struct mtd_part *allocate_partition(struct mtd_info *master,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	struct mtd_part *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.writebufsize = master->writebufsize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = name;
	slave->mtd.owner = master->owner;
#ifndef __UBOOT__
	slave->mtd.backing_dev_info = master->backing_dev_info;

	/* NOTE: we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->mtd.dev.parent = master->dev.parent;
#endif

	slave->mtd._read = part_read;
	slave->mtd._write = part_write;

	if (master->_panic_write)
		slave->mtd._panic_write = part_panic_write;

#ifndef __UBOOT__
	if (master->_point && master->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}
#endif

	if (master->_get_unmapped_area)
		slave->mtd._get_unmapped_area = part_get_unmapped_area;
	if (master->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (master->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->mtd._sync = part_sync;
#ifndef __UBOOT__
	if (!partno && !master->dev.class && master->_suspend &&
	    master->_resume) {
		slave->mtd._suspend = part_suspend;
		slave->mtd._resume = part_resume;
	}
	if (master->_writev)
		slave->mtd._writev = part_writev;
#endif
	if (master->_lock)
		slave->mtd._lock = part_lock;
	if (master->_unlock)
		slave->mtd._unlock = part_unlock;
	if (master->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (master->_block_isreserved)
		slave->mtd._block_isreserved = part_block_isreserved;
	if (master->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	slave->mtd._erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;

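	/*
	 * Resolve the "magic" offset values: MTDPART_OFS_APPEND starts the
	 * partition right after the previous one, MTDPART_OFS_NXTBLK does the
	 * same but rounds up to the next erase block, and MTDPART_OFS_RETAIN
	 * makes the partition span from the current offset to the end of the
	 * device minus part->size bytes, which are left unallocated.
	 */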
	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			debug("Moving partition %d: "
			      "0x%012llx -> 0x%012llx\n", partno,
			      (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (master->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = master->size - slave->offset
						- slave->mtd.size;
		} else {
			debug("mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
			      part->name, master->size - slave->offset,
			      slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	debug("0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
	      (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
		       part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
		       part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
		       part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
		       part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
	slave->mtd.ecc_step_size = master->ecc_step_size;
	slave->mtd.ecc_strength = master->ecc_strength;
	slave->mtd.bitflip_threshold = master->bitflip_threshold;

	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isbad(master, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}

#ifndef __UBOOT__
int mtd_add_partition(struct mtd_info *master, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *p, *new;
	uint64_t start, end;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;

	if (length <= 0)
		return -EINVAL;

	part.name = name;
	part.size = length;
	part.offset = offset;
	part.mask_flags = 0;
	part.ecclayout = NULL;

	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	start = offset;
	end = offset + length;

	mutex_lock(&mtd_partitions_mutex);
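	/* reject the request if its start or end falls inside an existing
	 * partition of the same master */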
	list_for_each_entry(p, &mtd_partitions, list)
		if (p->master == master) {
			if ((start >= p->offset) &&
			    (start < (p->offset + p->mtd.size)))
				goto err_inv;

			if ((end >= p->offset) &&
			    (end < (p->offset + p->mtd.size)))
				goto err_inv;
		}

	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	return ret;
err_inv:
	mutex_unlock(&mtd_partitions_mutex);
	free_partition(new);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->master == master) &&
		    (slave->mtd.index == partno)) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0)
				break;

			list_del(&slave->list);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
#endif

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */
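/*
 * Illustrative example (partition names, offsets and sizes below are made up):
 *
 *	static const struct mtd_partition parts[] = {
 *		{ .name = "spl",    .offset = 0,                  .size = 0x40000 },
 *		{ .name = "u-boot", .offset = MTDPART_OFS_APPEND, .size = 0x100000 },
 *		{ .name = "ubi",    .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	add_mtd_partitions(master, parts, ARRAY_SIZE(parts));
 */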

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

#ifdef __UBOOT__
	/*
	 * Need to init the list here, since LIST_INIT() does not
	 * work on platforms where relocation has problems (like MIPS
	 * & PPC).
	 */
	if (mtd_partitions.next == NULL)
		INIT_LIST_HEAD(&mtd_partitions);
#endif

	debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave))
			return PTR_ERR(slave);

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}

#ifndef __UBOOT__
static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

#define put_partition_parser(p) do { module_put((p)->owner); } while (0)

void register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: array of partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero if no partitions were found
 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, pparts, data);
		put_partition_parser(parser);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
			break;
		}
	}
	return ret;
}
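
/*
 * Typical call sequence (sketch):
 *
 *	struct mtd_partition *parts;
 *	int nr = parse_mtd_partitions(master, NULL, &parts, NULL);
 *
 *	if (nr > 0)
 *		add_mtd_partitions(master, parts, nr);
 */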
#endif

int mtd_is_partition(const struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return mtd->size;

	return PART(mtd)->master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);