/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>

#include "mtdcore.h"

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *master;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure.
 */
static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
{
	return container_of(mtd, struct mtd_part, mtd);
}


/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->master->ecc_stats;
	res = part->master->_read(part->master, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->master->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->master->ecc_stats.corrected - stats.corrected;
	return res;
}

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->master->_point(part->master, from + part->offset, len,
				    retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->master->_unpoint(part->master, from + part->offset, len);
}

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	struct mtd_part *part = mtd_to_part(mtd);

	offset += part->offset;
	return part->master->_get_unmapped_area(part->master, len, offset,
						flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		len = mtd_oobavail(mtd, ops);
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}
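
	/*
	 * Illustration of the bound check above (hypothetical numbers, not
	 * taken from any particular device): on a 1MiB partition with 2KiB
	 * pages and 64 usable OOB bytes per page, a read starting at
	 * 'from' = 0 spans 1MiB / 2KiB = 512 pages, so at most
	 * 512 * 64 = 32768 OOB bytes (less ops->ooboffs) may be requested
	 * before the read would run past the end of the partition.
	 */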

	res = part->master->_read_oob(part->master, from + part->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->master->_read_user_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->master->_get_user_prot_info(part->master, len, retlen,
						 buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->master->_read_fact_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->master->_get_fact_prot_info(part->master, len, retlen,
						 buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->master->_write(part->master, to + part->offset, len,
				    retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->master->_panic_write(part->master, to + part->offset, len,
					  retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = mtd_to_part(mtd);

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->_write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->master->_write_user_prot_reg(part->master, from, len,
						  retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->master->_lock_user_prot_reg(part->master, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->master->_writev(part->master, vecs, count,
				     to + part->offset, retlen);
}

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int ret;

	instr->addr += part->offset;
	ret = part->master->_erase(part->master, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		struct mtd_part *part = mtd_to_part(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->master->_lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->master->_unlock(part->master, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->master->_is_locked(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->master->_sync(part->master);
}

static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->master->_suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->master->_resume(part->master);
}

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	ofs += part->offset;
	return part->master->_block_isreserved(part->master, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	ofs += part->offset;
	return part->master->_block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int res;

	ofs += part->offset;
	res = part->master->_block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static int part_get_device(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->master->_get_device(part->master);
}

static void part_put_device(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->master->_put_device(part->master);
}

static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
			      struct mtd_oob_region *oobregion)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return mtd_ooblayout_ecc(part->master, section, oobregion);
}

static int part_ooblayout_free(struct mtd_info *mtd, int section,
			       struct mtd_oob_region *oobregion)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return mtd_ooblayout_free(part->master, section, oobregion);
}

static const struct mtd_ooblayout_ops part_ooblayout_ops = {
	.ecc = part_ooblayout_ecc,
	.free = part_ooblayout_free,
};

static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0) {
				err = ret;
				continue;
			}
			list_del(&slave->list);
			free_partition(slave);
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}

static struct mtd_part *allocate_partition(struct mtd_info *master,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	struct mtd_part *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.writebufsize = master->writebufsize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;
	slave->mtd.pairing = master->pairing;

	slave->mtd.name = name;
	slave->mtd.owner = master->owner;

	/* NOTE: Historically, we didn't arrange MTDs as a tree out of
	 * concern for showing the same data in multiple partitions.
	 * However, it is very useful to have the master node present,
	 * so the MTD_PARTITIONED_MASTER option allows that. The master
	 * will have device nodes etc only if this is set, so make the
	 * parent conditional on that option. Note, this is a way to
	 * distinguish between the master and the partition in sysfs.
	 */
	slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) ?
				&master->dev :
				master->dev.parent;

	slave->mtd._read = part_read;
	slave->mtd._write = part_write;

	if (master->_panic_write)
		slave->mtd._panic_write = part_panic_write;

	if (master->_point && master->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}

	if (master->_get_unmapped_area)
		slave->mtd._get_unmapped_area = part_get_unmapped_area;
	if (master->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (master->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->mtd._sync = part_sync;
	if (!partno && !master->dev.class && master->_suspend &&
	    master->_resume) {
		slave->mtd._suspend = part_suspend;
		slave->mtd._resume = part_resume;
	}
	if (master->_writev)
		slave->mtd._writev = part_writev;
	if (master->_lock)
		slave->mtd._lock = part_lock;
	if (master->_unlock)
		slave->mtd._unlock = part_unlock;
	if (master->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (master->_block_isreserved)
		slave->mtd._block_isreserved = part_block_isreserved;
	if (master->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;

	if (master->_get_device)
		slave->mtd._get_device = part_get_device;
	if (master->_put_device)
		slave->mtd._put_device = part_put_device;

	slave->mtd._erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
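
	/*
	 * MTDPART_OFS_NXTBLK example (illustrative numbers, not from any
	 * particular board): with a 128KiB (0x20000) erasesize and
	 * cur_offset = 0x30000, the rounding above moves the partition up
	 * to the next erase block boundary at 0x40000.
	 */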
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (master->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = master->size - slave->offset
							- slave->mtd.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, master->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
	slave->mtd.ecc_step_size = master->ecc_step_size;
	slave->mtd.ecc_strength = master->ecc_strength;
	slave->mtd.bitflip_threshold = master->bitflip_threshold;

	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isreserved(master, offs + slave->offset))
				slave->mtd.ecc_stats.bbtblocks++;
			else if (mtd_block_isbad(master, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}

static ssize_t mtd_partition_offset_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_part *part = mtd_to_part(mtd);
	return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
}

static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);

static const struct attribute *mtd_partition_attrs[] = {
	&dev_attr_offset.attr,
	NULL
};

static int mtd_add_partition_attrs(struct mtd_part *new)
{
	int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
	if (ret)
		printk(KERN_WARNING
		       "mtd: failed to create partition attrs, err=%d\n", ret);
	return ret;
}

int mtd_add_partition(struct mtd_info *master, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *new;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;

	if (length <= 0)
		return -EINVAL;

	memset(&part, 0, sizeof(part));
	part.name = name;
	part.size = length;
	part.offset = offset;

	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	mutex_lock(&mtd_partitions_mutex);
	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	mtd_add_partition_attrs(new);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->master == master) &&
		    (slave->mtd.index == partno)) {
			sysfs_remove_files(&slave->mtd.dev.kobj,
					   mtd_partition_attrs);
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0)
				break;

			list_del(&slave->list);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * For historical reasons, this function's caller only registers the master
 * if the MTD_PARTITIONED_MASTER config option is set.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave)) {
			del_mtd_partitions(master);
			return PTR_ERR(slave);
		}

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);
		mtd_add_partition_attrs(slave);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}
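
/*
 * Sketch of a typical caller (hypothetical names and sizes, not part of
 * this file): a flash map or NAND driver describes its layout with a
 * static table and hands it to add_mtd_partitions(), using
 * MTDPART_OFS_APPEND and MTDPART_SIZ_FULL for the derived fields.
 * SZ_512K/SZ_4M come from <linux/sizes.h>; setting mask_flags to
 * MTD_WRITEABLE strips that flag from the slave, making it read-only:
 *
 *	static const struct mtd_partition example_parts[] = {
 *		{ .name = "boot", .offset = 0, .size = SZ_512K,
 *		  .mask_flags = MTD_WRITEABLE },
 *		{ .name = "kernel", .offset = MTDPART_OFS_APPEND,
 *		  .size = SZ_4M },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	err = add_mtd_partitions(master, example_parts,
 *				 ARRAY_SIZE(example_parts));
 */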

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *mtd_part_parser_get(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
{
	module_put(p->owner);
}

/*
 * Many partition parsers just expected the core to kfree() all their data in
 * one chunk. Do that by default.
 */
static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
					    int nr_parts)
{
	kfree(pparts);
}

int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
{
	p->owner = owner;

	if (!p->cleanup)
		p->cleanup = &mtd_part_parser_cleanup_default;

	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__register_mtd_parser);
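
/*
 * Minimal sketch of a parser registration (hypothetical parser, for
 * illustration only; "examplepart" and example_parse_fn are made-up
 * names). The register_mtd_parser() wrapper in <linux/mtd/partitions.h>
 * supplies THIS_MODULE as the owner, and .cleanup is left NULL so the
 * default kfree()-based cleanup above is used:
 *
 *	static int example_parse_fn(struct mtd_info *master,
 *				    const struct mtd_partition **pparts,
 *				    struct mtd_part_parser_data *data)
 *	{
 *		// Allocate *pparts in one kzalloc() chunk and fill it in;
 *		// return the number of partitions found, or -errno.
 *		return 0;
 *	}
 *
 *	static struct mtd_part_parser example_parser = {
 *		.name = "examplepart",
 *		.parse_fn = example_parse_fn,
 *	};
 *
 *	register_mtd_parser(&example_parser);
 */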

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: info about partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser that succeeds.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero otherwise, and @pparts will describe the partitions, number of
 *   partitions, and the parser which parsed them. Caller must release
 *   resources with mtd_part_parser_cleanup() when finished with the returned
 *   data.
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partitions *pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret, err = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; *types; types++) {
		pr_debug("%s: parsing partitions %s\n", master->name, *types);
		parser = mtd_part_parser_get(*types);
		if (!parser && !request_module("%s", *types))
			parser = mtd_part_parser_get(*types);
		pr_debug("%s: got parser %s\n", master->name,
			 parser ? parser->name : NULL);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, &pparts->parts, data);
		pr_debug("%s: parser %s: %i\n",
			 master->name, parser->name, ret);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
			pparts->nr_parts = ret;
			pparts->parser = parser;
			return 0;
		}
		mtd_part_parser_put(parser);
		/*
		 * Stash the first error we see; only report it if no parser
		 * succeeds
		 */
		if (ret < 0 && !err)
			err = ret;
	}
	return err;
}
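
/*
 * Sketch of the usual call sequence (roughly what the MTD core does when
 * a device is registered; error handling trimmed for brevity, and the
 * variable names are illustrative):
 *
 *	struct mtd_partitions parsed = { };
 *
 *	ret = parse_mtd_partitions(master, NULL, &parsed, NULL);
 *	if (!ret && parsed.nr_parts)
 *		add_mtd_partitions(master, parsed.parts, parsed.nr_parts);
 *	mtd_part_parser_cleanup(&parsed);
 */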

void mtd_part_parser_cleanup(struct mtd_partitions *parts)
{
	const struct mtd_part_parser *parser;

	if (!parts)
		return;

	parser = parts->parser;
	if (parser) {
		if (parser->cleanup)
			parser->cleanup(parts->parts, parts->nr_parts);

		mtd_part_parser_put(parser);
	}
}

int mtd_is_partition(const struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return mtd->size;

	return mtd_to_part(mtd)->master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);
894