1 /*
2  * Copyright (c) International Business Machines Corp., 2006
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12  * the GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17  *
18  * Author: Artem Bityutskiy (Битюцкий Артём)
19  */
20 
21 /*
22  * The UBI Eraseblock Association (EBA) sub-system.
23  *
24  * This sub-system is responsible for I/O to/from logical eraseblocks.
25  *
26  * Although in this implementation the EBA table is fully kept and managed in
27  * RAM, which implies poor scalability, it might be (partially) maintained on
28  * flash in future implementations.
29  *
30  * The EBA sub-system implements per-logical eraseblock locking. Before
31  * accessing a logical eraseblock it is locked for reading or writing. The
32  * per-logical eraseblock locking is implemented by means of the lock tree. The
33  * lock tree is an RB-tree which refers to all the currently locked logical
34  * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
35  * They are indexed by (@vol_id, @lnum) pairs.
36  *
37  * EBA also maintains the global sequence counter which is incremented each
38  * time a logical eraseblock is mapped to a physical eraseblock and it is
39  * stored in the volume identifier header. This means that each VID header has
40  * a unique sequence number. The sequence number is only increased and we assume
41  * 64 bits is enough to never overflow.
42  */
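
/*
 * A short note on the locking discipline implemented below: read paths take
 * the per-LEB lock shared ('leb_read_lock()'/'leb_read_unlock()'), while
 * operations which change the LEB contents or the LEB-to-PEB mapping (write,
 * un-map, atomic change, copy) take it exclusive
 * ('leb_write_lock()'/'leb_write_unlock()').
 */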
43 
44 #include <linux/slab.h>
45 #include <linux/crc32.h>
46 #include <linux/err.h>
47 #include "ubi.h"
48 
49 /* Number of physical eraseblocks reserved for atomic LEB change operation */
50 #define EBA_RESERVED_PEBS 1
51 
52 /**
53  * next_sqnum - get next sequence number.
54  * @ubi: UBI device description object
55  *
56  * This function returns the next sequence number to use, which is just the current
57  * global sequence counter value. It also increases the global sequence
58  * counter.
59  */
60 unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
61 {
62 	unsigned long long sqnum;
63 
64 	spin_lock(&ubi->ltree_lock);
65 	sqnum = ubi->global_sqnum++;
66 	spin_unlock(&ubi->ltree_lock);
67 
68 	return sqnum;
69 }
70 
71 /**
72  * ubi_get_compat - get compatibility flags of a volume.
73  * @ubi: UBI device description object
74  * @vol_id: volume ID
75  *
76  * This function returns compatibility flags for an internal volume. User
77  * volumes have no compatibility flags, so %0 is returned.
78  */
79 static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
80 {
81 	if (vol_id == UBI_LAYOUT_VOLUME_ID)
82 		return UBI_LAYOUT_VOLUME_COMPAT;
83 	return 0;
84 }
85 
86 /**
87  * ltree_lookup - look up the lock tree.
88  * @ubi: UBI device description object
89  * @vol_id: volume ID
90  * @lnum: logical eraseblock number
91  *
92  * This function returns a pointer to the corresponding &struct ubi_ltree_entry
93  * object if the logical eraseblock is locked and %NULL if it is not.
94  * @ubi->ltree_lock has to be locked.
95  */
96 static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
97 					    int lnum)
98 {
99 	struct rb_node *p;
100 
101 	p = ubi->ltree.rb_node;
102 	while (p) {
103 		struct ubi_ltree_entry *le;
104 
105 		le = rb_entry(p, struct ubi_ltree_entry, rb);
106 
107 		if (vol_id < le->vol_id)
108 			p = p->rb_left;
109 		else if (vol_id > le->vol_id)
110 			p = p->rb_right;
111 		else {
112 			if (lnum < le->lnum)
113 				p = p->rb_left;
114 			else if (lnum > le->lnum)
115 				p = p->rb_right;
116 			else
117 				return le;
118 		}
119 	}
120 
121 	return NULL;
122 }
123 
124 /**
125  * ltree_add_entry - add new entry to the lock tree.
126  * @ubi: UBI device description object
127  * @vol_id: volume ID
128  * @lnum: logical eraseblock number
129  *
130  * This function adds a new entry for logical eraseblock (@vol_id, @lnum) to the
131  * lock tree. If such an entry is already there, its usage counter is increased.
132  * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
133  * failed.
134  */
135 static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
136 					       int vol_id, int lnum)
137 {
138 	struct ubi_ltree_entry *le, *le1, *le_free;
139 
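	/*
	 * Note: GFP_NOFS is used here (and for the other allocations in this
	 * file) because UBI may be called from the UBIFS write-back path, and
	 * a GFP_KERNEL allocation could recurse back into the filesystem.
	 */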
140 	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
141 	if (!le)
142 		return ERR_PTR(-ENOMEM);
143 
144 	le->users = 0;
145 	init_rwsem(&le->mutex);
146 	le->vol_id = vol_id;
147 	le->lnum = lnum;
148 
149 	spin_lock(&ubi->ltree_lock);
150 	le1 = ltree_lookup(ubi, vol_id, lnum);
151 
152 	if (le1) {
153 		/*
154 		 * This logical eraseblock is already locked. The newly
155 		 * allocated lock entry is not needed.
156 		 */
157 		le_free = le;
158 		le = le1;
159 	} else {
160 		struct rb_node **p, *parent = NULL;
161 
162 		/*
163 		 * No lock entry, add the newly allocated one to the
164 		 * @ubi->ltree RB-tree.
165 		 */
166 		le_free = NULL;
167 
168 		p = &ubi->ltree.rb_node;
169 		while (*p) {
170 			parent = *p;
171 			le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
172 
173 			if (vol_id < le1->vol_id)
174 				p = &(*p)->rb_left;
175 			else if (vol_id > le1->vol_id)
176 				p = &(*p)->rb_right;
177 			else {
178 				ubi_assert(lnum != le1->lnum);
179 				if (lnum < le1->lnum)
180 					p = &(*p)->rb_left;
181 				else
182 					p = &(*p)->rb_right;
183 			}
184 		}
185 
186 		rb_link_node(&le->rb, parent, p);
187 		rb_insert_color(&le->rb, &ubi->ltree);
188 	}
189 	le->users += 1;
190 	spin_unlock(&ubi->ltree_lock);
191 
192 	kfree(le_free);
193 	return le;
194 }
195 
196 /**
197  * leb_read_lock - lock logical eraseblock for reading.
198  * @ubi: UBI device description object
199  * @vol_id: volume ID
200  * @lnum: logical eraseblock number
201  *
202  * This function locks a logical eraseblock for reading. Returns zero in case
203  * of success and a negative error code in case of failure.
204  */
205 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
206 {
207 	struct ubi_ltree_entry *le;
208 
209 	le = ltree_add_entry(ubi, vol_id, lnum);
210 	if (IS_ERR(le))
211 		return PTR_ERR(le);
212 	down_read(&le->mutex);
213 	return 0;
214 }
215 
216 /**
217  * leb_read_unlock - unlock logical eraseblock.
218  * @ubi: UBI device description object
219  * @vol_id: volume ID
220  * @lnum: logical eraseblock number
221  */
222 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
223 {
224 	struct ubi_ltree_entry *le;
225 
226 	spin_lock(&ubi->ltree_lock);
227 	le = ltree_lookup(ubi, vol_id, lnum);
228 	le->users -= 1;
229 	ubi_assert(le->users >= 0);
230 	up_read(&le->mutex);
231 	if (le->users == 0) {
232 		rb_erase(&le->rb, &ubi->ltree);
233 		kfree(le);
234 	}
235 	spin_unlock(&ubi->ltree_lock);
236 }
237 
238 /**
239  * leb_write_lock - lock logical eraseblock for writing.
240  * @ubi: UBI device description object
241  * @vol_id: volume ID
242  * @lnum: logical eraseblock number
243  *
244  * This function locks a logical eraseblock for writing. Returns zero in case
245  * of success and a negative error code in case of failure.
246  */
247 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
248 {
249 	struct ubi_ltree_entry *le;
250 
251 	le = ltree_add_entry(ubi, vol_id, lnum);
252 	if (IS_ERR(le))
253 		return PTR_ERR(le);
254 	down_write(&le->mutex);
255 	return 0;
256 }
257 
258 /**
259  * leb_write_trylock - try to lock logical eraseblock for writing.
260  * @ubi: UBI device description object
261  * @vol_id: volume ID
262  * @lnum: logical eraseblock number
263  *
264  * This function locks a logical eraseblock for writing if there is no
265  * contention and does nothing if there is contention. Returns %0 in case of
266  * success, %1 in case of contention, and a negative error code in case of
267  * failure.
268  */
269 static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
270 {
271 	struct ubi_ltree_entry *le;
272 
273 	le = ltree_add_entry(ubi, vol_id, lnum);
274 	if (IS_ERR(le))
275 		return PTR_ERR(le);
276 	if (down_write_trylock(&le->mutex))
277 		return 0;
278 
279 	/* Contention, cancel */
280 	spin_lock(&ubi->ltree_lock);
281 	le->users -= 1;
282 	ubi_assert(le->users >= 0);
283 	if (le->users == 0) {
284 		rb_erase(&le->rb, &ubi->ltree);
285 		kfree(le);
286 	}
287 	spin_unlock(&ubi->ltree_lock);
288 
289 	return 1;
290 }
291 
292 /**
293  * leb_write_unlock - unlock logical eraseblock.
294  * @ubi: UBI device description object
295  * @vol_id: volume ID
296  * @lnum: logical eraseblock number
297  */
298 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
299 {
300 	struct ubi_ltree_entry *le;
301 
302 	spin_lock(&ubi->ltree_lock);
303 	le = ltree_lookup(ubi, vol_id, lnum);
304 	le->users -= 1;
305 	ubi_assert(le->users >= 0);
306 	up_write(&le->mutex);
307 	if (le->users == 0) {
308 		rb_erase(&le->rb, &ubi->ltree);
309 		kfree(le);
310 	}
311 	spin_unlock(&ubi->ltree_lock);
312 }
313 
314 /**
315  * ubi_eba_unmap_leb - un-map logical eraseblock.
316  * @ubi: UBI device description object
317  * @vol: volume description object
318  * @lnum: logical eraseblock number
319  *
320  * This function un-maps logical eraseblock @lnum and schedules the corresponding
321  * physical eraseblock for erasure. Returns zero in case of success and a
322  * negative error code in case of failure.
323  */
324 int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
325 		      int lnum)
326 {
327 	int err, pnum, vol_id = vol->vol_id;
328 
329 	if (ubi->ro_mode)
330 		return -EROFS;
331 
332 	err = leb_write_lock(ubi, vol_id, lnum);
333 	if (err)
334 		return err;
335 
336 	pnum = vol->eba_tbl[lnum];
337 	if (pnum < 0)
338 		/* This logical eraseblock is already unmapped */
339 		goto out_unlock;
340 
341 	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
342 
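	/*
	 * The EBA table is changed under @ubi->fm_sem taken for reading, so
	 * that a fastmap write-out, which takes it for writing, always sees a
	 * consistent LEB-to-PEB mapping.
	 */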
343 	down_read(&ubi->fm_sem);
344 	vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
345 	up_read(&ubi->fm_sem);
346 	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
347 
348 out_unlock:
349 	leb_write_unlock(ubi, vol_id, lnum);
350 	return err;
351 }
352 
353 /**
354  * ubi_eba_read_leb - read data.
355  * @ubi: UBI device description object
356  * @vol: volume description object
357  * @lnum: logical eraseblock number
358  * @buf: buffer to store the read data
359  * @offset: offset from where to read
360  * @len: how many bytes to read
361  * @check: data CRC check flag
362  *
363  * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
364  * bytes. The @check flag only makes sense for static volumes and forces
365  * eraseblock data CRC checking.
366  *
367  * In case of success this function returns zero. In case of a static volume,
368  * if the data CRC does not match, %-EBADMSG is returned. %-EBADMSG may also be
369  * returned for any volume type if an ECC error was detected by the MTD device
370  * driver. Other negative error codes may be returned in case of other errors.
371  */
372 int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
373 		     void *buf, int offset, int len, int check)
374 {
375 	int err, pnum, scrub = 0, vol_id = vol->vol_id;
376 	struct ubi_vid_hdr *vid_hdr;
377 	uint32_t uninitialized_var(crc);
378 
379 	err = leb_read_lock(ubi, vol_id, lnum);
380 	if (err)
381 		return err;
382 
383 	pnum = vol->eba_tbl[lnum];
384 	if (pnum < 0) {
385 		/*
386 		 * The logical eraseblock is not mapped, fill the whole buffer
387 		 * with 0xFF bytes. The exception is static volumes for which
388 		 * it is an error to read unmapped logical eraseblocks.
389 		 */
390 		dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
391 			len, offset, vol_id, lnum);
392 		leb_read_unlock(ubi, vol_id, lnum);
393 		ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
394 		memset(buf, 0xFF, len);
395 		return 0;
396 	}
397 
398 	dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
399 		len, offset, vol_id, lnum, pnum);
400 
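	/* Only static volumes maintain a data CRC, so ignore @check otherwise */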
401 	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
402 		check = 0;
403 
404 retry:
405 	if (check) {
406 		vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
407 		if (!vid_hdr) {
408 			err = -ENOMEM;
409 			goto out_unlock;
410 		}
411 
412 		err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
413 		if (err && err != UBI_IO_BITFLIPS) {
414 			if (err > 0) {
415 				/*
416 				 * The header is either absent or corrupted.
417 				 * The former case means there is a bug -
418 				 * switch to read-only mode just in case.
419 				 * The latter case means a real corruption - we
420 				 * may try to recover data. FIXME: but this is
421 				 * not implemented.
422 				 */
423 				if (err == UBI_IO_BAD_HDR_EBADMSG ||
424 				    err == UBI_IO_BAD_HDR) {
425 					ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
426 						 pnum, vol_id, lnum);
427 					err = -EBADMSG;
428 				} else {
429 					/*
430 					 * Ending up here in the non-Fastmap case
431 					 * is a clear bug, as the VID header had to
432 					 * be present at scan time for the PEB to be
433 					 * referenced at all.
434 					 * With Fastmap the story is more complicated:
435 					 * Fastmap provides the mapping information
436 					 * without a full scan, so the LEB may have
437 					 * been unmapped in the meantime; Fastmap
438 					 * cannot know this and keeps the LEB
439 					 * referenced. This is valid and works because
440 					 * the layer above UBI has to do bookkeeping
441 					 * about used/referenced LEBs in any case.
442 					if (ubi->fast_attach) {
443 						err = -EBADMSG;
444 					} else {
445 						err = -EINVAL;
446 						ubi_ro_mode(ubi);
447 					}
448 				}
449 			}
450 			goto out_free;
451 		} else if (err == UBI_IO_BITFLIPS)
452 			scrub = 1;
453 
454 		ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
455 		ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
456 
457 		crc = be32_to_cpu(vid_hdr->data_crc);
458 		ubi_free_vid_hdr(ubi, vid_hdr);
459 	}
460 
461 	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
462 	if (err) {
463 		if (err == UBI_IO_BITFLIPS)
464 			scrub = 1;
465 		else if (mtd_is_eccerr(err)) {
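			/*
			 * An ECC error is fatal for dynamic volumes because
			 * there is no data CRC to fall back on. For static
			 * volumes, force a CRC check: if the CRC still
			 * matches, the data is returned and the PEB is only
			 * scheduled for scrubbing.
			 */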
466 			if (vol->vol_type == UBI_DYNAMIC_VOLUME)
467 				goto out_unlock;
468 			scrub = 1;
469 			if (!check) {
470 				ubi_msg("force data checking");
471 				check = 1;
472 				goto retry;
473 			}
474 		} else
475 			goto out_unlock;
476 	}
477 
478 	if (check) {
479 		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
480 		if (crc1 != crc) {
481 			ubi_warn("CRC error: calculated %#08x, must be %#08x",
482 				 crc1, crc);
483 			err = -EBADMSG;
484 			goto out_unlock;
485 		}
486 	}
487 
488 	if (scrub)
489 		err = ubi_wl_scrub_peb(ubi, pnum);
490 
491 	leb_read_unlock(ubi, vol_id, lnum);
492 	return err;
493 
494 out_free:
495 	ubi_free_vid_hdr(ubi, vid_hdr);
496 out_unlock:
497 	leb_read_unlock(ubi, vol_id, lnum);
498 	return err;
499 }
500 
501 /**
502  * recover_peb - recover from write failure.
503  * @ubi: UBI device description object
504  * @pnum: the physical eraseblock to recover
505  * @vol_id: volume ID
506  * @lnum: logical eraseblock number
507  * @buf: data which was not written because of the write failure
508  * @offset: offset of the failed write
509  * @len: how many bytes should have been written
510  *
511  * This function is called in case of a write failure and moves all good data
512  * from the potentially bad physical eraseblock to a good physical eraseblock.
513  * This function also writes the data which was not written due to the failure.
514  * Returns new physical eraseblock number in case of success, and a negative
515  * error code in case of failure.
516  */
517 static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
518 		       const void *buf, int offset, int len)
519 {
520 	int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
521 	struct ubi_volume *vol = ubi->volumes[idx];
522 	struct ubi_vid_hdr *vid_hdr;
523 
524 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
525 	if (!vid_hdr)
526 		return -ENOMEM;
527 
528 retry:
529 	new_pnum = ubi_wl_get_peb(ubi);
530 	if (new_pnum < 0) {
531 		ubi_free_vid_hdr(ubi, vid_hdr);
532 		return new_pnum;
533 	}
534 
535 	ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum);
536 
537 	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
538 	if (err && err != UBI_IO_BITFLIPS) {
539 		if (err > 0)
540 			err = -EIO;
541 		goto out_put;
542 	}
543 
544 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
545 	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
546 	if (err)
547 		goto write_error;
548 
549 	data_size = offset + len;
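	/* @ubi->peb_buf is shared, serialize access to it via @ubi->buf_mutex */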
550 	mutex_lock(&ubi->buf_mutex);
551 	memset(ubi->peb_buf + offset, 0xFF, len);
552 
553 	/* Read everything before the area where the write failure happened */
554 	if (offset > 0) {
555 		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
556 		if (err && err != UBI_IO_BITFLIPS)
557 			goto out_unlock;
558 	}
559 
560 	memcpy(ubi->peb_buf + offset, buf, len);
561 
562 	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
563 	if (err) {
564 		mutex_unlock(&ubi->buf_mutex);
565 		goto write_error;
566 	}
567 
568 	mutex_unlock(&ubi->buf_mutex);
569 	ubi_free_vid_hdr(ubi, vid_hdr);
570 
571 	down_read(&ubi->fm_sem);
572 	vol->eba_tbl[lnum] = new_pnum;
573 	up_read(&ubi->fm_sem);
574 	ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
575 
576 	ubi_msg("data was successfully recovered");
577 	return 0;
578 
579 out_unlock:
580 	mutex_unlock(&ubi->buf_mutex);
581 out_put:
582 	ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
583 	ubi_free_vid_hdr(ubi, vid_hdr);
584 	return err;
585 
586 write_error:
587 	/*
588 	 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
589 	 * get another one.
590 	 */
591 	ubi_warn("failed to write to PEB %d", new_pnum);
592 	ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
593 	if (++tries > UBI_IO_RETRIES) {
594 		ubi_free_vid_hdr(ubi, vid_hdr);
595 		return err;
596 	}
597 	ubi_msg("try again");
598 	goto retry;
599 }
600 
601 /**
602  * ubi_eba_write_leb - write data to dynamic volume.
603  * @ubi: UBI device description object
604  * @vol: volume description object
605  * @lnum: logical eraseblock number
606  * @buf: the data to write
607  * @offset: offset within the logical eraseblock where to write
608  * @len: how many bytes to write
609  *
610  * This function writes data to logical eraseblock @lnum of a dynamic volume
611  * @vol. Returns zero in case of success and a negative error code in case
612  * of failure. In case of error, it is possible that something was still
613  * written to the flash media, but may be some garbage.
614  */
615 int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
616 		      const void *buf, int offset, int len)
617 {
618 	int err, pnum, tries = 0, vol_id = vol->vol_id;
619 	struct ubi_vid_hdr *vid_hdr;
620 
621 	if (ubi->ro_mode)
622 		return -EROFS;
623 
624 	err = leb_write_lock(ubi, vol_id, lnum);
625 	if (err)
626 		return err;
627 
628 	pnum = vol->eba_tbl[lnum];
629 	if (pnum >= 0) {
630 		dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
631 			len, offset, vol_id, lnum, pnum);
632 
633 		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
634 		if (err) {
635 			ubi_warn("failed to write data to PEB %d", pnum);
636 			if (err == -EIO && ubi->bad_allowed)
637 				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
638 						  offset, len);
639 			if (err)
640 				ubi_ro_mode(ubi);
641 		}
642 		leb_write_unlock(ubi, vol_id, lnum);
643 		return err;
644 	}
645 
646 	/*
647 	 * The logical eraseblock is not mapped. We have to get a free physical
648 	 * eraseblock and write the volume identifier header there first.
649 	 */
650 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
651 	if (!vid_hdr) {
652 		leb_write_unlock(ubi, vol_id, lnum);
653 		return -ENOMEM;
654 	}
655 
656 	vid_hdr->vol_type = UBI_VID_DYNAMIC;
657 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
658 	vid_hdr->vol_id = cpu_to_be32(vol_id);
659 	vid_hdr->lnum = cpu_to_be32(lnum);
660 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
661 	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
662 
663 retry:
664 	pnum = ubi_wl_get_peb(ubi);
665 	if (pnum < 0) {
666 		ubi_free_vid_hdr(ubi, vid_hdr);
667 		leb_write_unlock(ubi, vol_id, lnum);
668 		return pnum;
669 	}
670 
671 	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
672 		len, offset, vol_id, lnum, pnum);
673 
674 	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
675 	if (err) {
676 		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
677 			 vol_id, lnum, pnum);
678 		goto write_error;
679 	}
680 
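	/*
	 * A zero-length write only puts the VID header in place, which simply
	 * maps the LEB (see the @len == 0 case in 'ubi_eba_atomic_leb_change()').
	 */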
681 	if (len) {
682 		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
683 		if (err) {
684 			ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
685 				 len, offset, vol_id, lnum, pnum);
686 			goto write_error;
687 		}
688 	}
689 
690 	down_read(&ubi->fm_sem);
691 	vol->eba_tbl[lnum] = pnum;
692 	up_read(&ubi->fm_sem);
693 
694 	leb_write_unlock(ubi, vol_id, lnum);
695 	ubi_free_vid_hdr(ubi, vid_hdr);
696 	return 0;
697 
698 write_error:
699 	if (err != -EIO || !ubi->bad_allowed) {
700 		ubi_ro_mode(ubi);
701 		leb_write_unlock(ubi, vol_id, lnum);
702 		ubi_free_vid_hdr(ubi, vid_hdr);
703 		return err;
704 	}
705 
706 	/*
707 	 * Fortunately, this is the first write operation to this physical
708 	 * eraseblock, so just put it and request a new one. We assume that if
709 	 * this physical eraseblock went bad, the erase code will handle that.
710 	 */
711 	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
712 	if (err || ++tries > UBI_IO_RETRIES) {
713 		ubi_ro_mode(ubi);
714 		leb_write_unlock(ubi, vol_id, lnum);
715 		ubi_free_vid_hdr(ubi, vid_hdr);
716 		return err;
717 	}
718 
719 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
720 	ubi_msg("try another PEB");
721 	goto retry;
722 }
723 
724 /**
725  * ubi_eba_write_leb_st - write data to static volume.
726  * @ubi: UBI device description object
727  * @vol: volume description object
728  * @lnum: logical eraseblock number
729  * @buf: data to write
730  * @len: how many bytes to write
731  * @used_ebs: how many logical eraseblocks will this volume contain
732  *
733  * This function writes data to logical eraseblock @lnum of static volume
734  * @vol. The @used_ebs argument should contain the total number of logical
735  * eraseblocks in this static volume.
736  *
737  * When writing to the last logical eraseblock, the @len argument doesn't have
738  * to be aligned to the minimal I/O unit size. Instead, it has to be equal
739  * to the real data size, although the @buf buffer has to be padded to the
740  * aligned size. In all other cases, @len has to be aligned.
741  *
742  * It is prohibited to write more than once to logical eraseblocks of static
743  * volumes. This function returns zero in case of success and a negative error
744  * code in case of failure.
745  */
746 int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
747 			 int lnum, const void *buf, int len, int used_ebs)
748 {
749 	int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
750 	struct ubi_vid_hdr *vid_hdr;
751 	uint32_t crc;
752 
753 	if (ubi->ro_mode)
754 		return -EROFS;
755 
756 	if (lnum == used_ebs - 1)
757 		/* If this is the last LEB @len may be unaligned */
758 		len = ALIGN(data_size, ubi->min_io_size);
759 	else
760 		ubi_assert(!(len & (ubi->min_io_size - 1)));
761 
762 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
763 	if (!vid_hdr)
764 		return -ENOMEM;
765 
766 	err = leb_write_lock(ubi, vol_id, lnum);
767 	if (err) {
768 		ubi_free_vid_hdr(ubi, vid_hdr);
769 		return err;
770 	}
771 
772 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
773 	vid_hdr->vol_id = cpu_to_be32(vol_id);
774 	vid_hdr->lnum = cpu_to_be32(lnum);
775 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
776 	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
777 
778 	crc = crc32(UBI_CRC32_INIT, buf, data_size);
779 	vid_hdr->vol_type = UBI_VID_STATIC;
780 	vid_hdr->data_size = cpu_to_be32(data_size);
781 	vid_hdr->used_ebs = cpu_to_be32(used_ebs);
782 	vid_hdr->data_crc = cpu_to_be32(crc);
783 
784 retry:
785 	pnum = ubi_wl_get_peb(ubi);
786 	if (pnum < 0) {
787 		ubi_free_vid_hdr(ubi, vid_hdr);
788 		leb_write_unlock(ubi, vol_id, lnum);
789 		return pnum;
790 	}
791 
792 	dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
793 		len, vol_id, lnum, pnum, used_ebs);
794 
795 	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
796 	if (err) {
797 		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
798 			 vol_id, lnum, pnum);
799 		goto write_error;
800 	}
801 
802 	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
803 	if (err) {
804 		ubi_warn("failed to write %d bytes of data to PEB %d",
805 			 len, pnum);
806 		goto write_error;
807 	}
808 
809 	ubi_assert(vol->eba_tbl[lnum] < 0);
810 	down_read(&ubi->fm_sem);
811 	vol->eba_tbl[lnum] = pnum;
812 	up_read(&ubi->fm_sem);
813 
814 	leb_write_unlock(ubi, vol_id, lnum);
815 	ubi_free_vid_hdr(ubi, vid_hdr);
816 	return 0;
817 
818 write_error:
819 	if (err != -EIO || !ubi->bad_allowed) {
820 		/*
821 		 * This flash device does not admit of bad eraseblocks or
822 		 * something nasty and unexpected happened. Switch to read-only
823 		 * mode just in case.
824 		 */
825 		ubi_ro_mode(ubi);
826 		leb_write_unlock(ubi, vol_id, lnum);
827 		ubi_free_vid_hdr(ubi, vid_hdr);
828 		return err;
829 	}
830 
831 	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
832 	if (err || ++tries > UBI_IO_RETRIES) {
833 		ubi_ro_mode(ubi);
834 		leb_write_unlock(ubi, vol_id, lnum);
835 		ubi_free_vid_hdr(ubi, vid_hdr);
836 		return err;
837 	}
838 
839 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
840 	ubi_msg("try another PEB");
841 	goto retry;
842 }
843 
844 /**
845  * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
846  * @ubi: UBI device description object
847  * @vol: volume description object
848  * @lnum: logical eraseblock number
849  * @buf: data to write
850  * @len: how many bytes to write
851  *
852  * This function changes the contents of a logical eraseblock atomically. @buf
853  * has to contain new logical eraseblock data, and @len - the length of the
854  * data, which has to be aligned. This function guarantees that in case of an
855  * unclean reboot the old contents are preserved. Returns zero in case of
856  * success and a negative error code in case of failure.
857  *
858  * UBI reserves one PEB for the "atomic LEB change" operation, so only one
859  * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
860  */
861 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
862 			      int lnum, const void *buf, int len)
863 {
864 	int err, pnum, old_pnum, tries = 0, vol_id = vol->vol_id;
865 	struct ubi_vid_hdr *vid_hdr;
866 	uint32_t crc;
867 
868 	if (ubi->ro_mode)
869 		return -EROFS;
870 
871 	if (len == 0) {
872 		/*
873 		 * Special case when data length is zero. In this case the LEB
874 		 * has to be unmapped and mapped somewhere else.
875 		 */
876 		err = ubi_eba_unmap_leb(ubi, vol, lnum);
877 		if (err)
878 			return err;
879 		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
880 	}
881 
882 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
883 	if (!vid_hdr)
884 		return -ENOMEM;
885 
886 	mutex_lock(&ubi->alc_mutex);
887 	err = leb_write_lock(ubi, vol_id, lnum);
888 	if (err)
889 		goto out_mutex;
890 
891 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
892 	vid_hdr->vol_id = cpu_to_be32(vol_id);
893 	vid_hdr->lnum = cpu_to_be32(lnum);
894 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
895 	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
896 
897 	crc = crc32(UBI_CRC32_INIT, buf, len);
898 	vid_hdr->vol_type = UBI_VID_DYNAMIC;
899 	vid_hdr->data_size = cpu_to_be32(len);
900 	vid_hdr->copy_flag = 1;
901 	vid_hdr->data_crc = cpu_to_be32(crc);
902 
903 retry:
904 	pnum = ubi_wl_get_peb(ubi);
905 	if (pnum < 0) {
906 		err = pnum;
907 		goto out_leb_unlock;
908 	}
909 
910 	dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
911 		vol_id, lnum, vol->eba_tbl[lnum], pnum);
912 
913 	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
914 	if (err) {
915 		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
916 			 vol_id, lnum, pnum);
917 		goto write_error;
918 	}
919 
920 	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
921 	if (err) {
922 		ubi_warn("failed to write %d bytes of data to PEB %d",
923 			 len, pnum);
924 		goto write_error;
925 	}
926 
927 	down_read(&ubi->fm_sem);
928 	old_pnum = vol->eba_tbl[lnum];
929 	vol->eba_tbl[lnum] = pnum;
930 	up_read(&ubi->fm_sem);
931 
932 	if (old_pnum >= 0) {
933 		err = ubi_wl_put_peb(ubi, vol_id, lnum, old_pnum, 0);
934 		if (err)
935 			goto out_leb_unlock;
936 	}
937 
938 out_leb_unlock:
939 	leb_write_unlock(ubi, vol_id, lnum);
940 out_mutex:
941 	mutex_unlock(&ubi->alc_mutex);
942 	ubi_free_vid_hdr(ubi, vid_hdr);
943 	return err;
944 
945 write_error:
946 	if (err != -EIO || !ubi->bad_allowed) {
947 		/*
948 		 * This flash device does not admit of bad eraseblocks or
949 		 * something nasty and unexpected happened. Switch to read-only
950 		 * mode just in case.
951 		 */
952 		ubi_ro_mode(ubi);
953 		goto out_leb_unlock;
954 	}
955 
956 	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
957 	if (err || ++tries > UBI_IO_RETRIES) {
958 		ubi_ro_mode(ubi);
959 		goto out_leb_unlock;
960 	}
961 
962 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
963 	ubi_msg("try another PEB");
964 	goto retry;
965 }
966 
967 /**
968  * is_error_sane - check whether a read error is sane.
969  * @err: code of the error happened during reading
970  *
971  * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
972  * cannot read data from the target PEB (an error @err happened). If the error
973  * code is sane, then we treat this error as non-fatal. Otherwise the error is
974  * fatal and UBI will be switched to R/O mode later.
975  *
976  * The idea is that we try not to switch to R/O mode if the read error is
977  * something which suggests there was a real read problem. E.g., %-EIO. Or a
978  * memory allocation failed (%-ENOMEM). Otherwise, it is safer to switch to R/O
979  * mode, simply because we do not know what happened at the MTD level, and we
980  * cannot handle this. E.g., the underlying driver may have become crazy, and
981  * it is safer to switch to R/O mode to preserve the data.
982  *
983  * And bear in mind, this is about reading from the target PEB, i.e. the PEB
984  * which we have just written.
985  */
986 static int is_error_sane(int err)
987 {
988 	if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
989 	    err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
990 		return 0;
991 	return 1;
992 }
993 
994 /**
995  * ubi_eba_copy_leb - copy logical eraseblock.
996  * @ubi: UBI device description object
997  * @from: physical eraseblock number from where to copy
998  * @to: physical eraseblock number where to copy
999  * @vid_hdr: VID header of the @from physical eraseblock
1000  *
1001  * This function copies logical eraseblock from physical eraseblock @from to
1002  * physical eraseblock @to. The @vid_hdr buffer may be changed by this
1003  * function. Returns:
1004  *   o %0 in case of success;
1005  *   o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
1006  *   o a negative error code in case of failure.
1007  */
1008 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1009 		     struct ubi_vid_hdr *vid_hdr)
1010 {
1011 	int err, vol_id, lnum, data_size, aldata_size, idx;
1012 	struct ubi_volume *vol;
1013 	uint32_t crc;
1014 
1015 	vol_id = be32_to_cpu(vid_hdr->vol_id);
1016 	lnum = be32_to_cpu(vid_hdr->lnum);
1017 
1018 	dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
1019 
1020 	if (vid_hdr->vol_type == UBI_VID_STATIC) {
1021 		data_size = be32_to_cpu(vid_hdr->data_size);
1022 		aldata_size = ALIGN(data_size, ubi->min_io_size);
1023 	} else
1024 		data_size = aldata_size =
1025 			    ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
1026 
1027 	idx = vol_id2idx(ubi, vol_id);
1028 	spin_lock(&ubi->volumes_lock);
1029 	/*
1030 	 * Note, we may race with volume deletion, which means that the volume
1031 	 * this logical eraseblock belongs to might be being deleted. Since the
1032 	 * volume deletion un-maps all the volume's logical eraseblocks, it will
1033 	 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
1034 	 */
1035 	vol = ubi->volumes[idx];
1036 	spin_unlock(&ubi->volumes_lock);
1037 	if (!vol) {
1038 		/* No need to do further work, cancel */
1039 		dbg_wl("volume %d is being removed, cancel", vol_id);
1040 		return MOVE_CANCEL_RACE;
1041 	}
1042 
1043 	/*
1044 	 * We do not want anybody to write to this logical eraseblock while we
1045 	 * are moving it, so lock it.
1046 	 *
1047 	 * Note, we are using non-waiting locking here, because we cannot sleep
1048 	 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
1049 	 * unmapping the LEB which is mapped to the PEB we are going to move
1050 	 * (@from). This task locks the LEB and goes sleep in the
1051 	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
1052 	 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
1053 	 * LEB is already locked, we just do not move it and return
1054 	 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
1055 	 * we do not know the reasons of the contention - it may be just a
1056 	 * normal I/O on this LEB, so we want to re-try.
1057 	 */
1058 	err = leb_write_trylock(ubi, vol_id, lnum);
1059 	if (err) {
1060 		dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
1061 		return MOVE_RETRY;
1062 	}
1063 
1064 	/*
1065 	 * The LEB might have been put meanwhile, and the task which put it is
1066 	 * probably waiting on @ubi->move_mutex. No need to continue the work,
1067 	 * cancel it.
1068 	 */
1069 	if (vol->eba_tbl[lnum] != from) {
1070 		dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
1071 		       vol_id, lnum, from, vol->eba_tbl[lnum]);
1072 		err = MOVE_CANCEL_RACE;
1073 		goto out_unlock_leb;
1074 	}
1075 
1076 	/*
1077 	 * OK, now the LEB is locked and we can safely start moving it. Since
1078 	 * this function utilizes the @ubi->peb_buf buffer which is shared
1079 	 * with some other functions - we lock the buffer by taking the
1080 	 * @ubi->buf_mutex.
1081 	 */
1082 	mutex_lock(&ubi->buf_mutex);
1083 	dbg_wl("read %d bytes of data", aldata_size);
1084 	err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
1085 	if (err && err != UBI_IO_BITFLIPS) {
1086 		ubi_warn("error %d while reading data from PEB %d",
1087 			 err, from);
1088 		err = MOVE_SOURCE_RD_ERR;
1089 		goto out_unlock_buf;
1090 	}
1091 
1092 	/*
1093 	 * Now we have got to calculate how much data we have to copy. In
1094 	 * case of a static volume it is fairly easy - the VID header contains
1095 	 * the data size. In case of a dynamic volume it is more difficult - we
1096 	 * have to read the contents, cut 0xFF bytes from the end and copy only
1097 	 * the first part. We must do this to avoid writing 0xFF bytes as it
1098 	 * may have some side-effects. And not only this. It is important not
1099 	 * to include those 0xFFs in the CRC because later they may be filled
1100 	 * by data.
1101 	 */
1102 	if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
1103 		aldata_size = data_size =
1104 			ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
1105 
1106 	cond_resched();
1107 	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
1108 	cond_resched();
1109 
1110 	/*
1111 	 * It may turn out that the whole @from physical eraseblock
1112 	 * contains only 0xFF bytes. Then we have to only write the VID header
1113 	 * and do not write any data. This also means we should not set
1114 	 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
1115 	 */
1116 	if (data_size > 0) {
1117 		vid_hdr->copy_flag = 1;
1118 		vid_hdr->data_size = cpu_to_be32(data_size);
1119 		vid_hdr->data_crc = cpu_to_be32(crc);
1120 	}
1121 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1122 
1123 	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1124 	if (err) {
1125 		if (err == -EIO)
1126 			err = MOVE_TARGET_WR_ERR;
1127 		goto out_unlock_buf;
1128 	}
1129 
1130 	cond_resched();
1131 
1132 	/* Read the VID header back and check if it was written correctly */
1133 	err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
1134 	if (err) {
1135 		if (err != UBI_IO_BITFLIPS) {
1136 			ubi_warn("error %d while reading VID header back from PEB %d",
1137 				 err, to);
1138 			if (is_error_sane(err))
1139 				err = MOVE_TARGET_RD_ERR;
1140 		} else
1141 			err = MOVE_TARGET_BITFLIPS;
1142 		goto out_unlock_buf;
1143 	}
1144 
1145 	if (data_size > 0) {
1146 		err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
1147 		if (err) {
1148 			if (err == -EIO)
1149 				err = MOVE_TARGET_WR_ERR;
1150 			goto out_unlock_buf;
1151 		}
1152 
1153 		cond_resched();
1154 
1155 		/*
1156 		 * We've written the data and are going to read it back to make
1157 		 * sure it was written correctly.
1158 		 */
1159 		memset(ubi->peb_buf, 0xFF, aldata_size);
1160 		err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
1161 		if (err) {
1162 			if (err != UBI_IO_BITFLIPS) {
1163 				ubi_warn("error %d while reading data back from PEB %d",
1164 					 err, to);
1165 				if (is_error_sane(err))
1166 					err = MOVE_TARGET_RD_ERR;
1167 			} else
1168 				err = MOVE_TARGET_BITFLIPS;
1169 			goto out_unlock_buf;
1170 		}
1171 
1172 		cond_resched();
1173 
1174 		if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
1175 			ubi_warn("read data back from PEB %d and it is different",
1176 				 to);
1177 			err = -EINVAL;
1178 			goto out_unlock_buf;
1179 		}
1180 	}
1181 
1182 	ubi_assert(vol->eba_tbl[lnum] == from);
1183 	down_read(&ubi->fm_sem);
1184 	vol->eba_tbl[lnum] = to;
1185 	up_read(&ubi->fm_sem);
1186 
1187 out_unlock_buf:
1188 	mutex_unlock(&ubi->buf_mutex);
1189 out_unlock_leb:
1190 	leb_write_unlock(ubi, vol_id, lnum);
1191 	return err;
1192 }
1193 
1194 /**
1195  * print_rsvd_warning - warn about not having enough reserved PEBs.
1196  * @ubi: UBI device description object
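 * @ai: attaching information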
1197  *
1198  * This is a helper function for 'ubi_eba_init()' which is called when UBI
1199  * cannot reserve enough PEBs for bad block handling. This function makes a
1200  * decision whether we have to print a warning or not. The algorithm is as
1201  * follows:
1202  *   o if this is a new UBI image, then just print the warning
1203  *   o if this is a UBI image which has already been used for some time, print
1204  *     a warning only if we can reserve less than 10% of the expected amount of
1205  *     reserved PEBs.
1206  *
1207  * The idea is that when UBI is used, PEBs become bad, and the reserved pool
1208  * of PEBs becomes smaller, which is normal and we do not want to scare users
1209  * with a warning every time they attach the MTD device. This was an issue
1210  * reported by real users.
1211  */
1212 static void print_rsvd_warning(struct ubi_device *ubi,
1213 			       struct ubi_attach_info *ai)
1214 {
1215 	/*
1216 	 * The 1 << 18 (256K) number is picked arbitrarily, just a reasonably
1217 	 * large number to distinguish between newly flashed and used images.
1218 	 */
1219 	if (ai->max_sqnum > (1 << 18)) {
1220 		int min = ubi->beb_rsvd_level / 10;
1221 
1222 		if (!min)
1223 			min = 1;
1224 		if (ubi->beb_rsvd_pebs > min)
1225 			return;
1226 	}
1227 
1228 	ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
1229 		 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1230 	if (ubi->corr_peb_count)
1231 		ubi_warn("%d PEBs are corrupted and not used",
1232 			 ubi->corr_peb_count);
1233 }
1234 
1235 /**
1236  * self_check_eba - run a self check on the EBA table constructed by fastmap.
1237  * @ubi: UBI device description object
1238  * @ai_fastmap: UBI attach info object created by fastmap
1239  * @ai_scan: UBI attach info object created by scanning
1240  *
1241  * Returns < 0 in case of an internal error, 0 otherwise.
1242  * If a bad EBA table entry was found it will be printed out and
1243  * ubi_assert() triggers.
1244  */
1245 int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
1246 		   struct ubi_attach_info *ai_scan)
1247 {
1248 	int i, j, num_volumes, ret = 0;
1249 	int **scan_eba, **fm_eba;
1250 	struct ubi_ainf_volume *av;
1251 	struct ubi_volume *vol;
1252 	struct ubi_ainf_peb *aeb;
1253 	struct rb_node *rb;
1254 
1255 	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1256 
1257 	scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
1258 	if (!scan_eba)
1259 		return -ENOMEM;
1260 
1261 	fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
1262 	if (!fm_eba) {
1263 		kfree(scan_eba);
1264 		return -ENOMEM;
1265 	}
1266 
1267 	for (i = 0; i < num_volumes; i++) {
1268 		vol = ubi->volumes[i];
1269 		if (!vol)
1270 			continue;
1271 
1272 		scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
1273 				      GFP_KERNEL);
1274 		if (!scan_eba[i]) {
1275 			ret = -ENOMEM;
1276 			goto out_free;
1277 		}
1278 
1279 		fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
1280 				    GFP_KERNEL);
1281 		if (!fm_eba[i]) {
1282 			ret = -ENOMEM;
1283 			goto out_free;
1284 		}
1285 
1286 		for (j = 0; j < vol->reserved_pebs; j++)
1287 			scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
1288 
1289 		av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
1290 		if (!av)
1291 			continue;
1292 
1293 		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1294 			scan_eba[i][aeb->lnum] = aeb->pnum;
1295 
1296 		av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
1297 		if (!av)
1298 			continue;
1299 
1300 		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1301 			fm_eba[i][aeb->lnum] = aeb->pnum;
1302 
1303 		for (j = 0; j < vol->reserved_pebs; j++) {
1304 			if (scan_eba[i][j] != fm_eba[i][j]) {
1305 				if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
1306 					fm_eba[i][j] == UBI_LEB_UNMAPPED)
1307 					continue;
1308 
1309 				ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
1310 					vol->vol_id, i, fm_eba[i][j],
1311 					scan_eba[i][j]);
1312 				ubi_assert(0);
1313 			}
1314 		}
1315 	}
1316 
1317 out_free:
1318 	for (i = 0; i < num_volumes; i++) {
1319 		if (!ubi->volumes[i])
1320 			continue;
1321 
1322 		kfree(scan_eba[i]);
1323 		kfree(fm_eba[i]);
1324 	}
1325 
1326 	kfree(scan_eba);
1327 	kfree(fm_eba);
1328 	return ret;
1329 }
1330 
1331 /**
1332  * ubi_eba_init - initialize the EBA sub-system using attaching information.
1333  * @ubi: UBI device description object
1334  * @ai: attaching information
1335  *
1336  * This function returns zero in case of success and a negative error code in
1337  * case of failure.
1338  */
1339 int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1340 {
1341 	int i, j, err, num_volumes;
1342 	struct ubi_ainf_volume *av;
1343 	struct ubi_volume *vol;
1344 	struct ubi_ainf_peb *aeb;
1345 	struct rb_node *rb;
1346 
1347 	dbg_eba("initialize EBA sub-system");
1348 
1349 	spin_lock_init(&ubi->ltree_lock);
1350 	mutex_init(&ubi->alc_mutex);
1351 	ubi->ltree = RB_ROOT;
1352 
1353 	ubi->global_sqnum = ai->max_sqnum + 1;
1354 	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1355 
1356 	for (i = 0; i < num_volumes; i++) {
1357 		vol = ubi->volumes[i];
1358 		if (!vol)
1359 			continue;
1360 
1361 		cond_resched();
1362 
1363 		vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
1364 				       GFP_KERNEL);
1365 		if (!vol->eba_tbl) {
1366 			err = -ENOMEM;
1367 			goto out_free;
1368 		}
1369 
1370 		for (j = 0; j < vol->reserved_pebs; j++)
1371 			vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
1372 
1373 		av = ubi_find_av(ai, idx2vol_id(ubi, i));
1374 		if (!av)
1375 			continue;
1376 
1377 		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
1378 			if (aeb->lnum >= vol->reserved_pebs)
1379 				/*
1380 				 * This may happen in case of an unclean reboot
1381 				 * during re-size.
1382 				 */
1383 				ubi_move_aeb_to_list(av, aeb, &ai->erase);
1384 			else
1385 				vol->eba_tbl[aeb->lnum] = aeb->pnum;
1386 		}
1387 	}
1388 
1389 	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1390 		ubi_err("not enough physical eraseblocks (%d, need %d)",
1391 			ubi->avail_pebs, EBA_RESERVED_PEBS);
1392 		if (ubi->corr_peb_count)
1393 			ubi_err("%d PEBs are corrupted and not used",
1394 				ubi->corr_peb_count);
1395 		err = -ENOSPC;
1396 		goto out_free;
1397 	}
1398 	ubi->avail_pebs -= EBA_RESERVED_PEBS;
1399 	ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1400 
1401 	if (ubi->bad_allowed) {
1402 		ubi_calculate_reserved(ubi);
1403 
1404 		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
1405 			/* Not enough free physical eraseblocks */
1406 			ubi->beb_rsvd_pebs = ubi->avail_pebs;
1407 			print_rsvd_warning(ubi, ai);
1408 		} else
1409 			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
1410 
1411 		ubi->avail_pebs -= ubi->beb_rsvd_pebs;
1412 		ubi->rsvd_pebs  += ubi->beb_rsvd_pebs;
1413 	}
1414 
1415 	dbg_eba("EBA sub-system is initialized");
1416 	return 0;
1417 
1418 out_free:
1419 	for (i = 0; i < num_volumes; i++) {
1420 		if (!ubi->volumes[i])
1421 			continue;
1422 		kfree(ubi->volumes[i]->eba_tbl);
1423 		ubi->volumes[i]->eba_tbl = NULL;
1424 	}
1425 	return err;
1426 }
1427