1 /*
2  * Copyright (c) International Business Machines Corp., 2006
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12  * the GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17  *
18  * Author: Artem Bityutskiy (Битюцкий Артём)
19  */
20 
21 /*
22  * The UBI Eraseblock Association (EBA) sub-system.
23  *
24  * This sub-system is responsible for I/O to/from logical eraseblocks.
25  *
26  * Although in this implementation the EBA table is fully kept and managed in
27  * RAM, which implies poor scalability, it might be (partially) maintained on
28  * flash in future implementations.
29  *
30  * The EBA sub-system implements per-logical eraseblock locking. Before
31  * accessing a logical eraseblock it is locked for reading or writing. The
32  * per-logical eraseblock locking is implemented by means of the lock tree. The
33  * lock tree is an RB-tree which refers to all the currently locked logical
34  * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
35  * They are indexed by (@vol_id, @lnum) pairs.
36  *
37  * EBA also maintains the global sequence counter which is incremented each
38  * time a logical eraseblock is mapped to a physical eraseblock and it is
39  * stored in the volume identifier header. This means that each VID header has
40  * a unique sequence number. The sequence number only increases, and we assume
41  * that 64 bits is enough for it to never overflow.
42  */
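/*
 * Illustrative sketch (not part of the original code): how the pieces
 * described above cooperate on the read path. The calls below are the ones
 * used by ubi_eba_read_leb() later in this file; error handling and the
 * optional CRC check are omitted for brevity.
 *
 *	err = leb_read_lock(ubi, vol->vol_id, lnum);
 *	pnum = vol->eba_tbl->entries[lnum].pnum;
 *	if (pnum >= 0)
 *		err = ubi_io_read_data(ubi, buf, pnum, offset, len);
 *	else
 *		memset(buf, 0xFF, len);
 *	leb_read_unlock(ubi, vol->vol_id, lnum);
 */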
43 
44 #include <linux/slab.h>
45 #include <linux/crc32.h>
46 #include <linux/err.h>
47 #include "ubi.h"
48 
49 /* Number of physical eraseblocks reserved for atomic LEB change operation */
50 #define EBA_RESERVED_PEBS 1
51 
52 /**
53  * struct ubi_eba_entry - structure encoding a single LEB -> PEB association
54  * @pnum: the physical eraseblock number attached to the LEB
55  *
56  * This structure encodes a LEB -> PEB association. Note that the LEB
57  * number is not stored here, because it is the index used to access the
58  * entries table.
59  */
60 struct ubi_eba_entry {
61 	int pnum;
62 };
63 
64 /**
65  * struct ubi_eba_table - LEB -> PEB association information
66  * @entries: the LEB to PEB mapping (one entry per LEB).
67  *
68  * This structure is private to the EBA logic and should be kept here.
69  * It encodes the LEB to PEB association table, and is subject to
70  * changes.
71  */
72 struct ubi_eba_table {
73 	struct ubi_eba_entry *entries;
74 };
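/*
 * Usage sketch (illustrative): the LEB number is the index into @entries, so
 * the LEB -> PEB lookup and the "is it mapped?" test are single array
 * accesses, exactly as ubi_eba_get_ldesc() and ubi_eba_is_mapped() below do.
 * Unmapped LEBs hold the negative value %UBI_LEB_UNMAPPED.
 *
 *	int pnum = vol->eba_tbl->entries[lnum].pnum;
 *	bool mapped = pnum >= 0;
 */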
75 
76 /**
77  * next_sqnum - get next sequence number.
78  * @ubi: UBI device description object
79  *
80  * This function returns the next sequence number to use, which is just the current
81  * global sequence counter value. It also increases the global sequence
82  * counter.
83  */
84 unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
85 {
86 	unsigned long long sqnum;
87 
88 	spin_lock(&ubi->ltree_lock);
89 	sqnum = ubi->global_sqnum++;
90 	spin_unlock(&ubi->ltree_lock);
91 
92 	return sqnum;
93 }
94 
95 /**
96  * ubi_get_compat - get compatibility flags of a volume.
97  * @ubi: UBI device description object
98  * @vol_id: volume ID
99  *
100  * This function returns compatibility flags for an internal volume. User
101  * volumes have no compatibility flags, so %0 is returned.
102  */
103 static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
104 {
105 	if (vol_id == UBI_LAYOUT_VOLUME_ID)
106 		return UBI_LAYOUT_VOLUME_COMPAT;
107 	return 0;
108 }
109 
110 /**
111  * ubi_eba_get_ldesc - get information about a LEB
112  * @vol: volume description object
113  * @lnum: logical eraseblock number
114  * @ldesc: the LEB descriptor to fill
115  *
116  * Used to query information about a specific LEB.
117  * It currently only returns the physical position of the LEB, but may be
118  * extended to provide more information in the future.
119  */
120 void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
121 		       struct ubi_eba_leb_desc *ldesc)
122 {
123 	ldesc->lnum = lnum;
124 	ldesc->pnum = vol->eba_tbl->entries[lnum].pnum;
125 }
126 
127 /**
128  * ubi_eba_create_table - allocate a new EBA table and initialize it with all
129  *			  LEBs unmapped
130  * @vol: volume to create the EBA table for
131  * @nentries: number of entries in the table
132  *
133  * Allocate a new EBA table and initialize it with all LEBs unmapped.
134  * Returns a valid pointer if it succeeds, an ERR_PTR() otherwise.
135  */
136 struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
137 					   int nentries)
138 {
139 	struct ubi_eba_table *tbl;
140 	int err = -ENOMEM;
141 	int i;
142 
143 	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
144 	if (!tbl)
145 		return ERR_PTR(-ENOMEM);
146 
147 	tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
148 				     GFP_KERNEL);
149 	if (!tbl->entries)
150 		goto err;
151 
152 	for (i = 0; i < nentries; i++)
153 		tbl->entries[i].pnum = UBI_LEB_UNMAPPED;
154 
155 	return tbl;
156 
157 err:
158 	kfree(tbl->entries);
159 	kfree(tbl);
160 
161 	return ERR_PTR(err);
162 }
163 
164 /**
165  * ubi_eba_destroy_table - destroy an EBA table
166  * @tbl: the table to destroy
167  *
168  * Destroy an EBA table.
169  */
170 void ubi_eba_destroy_table(struct ubi_eba_table *tbl)
171 {
172 	if (!tbl)
173 		return;
174 
175 	kfree(tbl->entries);
176 	kfree(tbl);
177 }
178 
179 /**
180  * ubi_eba_copy_table - copy the EBA table attached to vol into another table
181  * @vol: volume containing the EBA table to copy
182  * @dst: destination
183  * @nentries: number of entries to copy
184  *
185  * Copy the EBA table stored in vol into the one pointed by dst.
186  */
187 void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
188 			int nentries)
189 {
190 	struct ubi_eba_table *src;
191 	int i;
192 
193 	ubi_assert(dst && vol && vol->eba_tbl);
194 
195 	src = vol->eba_tbl;
196 
197 	for (i = 0; i < nentries; i++)
198 		dst->entries[i].pnum = src->entries[i].pnum;
199 }
200 
201 /**
202  * ubi_eba_replace_table - assign a new EBA table to a volume
203  * @vol: volume whose EBA table is to be replaced
204  * @tbl: new EBA table
205  *
206  * Assign a new EBA table to the volume and release the old one.
207  */
208 void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl)
209 {
210 	ubi_eba_destroy_table(vol->eba_tbl);
211 	vol->eba_tbl = tbl;
212 }
213 
214 /**
215  * ltree_lookup - look up the lock tree.
216  * @ubi: UBI device description object
217  * @vol_id: volume ID
218  * @lnum: logical eraseblock number
219  *
220  * This function returns a pointer to the corresponding &struct ubi_ltree_entry
221  * object if the logical eraseblock is locked and %NULL if it is not.
222  * @ubi->ltree_lock has to be locked.
223  */
224 static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
225 					    int lnum)
226 {
227 	struct rb_node *p;
228 
229 	p = ubi->ltree.rb_node;
230 	while (p) {
231 		struct ubi_ltree_entry *le;
232 
233 		le = rb_entry(p, struct ubi_ltree_entry, rb);
234 
235 		if (vol_id < le->vol_id)
236 			p = p->rb_left;
237 		else if (vol_id > le->vol_id)
238 			p = p->rb_right;
239 		else {
240 			if (lnum < le->lnum)
241 				p = p->rb_left;
242 			else if (lnum > le->lnum)
243 				p = p->rb_right;
244 			else
245 				return le;
246 		}
247 	}
248 
249 	return NULL;
250 }
251 
252 /**
253  * ltree_add_entry - add new entry to the lock tree.
254  * @ubi: UBI device description object
255  * @vol_id: volume ID
256  * @lnum: logical eraseblock number
257  *
258  * This function adds a new entry for logical eraseblock (@vol_id, @lnum) to the
259  * lock tree. If such an entry is already there, its usage counter is increased.
260  * Returns a pointer to the lock tree entry, or an ERR_PTR(%-ENOMEM) if memory
261  * allocation failed.
262  */
263 static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
264 					       int vol_id, int lnum)
265 {
266 	struct ubi_ltree_entry *le, *le1, *le_free;
267 
268 	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
269 	if (!le)
270 		return ERR_PTR(-ENOMEM);
271 
272 	le->users = 0;
273 	init_rwsem(&le->mutex);
274 	le->vol_id = vol_id;
275 	le->lnum = lnum;
276 
277 	spin_lock(&ubi->ltree_lock);
278 	le1 = ltree_lookup(ubi, vol_id, lnum);
279 
280 	if (le1) {
281 		/*
282 		 * This logical eraseblock is already locked. The newly
283 		 * allocated lock entry is not needed.
284 		 */
285 		le_free = le;
286 		le = le1;
287 	} else {
288 		struct rb_node **p, *parent = NULL;
289 
290 		/*
291 		 * No lock entry, add the newly allocated one to the
292 		 * @ubi->ltree RB-tree.
293 		 */
294 		le_free = NULL;
295 
296 		p = &ubi->ltree.rb_node;
297 		while (*p) {
298 			parent = *p;
299 			le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
300 
301 			if (vol_id < le1->vol_id)
302 				p = &(*p)->rb_left;
303 			else if (vol_id > le1->vol_id)
304 				p = &(*p)->rb_right;
305 			else {
306 				ubi_assert(lnum != le1->lnum);
307 				if (lnum < le1->lnum)
308 					p = &(*p)->rb_left;
309 				else
310 					p = &(*p)->rb_right;
311 			}
312 		}
313 
314 		rb_link_node(&le->rb, parent, p);
315 		rb_insert_color(&le->rb, &ubi->ltree);
316 	}
317 	le->users += 1;
318 	spin_unlock(&ubi->ltree_lock);
319 
320 	kfree(le_free);
321 	return le;
322 }
323 
324 /**
325  * leb_read_lock - lock logical eraseblock for reading.
326  * @ubi: UBI device description object
327  * @vol_id: volume ID
328  * @lnum: logical eraseblock number
329  *
330  * This function locks a logical eraseblock for reading. Returns zero in case
331  * of success and a negative error code in case of failure.
332  */
333 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
334 {
335 	struct ubi_ltree_entry *le;
336 
337 	le = ltree_add_entry(ubi, vol_id, lnum);
338 	if (IS_ERR(le))
339 		return PTR_ERR(le);
340 	down_read(&le->mutex);
341 	return 0;
342 }
343 
344 /**
345  * leb_read_unlock - unlock logical eraseblock.
346  * @ubi: UBI device description object
347  * @vol_id: volume ID
348  * @lnum: logical eraseblock number
349  */
350 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
351 {
352 	struct ubi_ltree_entry *le;
353 
354 	spin_lock(&ubi->ltree_lock);
355 	le = ltree_lookup(ubi, vol_id, lnum);
356 	le->users -= 1;
357 	ubi_assert(le->users >= 0);
358 	up_read(&le->mutex);
359 	if (le->users == 0) {
360 		rb_erase(&le->rb, &ubi->ltree);
361 		kfree(le);
362 	}
363 	spin_unlock(&ubi->ltree_lock);
364 }
365 
366 /**
367  * leb_write_lock - lock logical eraseblock for writing.
368  * @ubi: UBI device description object
369  * @vol_id: volume ID
370  * @lnum: logical eraseblock number
371  *
372  * This function locks a logical eraseblock for writing. Returns zero in case
373  * of success and a negative error code in case of failure.
374  */
375 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
376 {
377 	struct ubi_ltree_entry *le;
378 
379 	le = ltree_add_entry(ubi, vol_id, lnum);
380 	if (IS_ERR(le))
381 		return PTR_ERR(le);
382 	down_write(&le->mutex);
383 	return 0;
384 }
385 
386 /**
387  * leb_write_trylock - try to lock logical eraseblock for writing.
388  * @ubi: UBI device description object
389  * @vol_id: volume ID
390  * @lnum: logical eraseblock number
391  *
392  * This function locks a logical eraseblock for writing if there is no
393  * contention and does nothing if there is contention. Returns %0 in case of
394  * success, %1 in case of contention, and a negative error code in case of
395  * failure.
396  */
397 static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
398 {
399 	struct ubi_ltree_entry *le;
400 
401 	le = ltree_add_entry(ubi, vol_id, lnum);
402 	if (IS_ERR(le))
403 		return PTR_ERR(le);
404 	if (down_write_trylock(&le->mutex))
405 		return 0;
406 
407 	/* Contention, cancel */
408 	spin_lock(&ubi->ltree_lock);
409 	le->users -= 1;
410 	ubi_assert(le->users >= 0);
411 	if (le->users == 0) {
412 		rb_erase(&le->rb, &ubi->ltree);
413 		kfree(le);
414 	}
415 	spin_unlock(&ubi->ltree_lock);
416 
417 	return 1;
418 }
419 
420 /**
421  * leb_write_unlock - unlock logical eraseblock.
422  * @ubi: UBI device description object
423  * @vol_id: volume ID
424  * @lnum: logical eraseblock number
425  */
426 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
427 {
428 	struct ubi_ltree_entry *le;
429 
430 	spin_lock(&ubi->ltree_lock);
431 	le = ltree_lookup(ubi, vol_id, lnum);
432 	le->users -= 1;
433 	ubi_assert(le->users >= 0);
434 	up_write(&le->mutex);
435 	if (le->users == 0) {
436 		rb_erase(&le->rb, &ubi->ltree);
437 		kfree(le);
438 	}
439 	spin_unlock(&ubi->ltree_lock);
440 }
441 
442 /**
443  * ubi_eba_is_mapped - check if a LEB is mapped.
444  * @vol: volume description object
445  * @lnum: logical eraseblock number
446  *
447  * This function returns true if the LEB is mapped, false otherwise.
448  */
449 bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum)
450 {
451 	return vol->eba_tbl->entries[lnum].pnum >= 0;
452 }
453 
454 /**
455  * ubi_eba_unmap_leb - un-map logical eraseblock.
456  * @ubi: UBI device description object
457  * @vol: volume description object
458  * @lnum: logical eraseblock number
459  *
460  * This function un-maps logical eraseblock @lnum and schedules corresponding
461  * physical eraseblock for erasure. Returns zero in case of success and a
462  * negative error code in case of failure.
463  */
464 int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
465 		      int lnum)
466 {
467 	int err, pnum, vol_id = vol->vol_id;
468 
469 	if (ubi->ro_mode)
470 		return -EROFS;
471 
472 	err = leb_write_lock(ubi, vol_id, lnum);
473 	if (err)
474 		return err;
475 
476 	pnum = vol->eba_tbl->entries[lnum].pnum;
477 	if (pnum < 0)
478 		/* This logical eraseblock is already unmapped */
479 		goto out_unlock;
480 
481 	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
482 
483 	down_read(&ubi->fm_eba_sem);
484 	vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
485 	up_read(&ubi->fm_eba_sem);
486 	err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
487 
488 out_unlock:
489 	leb_write_unlock(ubi, vol_id, lnum);
490 	return err;
491 }
492 
493 #ifdef CONFIG_MTD_UBI_FASTMAP
494 /**
495  * check_mapping - check and fixup a mapping
496  * @ubi: UBI device description object
497  * @vol: volume description object
498  * @lnum: logical eraseblock number
499  * @pnum: physical eraseblock number
500  *
501  * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
502  * operations; if such an operation is interrupted, the mapping still looks
503  * good, but upon the first read an ECC error is reported to the upper layer.
504  * Normally this is fixed during the full scan at attach time, but with Fastmap
505  * we have to deal with it while reading.
506  * If the PEB behind a LEB shows this symptom, we change the mapping to
507  * %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
508  *
509  * Returns 0 on success, negative error code in case of failure.
510  */
511 static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
512 			 int *pnum)
513 {
514 	int err;
515 	struct ubi_vid_io_buf *vidb;
516 
517 	if (!ubi->fast_attach)
518 		return 0;
519 
520 	if (!vol->checkmap || test_bit(lnum, vol->checkmap))
521 		return 0;
522 
523 	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
524 	if (!vidb)
525 		return -ENOMEM;
526 
527 	err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
528 	if (err > 0 && err != UBI_IO_BITFLIPS) {
529 		int torture = 0;
530 
531 		switch (err) {
532 			case UBI_IO_FF:
533 			case UBI_IO_FF_BITFLIPS:
534 			case UBI_IO_BAD_HDR:
535 			case UBI_IO_BAD_HDR_EBADMSG:
536 				break;
537 			default:
538 				ubi_assert(0);
539 		}
540 
541 		if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
542 			torture = 1;
543 
544 		down_read(&ubi->fm_eba_sem);
545 		vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
546 		up_read(&ubi->fm_eba_sem);
547 		ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
548 
549 		*pnum = UBI_LEB_UNMAPPED;
550 	} else if (err < 0) {
551 		ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
552 			*pnum, err);
553 
554 		goto out_free;
555 	}
556 
557 	set_bit(lnum, vol->checkmap);
558 	err = 0;
559 
560 out_free:
561 	ubi_free_vid_buf(vidb);
562 
563 	return err;
564 }
565 #else
566 static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
567 		  int *pnum)
568 {
569 	return 0;
570 }
571 #endif
572 
573 /**
574  * ubi_eba_read_leb - read data.
575  * @ubi: UBI device description object
576  * @vol: volume description object
577  * @lnum: logical eraseblock number
578  * @buf: buffer to store the read data
579  * @offset: offset from where to read
580  * @len: how many bytes to read
581  * @check: data CRC check flag
582  *
583  * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
584  * bytes. The @check flag only makes sense for static volumes and forces
585  * eraseblock data CRC checking.
586  *
587  * In case of success this function returns zero. In case of a static volume,
588  * %-EBADMSG is returned if the data CRC does not match. %-EBADMSG may also be
589  * returned for any volume type if an ECC error was detected by the MTD device
590  * driver. Other negative error codes may be returned in case of other errors.
591  */
592 int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
593 		     void *buf, int offset, int len, int check)
594 {
595 	int err, pnum, scrub = 0, vol_id = vol->vol_id;
596 	struct ubi_vid_io_buf *vidb;
597 	struct ubi_vid_hdr *vid_hdr;
598 	uint32_t uninitialized_var(crc);
599 
600 	err = leb_read_lock(ubi, vol_id, lnum);
601 	if (err)
602 		return err;
603 
604 	pnum = vol->eba_tbl->entries[lnum].pnum;
605 	if (pnum >= 0) {
606 		err = check_mapping(ubi, vol, lnum, &pnum);
607 		if (err < 0)
608 			goto out_unlock;
609 	}
610 
611 	if (pnum == UBI_LEB_UNMAPPED) {
612 		/*
613 		 * The logical eraseblock is not mapped, fill the whole buffer
614 		 * with 0xFF bytes. The exception is static volumes for which
615 		 * it is an error to read unmapped logical eraseblocks.
616 		 */
617 		dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
618 			len, offset, vol_id, lnum);
619 		leb_read_unlock(ubi, vol_id, lnum);
620 		ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
621 		memset(buf, 0xFF, len);
622 		return 0;
623 	}
624 
625 	dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
626 		len, offset, vol_id, lnum, pnum);
627 
628 	if (vol->vol_type == UBI_DYNAMIC_VOLUME)
629 		check = 0;
630 
631 retry:
632 	if (check) {
633 		vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
634 		if (!vidb) {
635 			err = -ENOMEM;
636 			goto out_unlock;
637 		}
638 
639 		vid_hdr = ubi_get_vid_hdr(vidb);
640 
641 		err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
642 		if (err && err != UBI_IO_BITFLIPS) {
643 			if (err > 0) {
644 				/*
645 				 * The header is either absent or corrupted.
646 				 * The former case means there is a bug -
647 				 * switch to read-only mode just in case.
648 				 * The latter case means a real corruption - we
649 				 * may try to recover data. FIXME: but this is
650 				 * not implemented.
651 				 */
652 				if (err == UBI_IO_BAD_HDR_EBADMSG ||
653 				    err == UBI_IO_BAD_HDR) {
654 					ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
655 						 pnum, vol_id, lnum);
656 					err = -EBADMSG;
657 				} else {
658 					/*
659 					 * Ending up here in the non-Fastmap case
660 					 * is a clear bug as the VID header had to
661 					 * be present at scan time to have it referenced.
662 					 * With fastmap the story is more complicated.
663 					 * Fastmap has the mapping info without the need
664 					 * of a full scan. So the LEB could have been
665 					 * unmapped, Fastmap cannot know this and keeps
666 					 * the LEB referenced.
667 					 * This is valid and works as the layer above UBI
668 					 * has to do bookkeeping about used/referenced
669 					 * LEBs in any case.
670 					 */
671 					if (ubi->fast_attach) {
672 						err = -EBADMSG;
673 					} else {
674 						err = -EINVAL;
675 						ubi_ro_mode(ubi);
676 					}
677 				}
678 			}
679 			goto out_free;
680 		} else if (err == UBI_IO_BITFLIPS)
681 			scrub = 1;
682 
683 		ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
684 		ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
685 
686 		crc = be32_to_cpu(vid_hdr->data_crc);
687 		ubi_free_vid_buf(vidb);
688 	}
689 
690 	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
691 	if (err) {
692 		if (err == UBI_IO_BITFLIPS)
693 			scrub = 1;
694 		else if (mtd_is_eccerr(err)) {
695 			if (vol->vol_type == UBI_DYNAMIC_VOLUME)
696 				goto out_unlock;
697 			scrub = 1;
698 			if (!check) {
699 				ubi_msg(ubi, "force data checking");
700 				check = 1;
701 				goto retry;
702 			}
703 		} else
704 			goto out_unlock;
705 	}
706 
707 	if (check) {
708 		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
709 		if (crc1 != crc) {
710 			ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
711 				 crc1, crc);
712 			err = -EBADMSG;
713 			goto out_unlock;
714 		}
715 	}
716 
717 	if (scrub)
718 		err = ubi_wl_scrub_peb(ubi, pnum);
719 
720 	leb_read_unlock(ubi, vol_id, lnum);
721 	return err;
722 
723 out_free:
724 	ubi_free_vid_buf(vidb);
725 out_unlock:
726 	leb_read_unlock(ubi, vol_id, lnum);
727 	return err;
728 }
729 
730 /**
731  * ubi_eba_read_leb_sg - read data into a scatter gather list.
732  * @ubi: UBI device description object
733  * @vol: volume description object
734  * @lnum: logical eraseblock number
735  * @sgl: UBI scatter gather list to store the read data
736  * @offset: offset from where to read
737  * @len: how many bytes to read
738  * @check: data CRC check flag
739  *
740  * This function works exactly like ubi_eba_read_leb(), but instead of
741  * storing the read data into a single buffer it writes it to a UBI scatter
742  * gather list.
743  */
744 int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
745 			struct ubi_sgl *sgl, int lnum, int offset, int len,
746 			int check)
747 {
748 	int to_read;
749 	int ret;
750 	struct scatterlist *sg;
751 
752 	for (;;) {
753 		ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
754 		sg = &sgl->sg[sgl->list_pos];
755 		if (len < sg->length - sgl->page_pos)
756 			to_read = len;
757 		else
758 			to_read = sg->length - sgl->page_pos;
759 
760 		ret = ubi_eba_read_leb(ubi, vol, lnum,
761 				       sg_virt(sg) + sgl->page_pos, offset,
762 				       to_read, check);
763 		if (ret < 0)
764 			return ret;
765 
766 		offset += to_read;
767 		len -= to_read;
768 		if (!len) {
769 			sgl->page_pos += to_read;
770 			if (sgl->page_pos == sg->length) {
771 				sgl->list_pos++;
772 				sgl->page_pos = 0;
773 			}
774 
775 			break;
776 		}
777 
778 		sgl->list_pos++;
779 		sgl->page_pos = 0;
780 	}
781 
782 	return ret;
783 }
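/*
 * Caller sketch (illustrative; assumes a single destination buffer and the
 * generic scatterlist helpers from <linux/scatterlist.h>): read @len bytes
 * from the start of LEB @lnum of a dynamic volume into one kernel buffer
 * through a one-entry scatter gather list. @list_pos and @page_pos must start
 * at zero, as the loop above expects. For brevity the list lives on the stack.
 *
 *	struct ubi_sgl sgl = { .list_pos = 0, .page_pos = 0 };
 *
 *	sg_init_table(sgl.sg, 1);
 *	sg_set_buf(&sgl.sg[0], buf, len);
 *	err = ubi_eba_read_leb_sg(ubi, vol, &sgl, lnum, 0, len, 0);
 */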
784 
785 /**
786  * try_recover_peb - try to recover from write failure.
787  * @vol: volume description object
788  * @pnum: the physical eraseblock to recover
789  * @lnum: logical eraseblock number
790  * @buf: data which was not written because of the write failure
791  * @offset: offset of the failed write
792  * @len: how many bytes should have been written
793  * @vidb: VID buffer
794  * @retry: whether the caller should retry in case of failure
795  *
796  * This function is called in case of a write failure and moves all good data
797  * from the potentially bad physical eraseblock to a good physical eraseblock.
798  * This function also writes the data which was not written due to the failure.
799  * Returns 0 in case of success, and a negative error code in case of failure.
800  * In case of failure, the @retry parameter is set to false if this is a fatal
801  * error (retrying won't help), and true otherwise.
802  */
803 static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
804 			   const void *buf, int offset, int len,
805 			   struct ubi_vid_io_buf *vidb, bool *retry)
806 {
807 	struct ubi_device *ubi = vol->ubi;
808 	struct ubi_vid_hdr *vid_hdr;
809 	int new_pnum, err, vol_id = vol->vol_id, data_size;
810 	uint32_t crc;
811 
812 	*retry = false;
813 
814 	new_pnum = ubi_wl_get_peb(ubi);
815 	if (new_pnum < 0) {
816 		err = new_pnum;
817 		goto out_put;
818 	}
819 
820 	ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
821 		pnum, new_pnum);
822 
823 	err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
824 	if (err && err != UBI_IO_BITFLIPS) {
825 		if (err > 0)
826 			err = -EIO;
827 		goto out_put;
828 	}
829 
830 	vid_hdr = ubi_get_vid_hdr(vidb);
831 	ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
832 
833 	mutex_lock(&ubi->buf_mutex);
834 	memset(ubi->peb_buf + offset, 0xFF, len);
835 
836 	/* Read everything before the area where the write failure happened */
837 	if (offset > 0) {
838 		err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
839 		if (err && err != UBI_IO_BITFLIPS)
840 			goto out_unlock;
841 	}
842 
843 	*retry = true;
844 
845 	memcpy(ubi->peb_buf + offset, buf, len);
846 
847 	data_size = offset + len;
848 	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
849 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
850 	vid_hdr->copy_flag = 1;
851 	vid_hdr->data_size = cpu_to_be32(data_size);
852 	vid_hdr->data_crc = cpu_to_be32(crc);
853 	err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb);
854 	if (err)
855 		goto out_unlock;
856 
857 	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
858 
859 out_unlock:
860 	mutex_unlock(&ubi->buf_mutex);
861 
862 	if (!err)
863 		vol->eba_tbl->entries[lnum].pnum = new_pnum;
864 
865 out_put:
866 	up_read(&ubi->fm_eba_sem);
867 
868 	if (!err) {
869 		ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
870 		ubi_msg(ubi, "data was successfully recovered");
871 	} else if (new_pnum >= 0) {
872 		/*
873 		 * Bad luck? This physical eraseblock is bad too? Crud. Let's
874 		 * try to get another one.
875 		 */
876 		ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
877 		ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
878 	}
879 
880 	return err;
881 }
882 
883 /**
884  * recover_peb - recover from write failure.
885  * @ubi: UBI device description object
886  * @pnum: the physical eraseblock to recover
887  * @vol_id: volume ID
888  * @lnum: logical eraseblock number
889  * @buf: data which was not written because of the write failure
890  * @offset: offset of the failed write
891  * @len: how many bytes should have been written
892  *
893  * This function is called in case of a write failure and moves all good data
894  * from the potentially bad physical eraseblock to a good physical eraseblock.
895  * This function also writes the data which was not written due to the failure.
896  * Returns 0 in case of success, and a negative error code in case of failure.
897  * This function retries up to %UBI_IO_RETRIES times before giving up.
898  */
899 static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
900 		       const void *buf, int offset, int len)
901 {
902 	int err, idx = vol_id2idx(ubi, vol_id), tries;
903 	struct ubi_volume *vol = ubi->volumes[idx];
904 	struct ubi_vid_io_buf *vidb;
905 
906 	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
907 	if (!vidb)
908 		return -ENOMEM;
909 
910 	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
911 		bool retry;
912 
913 		err = try_recover_peb(vol, pnum, lnum, buf, offset, len, vidb,
914 				      &retry);
915 		if (!err || !retry)
916 			break;
917 
918 		ubi_msg(ubi, "try again");
919 	}
920 
921 	ubi_free_vid_buf(vidb);
922 
923 	return err;
924 }
925 
926 /**
927  * try_write_vid_and_data - try to write VID header and data to a new PEB.
928  * @vol: volume description object
929  * @lnum: logical eraseblock number
930  * @vidb: the VID buffer to write
931  * @buf: buffer containing the data
932  * @offset: where to start writing data
933  * @len: how many bytes should be written
934  *
935  * This function tries to write VID header and data belonging to logical
936  * eraseblock @lnum of volume @vol to a new physical eraseblock. Returns zero
937  * in case of success and a negative error code in case of failure.
938  * In case of error, it is possible that something was still written to the
939  * flash media, but it may be garbage.
940  */
941 static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
942 				  struct ubi_vid_io_buf *vidb, const void *buf,
943 				  int offset, int len)
944 {
945 	struct ubi_device *ubi = vol->ubi;
946 	int pnum, opnum, err, vol_id = vol->vol_id;
947 
948 	pnum = ubi_wl_get_peb(ubi);
949 	if (pnum < 0) {
950 		err = pnum;
951 		goto out_put;
952 	}
953 
954 	opnum = vol->eba_tbl->entries[lnum].pnum;
955 
956 	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
957 		len, offset, vol_id, lnum, pnum);
958 
959 	err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
960 	if (err) {
961 		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
962 			 vol_id, lnum, pnum);
963 		goto out_put;
964 	}
965 
966 	if (len) {
967 		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
968 		if (err) {
969 			ubi_warn(ubi,
970 				 "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
971 				 len, offset, vol_id, lnum, pnum);
972 			goto out_put;
973 		}
974 	}
975 
976 	vol->eba_tbl->entries[lnum].pnum = pnum;
977 
978 out_put:
979 	up_read(&ubi->fm_eba_sem);
980 
981 	if (err && pnum >= 0)
982 		err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
983 	else if (!err && opnum >= 0)
984 		err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
985 
986 	return err;
987 }
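/*
 * Illustrative sketch of how the write paths below drive this helper (the
 * loop is the one used by ubi_eba_write_leb()): on a programming failure
 * (-EIO) with bad-PEB handling enabled, the VID header gets a fresh sequence
 * number and another PEB is tried, up to %UBI_IO_RETRIES additional times.
 *
 *	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
 *		err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
 *		if (err != -EIO || !ubi->bad_allowed)
 *			break;
 *		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 *	}
 */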
988 
989 /**
990  * ubi_eba_write_leb - write data to dynamic volume.
991  * @ubi: UBI device description object
992  * @vol: volume description object
993  * @lnum: logical eraseblock number
994  * @buf: the data to write
995  * @offset: offset within the logical eraseblock where to write
996  * @len: how many bytes to write
997  *
998  * This function writes data to logical eraseblock @lnum of a dynamic volume
999  * @vol. Returns zero in case of success and a negative error code in case
1000  * of failure. In case of error, it is possible that something was still
1001  * written to the flash media, but it may be garbage.
1002  * This function retries %UBI_IO_RETRIES times before giving up.
1003  */
1004 int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
1005 		      const void *buf, int offset, int len)
1006 {
1007 	int err, pnum, tries, vol_id = vol->vol_id;
1008 	struct ubi_vid_io_buf *vidb;
1009 	struct ubi_vid_hdr *vid_hdr;
1010 
1011 	if (ubi->ro_mode)
1012 		return -EROFS;
1013 
1014 	err = leb_write_lock(ubi, vol_id, lnum);
1015 	if (err)
1016 		return err;
1017 
1018 	pnum = vol->eba_tbl->entries[lnum].pnum;
1019 	if (pnum >= 0) {
1020 		err = check_mapping(ubi, vol, lnum, &pnum);
1021 		if (err < 0)
1022 			goto out;
1023 	}
1024 
1025 	if (pnum >= 0) {
1026 		dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
1027 			len, offset, vol_id, lnum, pnum);
1028 
1029 		err = ubi_io_write_data(ubi, buf, pnum, offset, len);
1030 		if (err) {
1031 			ubi_warn(ubi, "failed to write data to PEB %d", pnum);
1032 			if (err == -EIO && ubi->bad_allowed)
1033 				err = recover_peb(ubi, pnum, vol_id, lnum, buf,
1034 						  offset, len);
1035 		}
1036 
1037 		goto out;
1038 	}
1039 
1040 	/*
1041 	 * The logical eraseblock is not mapped. We have to get a free physical
1042 	 * eraseblock and write the volume identifier header there first.
1043 	 */
1044 	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
1045 	if (!vidb) {
1046 		leb_write_unlock(ubi, vol_id, lnum);
1047 		return -ENOMEM;
1048 	}
1049 
1050 	vid_hdr = ubi_get_vid_hdr(vidb);
1051 
1052 	vid_hdr->vol_type = UBI_VID_DYNAMIC;
1053 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1054 	vid_hdr->vol_id = cpu_to_be32(vol_id);
1055 	vid_hdr->lnum = cpu_to_be32(lnum);
1056 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
1057 	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
1058 
1059 	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
1060 		err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
1061 		if (err != -EIO || !ubi->bad_allowed)
1062 			break;
1063 
1064 		/*
1065 		 * Fortunately, this is the first write operation to this
1066 		 * physical eraseblock, so just put it and request a new one.
1067 		 * We assume that if this physical eraseblock went bad, the
1068 		 * erase code will handle that.
1069 		 */
1070 		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1071 		ubi_msg(ubi, "try another PEB");
1072 	}
1073 
1074 	ubi_free_vid_buf(vidb);
1075 
1076 out:
1077 	if (err)
1078 		ubi_ro_mode(ubi);
1079 
1080 	leb_write_unlock(ubi, vol_id, lnum);
1081 
1082 	return err;
1083 }
1084 
1085 /**
1086  * ubi_eba_write_leb_st - write data to static volume.
1087  * @ubi: UBI device description object
1088  * @vol: volume description object
1089  * @lnum: logical eraseblock number
1090  * @buf: data to write
1091  * @len: how many bytes to write
1092  * @used_ebs: how many logical eraseblocks will this volume contain
1093  *
1094  * This function writes data to logical eraseblock @lnum of static volume
1095  * @vol. The @used_ebs argument should contain the total number of logical
1096  * eraseblocks in this static volume.
1097  *
1098  * When writing to the last logical eraseblock, the @len argument doesn't have
1099  * to be aligned to the minimal I/O unit size. Instead, it has to be equal
1100  * to the real data size, although the @buf buffer still has to contain the
1101  * alignment padding. In all other cases, @len has to be aligned.
1102  *
1103  * It is prohibited to write more than once to logical eraseblocks of static
1104  * volumes. This function returns zero in case of success and a negative error
1105  * code in case of failure.
1106  */
1107 int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
1108 			 int lnum, const void *buf, int len, int used_ebs)
1109 {
1110 	int err, tries, data_size = len, vol_id = vol->vol_id;
1111 	struct ubi_vid_io_buf *vidb;
1112 	struct ubi_vid_hdr *vid_hdr;
1113 	uint32_t crc;
1114 
1115 	if (ubi->ro_mode)
1116 		return -EROFS;
1117 
1118 	if (lnum == used_ebs - 1)
1119 		/* If this is the last LEB @len may be unaligned */
1120 		len = ALIGN(data_size, ubi->min_io_size);
1121 	else
1122 		ubi_assert(!(len & (ubi->min_io_size - 1)));
1123 
1124 	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
1125 	if (!vidb)
1126 		return -ENOMEM;
1127 
1128 	vid_hdr = ubi_get_vid_hdr(vidb);
1129 
1130 	err = leb_write_lock(ubi, vol_id, lnum);
1131 	if (err)
1132 		goto out;
1133 
1134 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1135 	vid_hdr->vol_id = cpu_to_be32(vol_id);
1136 	vid_hdr->lnum = cpu_to_be32(lnum);
1137 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
1138 	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
1139 
1140 	crc = crc32(UBI_CRC32_INIT, buf, data_size);
1141 	vid_hdr->vol_type = UBI_VID_STATIC;
1142 	vid_hdr->data_size = cpu_to_be32(data_size);
1143 	vid_hdr->used_ebs = cpu_to_be32(used_ebs);
1144 	vid_hdr->data_crc = cpu_to_be32(crc);
1145 
1146 	ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0);
1147 
1148 	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
1149 		err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
1150 		if (err != -EIO || !ubi->bad_allowed)
1151 			break;
1152 
1153 		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1154 		ubi_msg(ubi, "try another PEB");
1155 	}
1156 
1157 	if (err)
1158 		ubi_ro_mode(ubi);
1159 
1160 	leb_write_unlock(ubi, vol_id, lnum);
1161 
1162 out:
1163 	ubi_free_vid_buf(vidb);
1164 
1165 	return err;
1166 }
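/*
 * Worked example of the @len handling in ubi_eba_write_leb_st() above
 * (illustrative numbers): with ubi->min_io_size == 2048 and 1000 bytes of
 * payload in the last LEB of a static volume, the caller passes len == 1000;
 * the function computes the CRC over those 1000 bytes but pads the write out
 * to ALIGN(1000, 2048) == 2048 bytes, which is why @buf must already contain
 * the alignment padding.
 */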
1167 
1168 /**
1169  * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
1170  * @ubi: UBI device description object
1171  * @vol: volume description object
1172  * @lnum: logical eraseblock number
1173  * @buf: data to write
1174  * @len: how many bytes to write
1175  *
1176  * This function changes the contents of a logical eraseblock atomically. @buf
1177  * has to contain new logical eraseblock data, and @len - the length of the
1178  * data, which has to be aligned. This function guarantees that in case of an
1179  * unclean reboot the old contents are preserved. Returns zero in case of
1180  * success and a negative error code in case of failure.
1181  *
1182  * UBI reserves one PEB for the "atomic LEB change" operation, so only one
1183  * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
1184  */
1185 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
1186 			      int lnum, const void *buf, int len)
1187 {
1188 	int err, tries, vol_id = vol->vol_id;
1189 	struct ubi_vid_io_buf *vidb;
1190 	struct ubi_vid_hdr *vid_hdr;
1191 	uint32_t crc;
1192 
1193 	if (ubi->ro_mode)
1194 		return -EROFS;
1195 
1196 	if (len == 0) {
1197 		/*
1198 		 * Special case when data length is zero. In this case the LEB
1199 		 * has to be unmapped and mapped somewhere else.
1200 		 */
1201 		err = ubi_eba_unmap_leb(ubi, vol, lnum);
1202 		if (err)
1203 			return err;
1204 		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
1205 	}
1206 
1207 	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
1208 	if (!vidb)
1209 		return -ENOMEM;
1210 
1211 	vid_hdr = ubi_get_vid_hdr(vidb);
1212 
1213 	mutex_lock(&ubi->alc_mutex);
1214 	err = leb_write_lock(ubi, vol_id, lnum);
1215 	if (err)
1216 		goto out_mutex;
1217 
1218 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1219 	vid_hdr->vol_id = cpu_to_be32(vol_id);
1220 	vid_hdr->lnum = cpu_to_be32(lnum);
1221 	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
1222 	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
1223 
1224 	crc = crc32(UBI_CRC32_INIT, buf, len);
1225 	vid_hdr->vol_type = UBI_VID_DYNAMIC;
1226 	vid_hdr->data_size = cpu_to_be32(len);
1227 	vid_hdr->copy_flag = 1;
1228 	vid_hdr->data_crc = cpu_to_be32(crc);
1229 
1230 	dbg_eba("change LEB %d:%d", vol_id, lnum);
1231 
1232 	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
1233 		err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
1234 		if (err != -EIO || !ubi->bad_allowed)
1235 			break;
1236 
1237 		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1238 		ubi_msg(ubi, "try another PEB");
1239 	}
1240 
1241 	/*
1242 	 * Either this flash device does not admit of bad eraseblocks, or
1243 	 * something nasty and unexpected happened. Switch to read-only
1244 	 * mode just in case.
1245 	 */
1246 	if (err)
1247 		ubi_ro_mode(ubi);
1248 
1249 	leb_write_unlock(ubi, vol_id, lnum);
1250 
1251 out_mutex:
1252 	mutex_unlock(&ubi->alc_mutex);
1253 	ubi_free_vid_buf(vidb);
1254 	return err;
1255 }
1256 
1257 /**
1258  * is_error_sane - check whether a read error is sane.
1259  * @err: code of the error happened during reading
1260  *
1261  * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
1262  * cannot read data from the target PEB (an error @err happened). If the error
1263  * code is sane, then we treat this error as non-fatal. Otherwise the error is
1264  * fatal and UBI will be switched to R/O mode later.
1265  *
1266  * The idea is that we try not to switch to R/O mode if the read error is
1267  * something which suggests there was a real read problem, e.g., %-EIO, or a
1268  * failed memory allocation (%-ENOMEM). Otherwise, it is safer to switch to R/O
1269  * mode, simply because we do not know what happened at the MTD level, and we
1270  * cannot handle this. E.g., the underlying driver may have become crazy, and
1271  * it is safer to switch to R/O mode to preserve the data.
1272  *
1273  * And bear in mind, this is about reading from the target PEB, i.e. the PEB
1274  * which we have just written.
1275  */
1276 static int is_error_sane(int err)
1277 {
1278 	if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
1279 	    err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
1280 		return 0;
1281 	return 1;
1282 }
1283 
1284 /**
1285  * ubi_eba_copy_leb - copy logical eraseblock.
1286  * @ubi: UBI device description object
1287  * @from: physical eraseblock number from where to copy
1288  * @to: physical eraseblock number where to copy
1289  * @vidb: VID buffer holding the VID header of the @from physical eraseblock
1290  *
1291  * This function copies logical eraseblock from physical eraseblock @from to
1292  * physical eraseblock @to. The @vidb buffer may be changed by this
1293  * function. Returns:
1294  *   o %0 in case of success;
1295  *   o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
1296  *   o a negative error code in case of failure.
1297  */
1298 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1299 		     struct ubi_vid_io_buf *vidb)
1300 {
1301 	int err, vol_id, lnum, data_size, aldata_size, idx;
1302 	struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
1303 	struct ubi_volume *vol;
1304 	uint32_t crc;
1305 
1306 	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
1307 
1308 	vol_id = be32_to_cpu(vid_hdr->vol_id);
1309 	lnum = be32_to_cpu(vid_hdr->lnum);
1310 
1311 	dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
1312 
1313 	if (vid_hdr->vol_type == UBI_VID_STATIC) {
1314 		data_size = be32_to_cpu(vid_hdr->data_size);
1315 		aldata_size = ALIGN(data_size, ubi->min_io_size);
1316 	} else
1317 		data_size = aldata_size =
1318 			    ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
1319 
1320 	idx = vol_id2idx(ubi, vol_id);
1321 	spin_lock(&ubi->volumes_lock);
1322 	/*
1323 	 * Note, we may race with volume deletion, which means that the volume
1324 	 * this logical eraseblock belongs to might be being deleted. Since the
1325 	 * volume deletion un-maps all the volume's logical eraseblocks, it will
1326 	 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
1327 	 */
1328 	vol = ubi->volumes[idx];
1329 	spin_unlock(&ubi->volumes_lock);
1330 	if (!vol) {
1331 		/* No need to do further work, cancel */
1332 		dbg_wl("volume %d is being removed, cancel", vol_id);
1333 		return MOVE_CANCEL_RACE;
1334 	}
1335 
1336 	/*
1337 	 * We do not want anybody to write to this logical eraseblock while we
1338 	 * are moving it, so lock it.
1339 	 *
1340 	 * Note, we are using non-waiting locking here, because we cannot sleep
1341 	 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
1342 	 * unmapping the LEB which is mapped to the PEB we are going to move
1343 	 * (@from). This task locks the LEB and goes to sleep in the
1344 	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
1345 	 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
1346 	 * LEB is already locked, we just do not move it and return
1347 	 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
1348 	 * we do not know the reason for the contention - it may be just a
1349 	 * normal I/O on this LEB, so we want to re-try.
1350 	 */
1351 	err = leb_write_trylock(ubi, vol_id, lnum);
1352 	if (err) {
1353 		dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
1354 		return MOVE_RETRY;
1355 	}
1356 
1357 	/*
1358 	 * The LEB might have been put meanwhile, and the task which put it is
1359 	 * probably waiting on @ubi->move_mutex. No need to continue the work,
1360 	 * cancel it.
1361 	 */
1362 	if (vol->eba_tbl->entries[lnum].pnum != from) {
1363 		dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
1364 		       vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum);
1365 		err = MOVE_CANCEL_RACE;
1366 		goto out_unlock_leb;
1367 	}
1368 
1369 	/*
1370 	 * OK, now the LEB is locked and we can safely start moving it. Since
1371 	 * this function utilizes the @ubi->peb_buf buffer which is shared
1372 	 * with some other functions - we lock the buffer by taking the
1373 	 * @ubi->buf_mutex.
1374 	 */
1375 	mutex_lock(&ubi->buf_mutex);
1376 	dbg_wl("read %d bytes of data", aldata_size);
1377 	err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
1378 	if (err && err != UBI_IO_BITFLIPS) {
1379 		ubi_warn(ubi, "error %d while reading data from PEB %d",
1380 			 err, from);
1381 		err = MOVE_SOURCE_RD_ERR;
1382 		goto out_unlock_buf;
1383 	}
1384 
1385 	/*
1386 	 * Now we have got to calculate how much data we have to copy. In
1387 	 * case of a static volume it is fairly easy - the VID header contains
1388 	 * the data size. In case of a dynamic volume it is more difficult - we
1389 	 * have to read the contents, cut 0xFF bytes from the end and copy only
1390 	 * the first part. We must do this to avoid writing 0xFF bytes as it
1391 	 * may have some side-effects. And not only this. It is important not
1392 	 * to include those 0xFFs in the CRC because later they may be filled
1393 	 * with data.
1394 	 */
1395 	if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
1396 		aldata_size = data_size =
1397 			ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
1398 
1399 	cond_resched();
1400 	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
1401 	cond_resched();
1402 
1403 	/*
1404 	 * It may turn out that the whole @from physical eraseblock
1405 	 * contains only 0xFF bytes. Then we only have to write the VID header
1406 	 * and no data at all. This also means we should not set
1407 	 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
1408 	 */
1409 	if (data_size > 0) {
1410 		vid_hdr->copy_flag = 1;
1411 		vid_hdr->data_size = cpu_to_be32(data_size);
1412 		vid_hdr->data_crc = cpu_to_be32(crc);
1413 	}
1414 	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1415 
1416 	err = ubi_io_write_vid_hdr(ubi, to, vidb);
1417 	if (err) {
1418 		if (err == -EIO)
1419 			err = MOVE_TARGET_WR_ERR;
1420 		goto out_unlock_buf;
1421 	}
1422 
1423 	cond_resched();
1424 
1425 	/* Read the VID header back and check if it was written correctly */
1426 	err = ubi_io_read_vid_hdr(ubi, to, vidb, 1);
1427 	if (err) {
1428 		if (err != UBI_IO_BITFLIPS) {
1429 			ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
1430 				 err, to);
1431 			if (is_error_sane(err))
1432 				err = MOVE_TARGET_RD_ERR;
1433 		} else
1434 			err = MOVE_TARGET_BITFLIPS;
1435 		goto out_unlock_buf;
1436 	}
1437 
1438 	if (data_size > 0) {
1439 		err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
1440 		if (err) {
1441 			if (err == -EIO)
1442 				err = MOVE_TARGET_WR_ERR;
1443 			goto out_unlock_buf;
1444 		}
1445 
1446 		cond_resched();
1447 	}
1448 
1449 	ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
1450 	vol->eba_tbl->entries[lnum].pnum = to;
1451 
1452 out_unlock_buf:
1453 	mutex_unlock(&ubi->buf_mutex);
1454 out_unlock_leb:
1455 	leb_write_unlock(ubi, vol_id, lnum);
1456 	return err;
1457 }
1458 
1459 /**
1460  * print_rsvd_warning - warn about not having enough reserved PEBs.
1461  * @ubi: UBI device description object
1462  *
1463  * This is a helper function for 'ubi_eba_init()' which is called when UBI
1464  * cannot reserve enough PEBs for bad block handling. This function makes a
1465  * decision whether we have to print a warning or not. The algorithm is as
1466  * follows:
1467  *   o if this is a new UBI image, then just print the warning
1468  *   o if this is an UBI image which has already been used for some time, print
1469  *     a warning only if we could reserve less than 10% of the expected number
1470  *     of reserved PEBs.
1471  *
1472  * The idea is that when UBI is used, PEBs become bad, and the reserved pool
1473  * of PEBs becomes smaller, which is normal and we do not want to scare users
1474  * with a warning every time they attach the MTD device. This was an issue
1475  * reported by real users.
1476  */
1477 static void print_rsvd_warning(struct ubi_device *ubi,
1478 			       struct ubi_attach_info *ai)
1479 {
1480 	/*
1481 	 * The 1 << 18 threshold is picked arbitrarily, just a reasonably
1482 	 * large number to distinguish between newly flashed and used images.
1483 	 */
1484 	if (ai->max_sqnum > (1 << 18)) {
1485 		int min = ubi->beb_rsvd_level / 10;
1486 
1487 		if (!min)
1488 			min = 1;
1489 		if (ubi->beb_rsvd_pebs > min)
1490 			return;
1491 	}
1492 
1493 	ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
1494 		 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1495 	if (ubi->corr_peb_count)
1496 		ubi_warn(ubi, "%d PEBs are corrupted and not used",
1497 			 ubi->corr_peb_count);
1498 }
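/*
 * Worked example of the rule above (illustrative numbers): for an image that
 * has already seen use (ai->max_sqnum > 1 << 18) and an expected reserve of
 * ubi->beb_rsvd_level == 40 PEBs, min becomes 40 / 10 == 4, so the warning is
 * only printed if 4 or fewer PEBs could actually be reserved.
 */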
1499 
1500 /**
1501  * self_check_eba - run a self check on the EBA table constructed by fastmap.
1502  * @ubi: UBI device description object
1503  * @ai_fastmap: UBI attach info object created by fastmap
1504  * @ai_scan: UBI attach info object created by scanning
1505  *
1506  * Returns < 0 in case of an internal error, 0 otherwise.
1507  * If a bad EBA table entry was found it will be printed out and
1508  * ubi_assert() triggers.
1509  */
1510 int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
1511 		   struct ubi_attach_info *ai_scan)
1512 {
1513 	int i, j, num_volumes, ret = 0;
1514 	int **scan_eba, **fm_eba;
1515 	struct ubi_ainf_volume *av;
1516 	struct ubi_volume *vol;
1517 	struct ubi_ainf_peb *aeb;
1518 	struct rb_node *rb;
1519 
1520 	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1521 
1522 	scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
1523 	if (!scan_eba)
1524 		return -ENOMEM;
1525 
1526 	fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
1527 	if (!fm_eba) {
1528 		kfree(scan_eba);
1529 		return -ENOMEM;
1530 	}
1531 
1532 	for (i = 0; i < num_volumes; i++) {
1533 		vol = ubi->volumes[i];
1534 		if (!vol)
1535 			continue;
1536 
1537 		scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
1538 				      GFP_KERNEL);
1539 		if (!scan_eba[i]) {
1540 			ret = -ENOMEM;
1541 			goto out_free;
1542 		}
1543 
1544 		fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
1545 				    GFP_KERNEL);
1546 		if (!fm_eba[i]) {
1547 			ret = -ENOMEM;
1548 			goto out_free;
1549 		}
1550 
1551 		for (j = 0; j < vol->reserved_pebs; j++)
1552 			scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
1553 
1554 		av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
1555 		if (!av)
1556 			continue;
1557 
1558 		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1559 			scan_eba[i][aeb->lnum] = aeb->pnum;
1560 
1561 		av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
1562 		if (!av)
1563 			continue;
1564 
1565 		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1566 			fm_eba[i][aeb->lnum] = aeb->pnum;
1567 
1568 		for (j = 0; j < vol->reserved_pebs; j++) {
1569 			if (scan_eba[i][j] != fm_eba[i][j]) {
1570 				if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
1571 					fm_eba[i][j] == UBI_LEB_UNMAPPED)
1572 					continue;
1573 
1574 				ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
1575 					vol->vol_id, j, fm_eba[i][j],
1576 					scan_eba[i][j]);
1577 				ubi_assert(0);
1578 			}
1579 		}
1580 	}
1581 
1582 out_free:
1583 	for (i = 0; i < num_volumes; i++) {
1584 		if (!ubi->volumes[i])
1585 			continue;
1586 
1587 		kfree(scan_eba[i]);
1588 		kfree(fm_eba[i]);
1589 	}
1590 
1591 	kfree(scan_eba);
1592 	kfree(fm_eba);
1593 	return ret;
1594 }
1595 
1596 /**
1597  * ubi_eba_init - initialize the EBA sub-system using attaching information.
1598  * @ubi: UBI device description object
1599  * @ai: attaching information
1600  *
1601  * This function returns zero in case of success and a negative error code in
1602  * case of failure.
1603  */
1604 int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1605 {
1606 	int i, err, num_volumes;
1607 	struct ubi_ainf_volume *av;
1608 	struct ubi_volume *vol;
1609 	struct ubi_ainf_peb *aeb;
1610 	struct rb_node *rb;
1611 
1612 	dbg_eba("initialize EBA sub-system");
1613 
1614 	spin_lock_init(&ubi->ltree_lock);
1615 	mutex_init(&ubi->alc_mutex);
1616 	ubi->ltree = RB_ROOT;
1617 
1618 	ubi->global_sqnum = ai->max_sqnum + 1;
1619 	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1620 
1621 	for (i = 0; i < num_volumes; i++) {
1622 		struct ubi_eba_table *tbl;
1623 
1624 		vol = ubi->volumes[i];
1625 		if (!vol)
1626 			continue;
1627 
1628 		cond_resched();
1629 
1630 		tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
1631 		if (IS_ERR(tbl)) {
1632 			err = PTR_ERR(tbl);
1633 			goto out_free;
1634 		}
1635 
1636 		ubi_eba_replace_table(vol, tbl);
1637 
1638 		av = ubi_find_av(ai, idx2vol_id(ubi, i));
1639 		if (!av)
1640 			continue;
1641 
1642 		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
1643 			if (aeb->lnum >= vol->reserved_pebs) {
1644 				/*
1645 				 * This may happen in case of an unclean reboot
1646 				 * during re-size.
1647 				 */
1648 				ubi_move_aeb_to_list(av, aeb, &ai->erase);
1649 			} else {
1650 				struct ubi_eba_entry *entry;
1651 
1652 				entry = &vol->eba_tbl->entries[aeb->lnum];
1653 				entry->pnum = aeb->pnum;
1654 			}
1655 		}
1656 	}
1657 
1658 	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1659 		ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1660 			ubi->avail_pebs, EBA_RESERVED_PEBS);
1661 		if (ubi->corr_peb_count)
1662 			ubi_err(ubi, "%d PEBs are corrupted and not used",
1663 				ubi->corr_peb_count);
1664 		err = -ENOSPC;
1665 		goto out_free;
1666 	}
1667 	ubi->avail_pebs -= EBA_RESERVED_PEBS;
1668 	ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1669 
1670 	if (ubi->bad_allowed) {
1671 		ubi_calculate_reserved(ubi);
1672 
1673 		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
1674 			/* Not enough free physical eraseblocks */
1675 			ubi->beb_rsvd_pebs = ubi->avail_pebs;
1676 			print_rsvd_warning(ubi, ai);
1677 		} else
1678 			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
1679 
1680 		ubi->avail_pebs -= ubi->beb_rsvd_pebs;
1681 		ubi->rsvd_pebs  += ubi->beb_rsvd_pebs;
1682 	}
1683 
1684 	dbg_eba("EBA sub-system is initialized");
1685 	return 0;
1686 
1687 out_free:
1688 	for (i = 0; i < num_volumes; i++) {
1689 		if (!ubi->volumes[i])
1690 			continue;
1691 		ubi_eba_replace_table(ubi->volumes[i], NULL);
1692 	}
1693 	return err;
1694 }
1695