/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 */

#include <linux/crc32.h>
#include "ubi.h"

/**
 * init_seen - allocate the array used for fastmap debugging.
 * @ubi: UBI device description object
 */
static inline int *init_seen(struct ubi_device *ubi)
{
	int *ret;

	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = kcalloc(ubi->peb_count, sizeof(int), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	return ret;
}

/**
 * free_seen - free the seen logic integer array.
 * @seen: integer array of @ubi->peb_count size
 */
static inline void free_seen(int *seen)
{
	kfree(seen);
}

/**
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: the PEB to be marked as seen
 * @seen: integer array of @ubi->peb_count size
 */
static inline void set_seen(struct ubi_device *ubi, int pnum, int *seen)
{
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return;

	seen[pnum] = 1;
}

/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: integer array of @ubi->peb_count size
 */
static int self_check_seen(struct ubi_device *ubi, int *seen)
{
	int pnum, ret = 0;

	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return 0;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		if (!seen[pnum] && ubi->lookuptbl[pnum]) {
			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
			ret = -EINVAL;
		}
	}

	return ret;
}
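
/*
 * The three helpers above implement a simple self check: init_seen()
 * allocates one slot per PEB, ubi_write_fastmap() calls set_seen() for
 * every PEB it records, and self_check_seen() verifies afterwards that
 * no PEB known to the WL sub-system (via @ubi->lookuptbl) was missed.
 * A rough usage sketch, mirroring ubi_write_fastmap() below:
 *
 *	int *seen = init_seen(ubi);
 *
 *	if (IS_ERR(seen))
 *		return PTR_ERR(seen);
 *	set_seen(ubi, pnum, seen);	// for each PEB written to the fastmap
 *	ret = self_check_seen(ubi, seen);
 *	free_seen(seen);
 *
 * All of this is a no-op unless fastmap debugging is enabled.
 */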

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

	size = sizeof(struct ubi_fm_sb) +
	       sizeof(struct ubi_fm_hdr) +
	       sizeof(struct ubi_fm_scan_pool) +
	       sizeof(struct ubi_fm_scan_pool) +
	       (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
	       (sizeof(struct ubi_fm_eba) +
	       (ubi->peb_count * sizeof(__be32))) +
	       sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}
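
/*
 * Worst-case on-flash layout implied by the calculation above; every
 * structure is serialized back to back into @ubi->fm_buf:
 *
 *	ubi_fm_sb			superblock, CRC over the whole buffer
 *	ubi_fm_hdr			PEB and volume counts
 *	ubi_fm_scan_pool		the normal pool
 *	ubi_fm_scan_pool		the WL pool
 *	ubi_fm_ec * peb_count		EC values (free/used/scrub/erase PEBs)
 *	per volume:
 *	  ubi_fm_volhdr
 *	  ubi_fm_eba + __be32 * reserved_pebs
 *
 * The result is rounded up to full LEBs because the fastmap occupies
 * whole erase blocks (the anchor PEB plus optional data PEBs).
 */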

/**
 * new_fm_vhdr - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_hdr on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_hdr *new;

	new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	new->vol_type = UBI_VID_DYNAMIC;
	new->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	new->compat = UBI_COMPAT_DELETE;

out:
	return new;
}

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new LEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->pnum = pnum;
	aeb->ec = ec;
	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success, NULL if out of
 * memory, or an ERR_PTR if the volume already exists.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;
		av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (vol_id > av->vol_id)
			p = &(*p)->rb_left;
		else if (vol_id < av->vol_id)
			p = &(*p)->rb_right;
		else
			return ERR_PTR(-EEXIST);
	}

	av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
	if (!av)
		goto out;

	av->highest_lnum = av->leb_count = av->used_ebs = 0;
	av->vol_id = vol_id;
	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	av->root = RB_ROOT;
	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = used_ebs;

	dbg_bld("found volume (ID %i)", vol_id);

	rb_link_node(&av->rb, parent, p);
	rb_insert_color(&av->rb, &ai->volumes);

out:
	return av;
}
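
/*
 * Note that the comparison above puts larger vol_ids into the left
 * subtree; process_pool_aeb() walks @ai->volumes with the same reversed
 * ordering, so both sides of the lookup stay consistent.
 */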

/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the to be assigned SEB
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &av->root.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = kmem_cache_alloc(ai->aeb_slab_cache,
						  GFP_KERNEL);
			if (!victim)
				return -ENOMEM;

			victim->ec = aeb->ec;
			victim->pnum = aeb->pnum;
			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			aeb->sqnum = new_aeb->sqnum;
			kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}
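
/*
 * Summary of update_vol()'s conflict handling: when the pool contains a
 * PEB for an LEB that already has a mapping, ubi_compare_lebs() decides
 * via sequence numbers which copy is valid (bit 0 of the result set
 * means @new_aeb is newer, as the check above relies on). The loser is
 * queued on @ai->erase and the tree entry is updated in place, so each
 * LEB keeps exactly one mapping.
 */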

/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	struct ubi_ainf_volume *av, *tmp_av = NULL;
	struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
	int found = 0;

	if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
	    be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	while (*p) {
		parent = *p;
		tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);

		if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
			p = &(*p)->rb_left;
		else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
			p = &(*p)->rb_right;
		else {
			found = 1;
			break;
		}
	}

	if (found)
		av = tmp_av;
	else {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		kmem_cache_free(ai->aeb_slab_cache, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 *
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
		av = rb_entry(node, struct ubi_ainf_volume, rb);

		for (node2 = rb_first(&av->root); node2;
		     node2 = rb_next(node2)) {
			aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				av->leb_count--;
				kmem_cache_free(ai->aeb_slab_cache, aeb);
				return;
			}
		}
	}
}

/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
{
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		kfree(ech);
		return -ENOMEM;
	}

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				image_seq, ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);
			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);

			if (err == UBI_IO_FF_BITFLIPS)
				scrub = 1;

			err = add_aeb(ai, free, pnum, ec, scrub);
			if (err) {
				ret = err;
				goto out;
			}
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
						   GFP_KERNEL);
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->ec = be64_to_cpu(ech->ec);
			new_aeb->pnum = pnum;
			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}
	}

out:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
	return ret;
}
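
/*
 * A pool PEB thus ends up in one of three states: still erased
 * (UBI_IO_FF*), in which case it goes to the @free list; carrying a
 * valid VID header, in which case process_pool_aeb() merges it into the
 * volume trees; or unreadable, which invalidates the whole fastmap and
 * forces a full scan.
 */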

/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}

/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl->size);
	wl_pool_size = be16_to_cpu(fmpl_wl->size);
	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);

	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (IS_ERR(av)) {
			if (PTR_ERR(av) == -EEXIST)
				ubi_err(ubi, "volume (ID %i) already exists",
					be32_to_cpu(fmvhdr->vol_id));

			goto fail_bad;
		}

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if (pnum < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}

	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
	}

	return ret;
}

/**
 * find_fm_anchor - find the most recent Fastmap superblock (anchor).
 * @ai: UBI attach info to be filled
 */
static int find_fm_anchor(struct ubi_attach_info *ai)
{
	int ret = -1;
	struct ubi_ainf_peb *aeb;
	unsigned long long max_sqnum = 0;

	list_for_each_entry(aeb, &ai->fastmap, u.list) {
		if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
			max_sqnum = aeb->sqnum;
			ret = aeb->pnum;
		}
	}

	return ret;
}
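
/*
 * Several anchor PEBs may survive a powercut during a fastmap update;
 * picking the superblock with the highest sequence number selects the
 * most recently written fastmap. A return value of -1 means no anchor
 * was found at all.
 */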

/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @scan_ai: UBI attach info from the first 64 PEBs,
 *           used to find the most recent Fastmap data structure
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     struct ubi_attach_info *scan_ai)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	struct ubi_ainf_peb *tmp_aeb, *aeb;
	int i, used_blocks, pnum, fm_anchor, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	fm_anchor = find_fm_anchor(scan_ai);
	if (fm_anchor < 0)
		return UBI_NO_FASTMAP;

	/* Move all (possible) fastmap blocks into our new attach structure. */
	list_for_each_entry_safe(aeb, tmp_aeb, &scan_ai->fastmap, u.list)
		list_move_tail(&aeb->u.list, &ai->fastmap);

	down_write(&ubi->fm_protect);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vh) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				image_seq, ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
				  ubi->leb_start, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
			tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kfree(fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;
	ubi->fast_attach = 1;

	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
out:
	up_write(&ubi->fm_protect);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_hdr(ubi, vh);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}
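
/*
 * All failure paths above converge on the out label; returning
 * UBI_BAD_FASTMAP (rather than a negative errno) tells the attach code
 * to fall back to a full scan, hence the "doing a full scan" message.
 */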

/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the to be written fastmap
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	struct rb_node *tmp_rb;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;
	int *seen_pebs = NULL;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avhdr) {
		ret = -ENOMEM;
		goto out;
	}

	dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvhdr) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
		seen_pebs = NULL;
		goto out_kfree;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++) {
		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
	}

	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}

	ubi_for_each_protected_peb(ubi, i, wl_e) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++)
			feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
				   new_fm->e[i]->pnum, ubi->leb_start,
				   ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	ret = self_check_seen(ubi, seen_pebs);
	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_hdr(ubi, avhdr);
	ubi_free_vid_hdr(ubi, dvhdr);
	free_seen(seen_pebs);
out:
	return ret;
}
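
/*
 * Write ordering above: the anchor VID header goes out first, then the
 * data block VID headers, then the payload with the CRC computed over
 * the whole buffer while data_crc itself is zero. The read side in
 * ubi_scan_fastmap() zeroes data_crc again before re-computing the
 * checksum, so both sides agree.
 */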

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}
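
/*
 * ubi_io_sync_erase() returns the number of erasures performed, which is
 * why it is added to the old EC value before the header is written back;
 * the caller (ubi_update_fastmap()) stores the returned EC in the reused
 * wl entry.
 */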

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue or b) fall back to RO mode to have the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_hdr *vh = NULL;

	if (!ubi->fm)
		return 0;

	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm)
		goto out;

	vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vh)
		goto out_free_fm;

	ret = -ENOSPC;
	e = ubi_wl_get_fm_peb(ubi, 1);
	if (!e)
		goto out_free_fm;

	/*
	 * Create fake fastmap such that UBI will fall back
	 * to scanning mode.
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
	if (ret < 0) {
		ubi_wl_put_fm_peb(ubi, e, 0, 0);
		goto out_free_fm;
	}

	fm->used_blocks = 1;
	fm->e[0] = e;

	ubi->fm = fm;

out:
	ubi_free_vid_hdr(ubi, vh);
	return ret;

out_free_fm:
	kfree(fm);
	goto out;
}
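
/*
 * The "fake" fastmap consists of a fresh anchor PEB that carries only a
 * superblock VID header with a newer sequence number and no payload:
 * the next attach will pick this anchor, fail to validate the fastmap
 * data and fall back to a full scan.
 */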

/**
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
 */
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
{
	int i;

	if (!fm)
		return;

	for (i = 0; i < fm->used_blocks; i++) {
		if (fm->e[i]) {
			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
					  fm->to_be_tortured[i]);
			fm->e[i] = NULL;
		}
	}
}
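
/*
 * The to_be_tortured flag is set in ubi_scan_fastmap() whenever a
 * fastmap PEB was read back with bit-flips; passing it on here makes
 * the WL sub-system torture-test that PEB before reusing it.
 */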

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i, j;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	down_write(&ubi->fm_protect);
	down_write(&ubi->work_sem);
	down_write(&ubi->fm_eba_sem);

	ubi_refill_pools(ubi);
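
	/*
	 * The pools are refilled and the new fastmap is written with
	 * @fm_protect, @work_sem and @fm_eba_sem all held, so neither new
	 * works nor EBA changes can race with the state snapshot taken by
	 * ubi_write_fastmap() below.
	 */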

	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e) {
			if (old_fm && old_fm->e[i]) {
				ret = erase_block(ubi, old_fm->e[i]->pnum);
				if (ret < 0) {
					ubi_err(ubi, "could not erase old fastmap PEB");

					for (j = 1; j < i; j++) {
						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
								  j, 0);
						new_fm->e[j] = NULL;
					}
					goto err;
				}
				new_fm->e[i] = old_fm->e[i];
				old_fm->e[i] = NULL;
			} else {
				ubi_err(ubi, "could not get any free erase block");

				for (j = 1; j < i; j++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
					new_fm->e[j] = NULL;
				}

				ret = -ENOSPC;
				goto err;
			}
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm && old_fm->e[i]) {
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
				old_fm->e[i] = NULL;
			}
		}
	}

	/* Old fastmap is larger than the new one */
	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
					  old_fm->to_be_tortured[i]);
			old_fm->e[i] = NULL;
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
					new_fm->e[i] = NULL;
				}
				goto err;
			}
			new_fm->e[0] = old_fm->e[0];
			new_fm->e[0]->ec = ret;
			old_fm->e[0] = NULL;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);
			new_fm->e[0] = tmp_e;
			old_fm->e[0] = NULL;
		}
	} else {
		if (!tmp_e) {
			ubi_err(ubi, "could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++) {
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
				new_fm->e[i] = NULL;
			}

			ret = -ENOSPC;
			goto err;
		}
		new_fm->e[0] = tmp_e;
	}

	ret = ubi_write_fastmap(ubi, new_fm);

	if (ret)
		goto err;

out_unlock:
	up_write(&ubi->fm_eba_sem);
	up_write(&ubi->work_sem);
	up_write(&ubi->fm_protect);
	kfree(old_fm);
	return ret;

err:
	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);

	ret = invalidate_fastmap(ubi);
	if (ret < 0) {
		ubi_err(ubi, "Unable to invalidate current fastmap!");
		ubi_ro_mode(ubi);
	} else {
		return_fm_pebs(ubi, old_fm);
		return_fm_pebs(ubi, new_fm);
		ret = 0;
	}

	kfree(new_fm);
	goto out_unlock;
}