Lines matching full:ubi (UBI wear-leveling sub-system)
9 * UBI wear-leveling sub-system.
23 * done asynchronously in context of the per-UBI device background thread,
37 * As it was said, for the UBI sub-system all physical eraseblocks are either
92 #include "ubi.h"
125 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
126 static int self_check_in_wl_tree(const struct ubi_device *ubi,
128 static int self_check_in_pq(const struct ubi_device *ubi,
137 * the @ubi->used and @ubi->free RB-trees.
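[Editor's note] The fragment above refers to the @ubi->used and @ubi->free RB-trees, which hold one wear-leveling entry per physical eraseblock, ordered by erase counter. The following is a minimal userspace sketch of that ordering only; the struct and function names are illustrative, a sorted array stands in for the RB-tree, and nothing here is the kernel's data structure.

#include <stdio.h>
#include <stdlib.h>

struct wl_entry {
	int pnum;		/* physical eraseblock number */
	long long ec;		/* erase counter */
};

/* order by erase counter, PEB number as tie-breaker */
static int wl_cmp(const void *a, const void *b)
{
	const struct wl_entry *x = a, *y = b;

	if (x->ec != y->ec)
		return x->ec < y->ec ? -1 : 1;
	return x->pnum - y->pnum;
}

int main(void)
{
	struct wl_entry free_pebs[] = {
		{ .pnum = 7, .ec = 120 },
		{ .pnum = 3, .ec = 95 },
		{ .pnum = 9, .ec = 95 },
	};

	qsort(free_pebs, sizeof(free_pebs) / sizeof(free_pebs[0]),
	      sizeof(free_pebs[0]), wl_cmp);

	/* The least-worn PEB ends up in the "leftmost" position. */
	printf("least-worn free PEB: %d (EC %lld)\n",
	       free_pebs[0].pnum, free_pebs[0].ec);
	return 0;
}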
169 * @ubi: UBI device description object
175 static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e) in wl_entry_destroy() argument
177 ubi->lookuptbl[e->pnum] = NULL; in wl_entry_destroy()
183 * @ubi: UBI device description object
188 static int do_work(struct ubi_device *ubi) in do_work() argument
196 * @ubi->work_sem is used to synchronize with the workers. Workers take in do_work()
201 down_read(&ubi->work_sem); in do_work()
202 spin_lock(&ubi->wl_lock); in do_work()
203 if (list_empty(&ubi->works)) { in do_work()
204 spin_unlock(&ubi->wl_lock); in do_work()
205 up_read(&ubi->work_sem); in do_work()
209 wrk = list_entry(ubi->works.next, struct ubi_work, list); in do_work()
211 ubi->works_count -= 1; in do_work()
212 ubi_assert(ubi->works_count >= 0); in do_work()
213 spin_unlock(&ubi->wl_lock); in do_work()
220 err = wrk->func(ubi, wrk, 0); in do_work()
222 ubi_err(ubi, "work failed with error code %d", err); in do_work()
223 up_read(&ubi->work_sem); in do_work()
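[Editor's note] The do_work() hits above show the pattern of taking one pending item off @ubi->works under the spinlock and running its callback while holding the read side of @ubi->work_sem. Below is a stripped-down, single-threaded model of just the pop-and-dispatch step; locking is omitted and every name is an illustrative stand-in, not a kernel symbol.

#include <stdio.h>
#include <stdlib.h>

struct work {
	struct work *next;
	int (*func)(struct work *w);	/* e.g. an erase or a WL move */
};

static struct work *works;		/* pending work list */

static int do_one_work(void)
{
	struct work *w = works;
	int err;

	if (!w)
		return 0;		/* nothing queued */
	works = w->next;

	err = w->func(w);
	if (err)
		fprintf(stderr, "work failed with error code %d\n", err);
	free(w);
	return err;
}

static int dummy_erase(struct work *w)
{
	(void)w;
	puts("erasing one PEB");
	return 0;
}

int main(void)
{
	struct work *w = calloc(1, sizeof(*w));

	w->func = dummy_erase;
	works = w;
	return do_one_work();
}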
269 * @ubi: UBI device description object
275 static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e) in in_pq() argument
281 list_for_each_entry(p, &ubi->pq[i], u.list) in in_pq()
290 * @ubi: UBI device description object
293 * This function adds @e to the tail of the protection queue @ubi->pq, where
298 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) in prot_queue_add() argument
300 int pq_tail = ubi->pq_head - 1; in prot_queue_add()
305 list_add_tail(&e->u.list, &ubi->pq[pq_tail]); in prot_queue_add()
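[Editor's note] prot_queue_add() above places an entry one slot behind the current queue head, and the serve step (serve_prot_queue(), further down in this listing) releases the head slot and advances it, so a protected PEB stays protected for roughly one full trip around the queue. A minimal model of that circular behaviour, with per-slot lists reduced to counters and an assumed queue length; names are illustrative, not the kernel's:

#include <stdio.h>

#define PROT_QUEUE_LEN 10	/* assumed; stands in for UBI_PROT_QUEUE_LEN */

static int pq[PROT_QUEUE_LEN];	/* number of protected PEBs per slot */
static int pq_head;

static void prot_queue_add(void)
{
	int tail = pq_head - 1;

	if (tail < 0)
		tail = PROT_QUEUE_LEN - 1;	/* wrap around */
	pq[tail]++;
}

static void serve_prot_queue(void)
{
	/* entries in the head slot would move back to the used tree */
	printf("releasing %d PEB(s) from slot %d\n", pq[pq_head], pq_head);
	pq[pq_head] = 0;
	pq_head = (pq_head + 1) % PROT_QUEUE_LEN;
}

int main(void)
{
	int i;

	prot_queue_add();		/* protect one PEB "now" */
	for (i = 0; i < PROT_QUEUE_LEN; i++)
		serve_prot_queue();	/* released only on the last cycle */
	return 0;
}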
311 * @ubi: UBI device description object
318 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi, in find_wl_entry() argument
346 * @ubi: UBI device description object
353 static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi, in find_mean_wl_entry() argument
367 e = may_reserve_for_fm(ubi, e, root); in find_mean_wl_entry()
369 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2); in find_mean_wl_entry()
377 * @ubi: UBI device description object
382 static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi) in wl_get_wle() argument
386 e = find_mean_wl_entry(ubi, &ubi->free); in wl_get_wle()
388 ubi_err(ubi, "no free eraseblocks"); in wl_get_wle()
392 self_check_in_wl_tree(ubi, e, &ubi->free); in wl_get_wle()
398 rb_erase(&e->u.rb, &ubi->free); in wl_get_wle()
399 ubi->free_count--; in wl_get_wle()
407 * @ubi: UBI device description object
413 static int prot_queue_del(struct ubi_device *ubi, int pnum) in prot_queue_del() argument
417 e = ubi->lookuptbl[pnum]; in prot_queue_del()
421 if (self_check_in_pq(ubi, e)) in prot_queue_del()
431 * @ubi: UBI device description object
438 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in sync_erase() argument
447 err = self_check_ec(ubi, e->pnum, e->ec); in sync_erase()
451 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); in sync_erase()
455 err = ubi_io_sync_erase(ubi, e->pnum, torture); in sync_erase()
462 * Erase counter overflow. Upgrade UBI and use 64-bit in sync_erase()
465 ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu", in sync_erase()
475 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); in sync_erase()
480 spin_lock(&ubi->wl_lock); in sync_erase()
481 if (e->ec > ubi->max_ec) in sync_erase()
482 ubi->max_ec = e->ec; in sync_erase()
483 spin_unlock(&ubi->wl_lock); in sync_erase()
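[Editor's note] The sync_erase() hits above cover the erase-counter bookkeeping: after a successful erase the counter is incremented, an overflow is refused, the new value is written to the EC header, and the device-wide maximum is updated. A small sketch of only the arithmetic and the checks; EC_MAX is an assumed stand-in for the kernel's limit and the on-flash write is reduced to a comment.

#include <stdio.h>

#define EC_MAX 0x7FFFFFFFLL	/* assumed upper bound for an erase counter */

static long long max_ec;	/* highest EC seen on the device */

static int bump_erase_counter(long long *ec)
{
	long long new_ec = *ec + 1;

	if (new_ec > EC_MAX) {
		/* the real code reports "erase counter overflow" and fails */
		fprintf(stderr, "erase counter overflow (EC %lld)\n", new_ec);
		return -1;
	}

	/* in the kernel the new value is also written to the EC header */
	*ec = new_ec;
	if (new_ec > max_ec)
		max_ec = new_ec;
	return 0;
}

int main(void)
{
	long long ec = 41;

	if (!bump_erase_counter(&ec))
		printf("PEB erased, EC now %lld (max EC %lld)\n", ec, max_ec);
	return 0;
}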
492 * @ubi: UBI device description object
498 static void serve_prot_queue(struct ubi_device *ubi) in serve_prot_queue() argument
509 spin_lock(&ubi->wl_lock); in serve_prot_queue()
510 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { in serve_prot_queue()
515 wl_tree_add(e, &ubi->used); in serve_prot_queue()
521 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
527 ubi->pq_head += 1; in serve_prot_queue()
528 if (ubi->pq_head == UBI_PROT_QUEUE_LEN) in serve_prot_queue()
529 ubi->pq_head = 0; in serve_prot_queue()
530 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); in serve_prot_queue()
531 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
536 * @ubi: UBI device description object
540 * list. Can only be used if ubi->work_sem is already held in read mode!
542 static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) in __schedule_ubi_work() argument
544 spin_lock(&ubi->wl_lock); in __schedule_ubi_work()
545 list_add_tail(&wrk->list, &ubi->works); in __schedule_ubi_work()
546 ubi_assert(ubi->works_count >= 0); in __schedule_ubi_work()
547 ubi->works_count += 1; in __schedule_ubi_work()
548 if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi)) in __schedule_ubi_work()
549 wake_up_process(ubi->bgt_thread); in __schedule_ubi_work()
550 spin_unlock(&ubi->wl_lock); in __schedule_ubi_work()
555 * @ubi: UBI device description object
561 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) in schedule_ubi_work() argument
563 down_read(&ubi->work_sem); in schedule_ubi_work()
564 __schedule_ubi_work(ubi, wrk); in schedule_ubi_work()
565 up_read(&ubi->work_sem); in schedule_ubi_work()
568 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
573 * @ubi: UBI device description object
583 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in schedule_erase() argument
604 __schedule_ubi_work(ubi, wl_wrk); in schedule_erase()
606 schedule_ubi_work(ubi, wl_wrk); in schedule_erase()
610 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
613 * @ubi: UBI device description object
620 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in do_sync_erase() argument
632 return __erase_worker(ubi, &wl_wrk); in do_sync_erase()
635 static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
638 * @ubi: UBI device description object
647 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, argument
661 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
667 down_read(&ubi->fm_eba_sem);
668 mutex_lock(&ubi->move_mutex);
669 spin_lock(&ubi->wl_lock);
670 ubi_assert(!ubi->move_from && !ubi->move_to);
671 ubi_assert(!ubi->move_to_put);
673 if (!ubi->free.rb_node ||
674 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
682 * @ubi->used tree later and the wear-leveling will be
686 !ubi->free.rb_node, !ubi->used.rb_node);
691 e1 = find_anchor_wl_entry(&ubi->used);
692 if (e1 && ubi->fm_anchor &&
693 (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
694 ubi->fm_do_produce_anchor = 1;
700 wl_tree_add(ubi->fm_anchor, &ubi->free);
701 ubi->fm_anchor = NULL;
702 ubi->free_count++;
705 if (ubi->fm_do_produce_anchor) {
708 e2 = get_peb_for_wl(ubi);
712 self_check_in_wl_tree(ubi, e1, &ubi->used);
713 rb_erase(&e1->u.rb, &ubi->used);
715 ubi->fm_do_produce_anchor = 0;
716 } else if (!ubi->scrub.rb_node) {
718 if (!ubi->scrub.rb_node) {
725 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
726 e2 = get_peb_for_wl(ubi);
735 wl_tree_add(e2, &ubi->free);
736 ubi->free_count++;
739 self_check_in_wl_tree(ubi, e1, &ubi->used);
740 rb_erase(&e1->u.rb, &ubi->used);
746 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
747 e2 = get_peb_for_wl(ubi);
751 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
752 rb_erase(&e1->u.rb, &ubi->scrub);
756 ubi->move_from = e1;
757 ubi->move_to = e2;
758 spin_unlock(&ubi->wl_lock);
771 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
776 * We are trying to move PEB without a VID header. UBI
798 } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
810 ubi_err(ubi, "error %d while reading VID header from PEB %d",
818 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
853 * put this PEB to the @ubi->erroneous list to prevent
854 * UBI from trying to move it over and over again.
856 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
857 ubi_err(ubi, "too many erroneous eraseblocks (%d)",
858 ubi->erroneous_peb_count);
874 ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
878 spin_lock(&ubi->wl_lock);
879 if (!ubi->move_to_put) {
880 wl_tree_add(e2, &ubi->used);
883 ubi->move_from = ubi->move_to = NULL;
884 ubi->move_to_put = ubi->wl_scheduled = 0;
885 spin_unlock(&ubi->wl_lock);
887 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
890 spin_lock(&ubi->wl_lock);
891 wl_entry_destroy(ubi, e2);
892 spin_unlock(&ubi->wl_lock);
904 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
910 mutex_unlock(&ubi->move_mutex);
911 up_read(&ubi->fm_eba_sem);
926 spin_lock(&ubi->wl_lock);
928 prot_queue_add(ubi, e1);
930 wl_tree_add(e1, &ubi->erroneous);
931 ubi->erroneous_peb_count += 1;
933 wl_tree_add(e1, &ubi->scrub);
935 wl_tree_add(e1, &ubi->used);
937 wl_tree_add(e2, &ubi->free);
938 ubi->free_count++;
941 ubi_assert(!ubi->move_to_put);
942 ubi->move_from = ubi->move_to = NULL;
943 ubi->wl_scheduled = 0;
944 spin_unlock(&ubi->wl_lock);
948 ensure_wear_leveling(ubi, 1);
950 err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
956 err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
961 mutex_unlock(&ubi->move_mutex);
962 up_read(&ubi->fm_eba_sem);
967 ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
970 ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
972 spin_lock(&ubi->wl_lock);
973 ubi->move_from = ubi->move_to = NULL;
974 ubi->move_to_put = ubi->wl_scheduled = 0;
975 wl_entry_destroy(ubi, e1);
976 wl_entry_destroy(ubi, e2);
977 spin_unlock(&ubi->wl_lock);
982 ubi_ro_mode(ubi);
983 mutex_unlock(&ubi->move_mutex);
984 up_read(&ubi->fm_eba_sem);
989 ubi->wl_scheduled = 0;
990 spin_unlock(&ubi->wl_lock);
991 mutex_unlock(&ubi->move_mutex);
992 up_read(&ubi->fm_eba_sem);
999 * @ubi: UBI device description object
1000 * @nested: set to non-zero if this function is called from UBI worker
1006 static int ensure_wear_leveling(struct ubi_device *ubi, int nested) argument
1013 spin_lock(&ubi->wl_lock);
1014 if (ubi->wl_scheduled)
1019 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
1022 if (!ubi->scrub.rb_node) {
1023 if (!ubi->used.rb_node || !ubi->free.rb_node)
1033 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1034 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1042 ubi->wl_scheduled = 1;
1043 spin_unlock(&ubi->wl_lock);
1053 __schedule_ubi_work(ubi, wrk);
1055 schedule_ubi_work(ubi, wrk);
1059 spin_lock(&ubi->wl_lock);
1060 ubi->wl_scheduled = 0;
1062 spin_unlock(&ubi->wl_lock);
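[Editor's note] ensure_wear_leveling() above compares the least-worn used PEB with the free PEB picked by find_wl_entry() and only schedules a move when the erase-counter gap is large enough. The decision reduces roughly to the predicate sketched below; the threshold value is an assumption (the real one comes from the kernel configuration) and the function name is illustrative.

#include <stdio.h>
#include <stdbool.h>

#define WL_THRESHOLD 4096	/* assumed; kernel value is configurable */

static bool needs_wear_leveling(long long lowest_used_ec,
				long long chosen_free_ec)
{
	/* move static data off a barely-erased PEB onto a more worn one */
	return chosen_free_ec - lowest_used_ec >= WL_THRESHOLD;
}

int main(void)
{
	printf("gap 10   -> %s\n",
	       needs_wear_leveling(100, 110) ? "move" : "skip");
	printf("gap 5000 -> %s\n",
	       needs_wear_leveling(100, 5100) ? "move" : "skip");
	return 0;
}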
1068 * @ubi: UBI device description object
1076 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) argument
1087 err = sync_erase(ubi, e, wl_wrk->torture);
1089 spin_lock(&ubi->wl_lock);
1091 if (!ubi->fm_disabled && !ubi->fm_anchor &&
1097 ubi->fm_anchor = e;
1098 ubi->fm_do_produce_anchor = 0;
1100 wl_tree_add(e, &ubi->free);
1101 ubi->free_count++;
1104 spin_unlock(&ubi->wl_lock);
1110 serve_prot_queue(ubi);
1113 err = ensure_wear_leveling(ubi, 1);
1117 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1124 err1 = schedule_erase(ubi, e, vol_id, lnum, 0, true);
1126 spin_lock(&ubi->wl_lock);
1127 wl_entry_destroy(ubi, e);
1128 spin_unlock(&ubi->wl_lock);
1135 spin_lock(&ubi->wl_lock);
1136 wl_entry_destroy(ubi, e);
1137 spin_unlock(&ubi->wl_lock);
1148 if (!ubi->bad_allowed) {
1149 ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
1153 spin_lock(&ubi->volumes_lock);
1154 if (ubi->beb_rsvd_pebs == 0) {
1155 if (ubi->avail_pebs == 0) {
1156 spin_unlock(&ubi->volumes_lock);
1157 ubi_err(ubi, "no reserved/available physical eraseblocks");
1160 ubi->avail_pebs -= 1;
1163 spin_unlock(&ubi->volumes_lock);
1165 ubi_msg(ubi, "mark PEB %d as bad", pnum);
1166 err = ubi_io_mark_bad(ubi, pnum);
1170 spin_lock(&ubi->volumes_lock);
1171 if (ubi->beb_rsvd_pebs > 0) {
1177 ubi->avail_pebs += 1;
1180 ubi->beb_rsvd_pebs -= 1;
1182 ubi->bad_peb_count += 1;
1183 ubi->good_peb_count -= 1;
1184 ubi_calculate_reserved(ubi);
1186 ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
1187 else if (ubi->beb_rsvd_pebs)
1188 ubi_msg(ubi, "%d PEBs left in the reserve",
1189 ubi->beb_rsvd_pebs);
1191 ubi_warn(ubi, "last PEB from the reserve was used");
1192 spin_unlock(&ubi->volumes_lock);
1198 spin_lock(&ubi->volumes_lock);
1199 ubi->avail_pebs += 1;
1200 spin_unlock(&ubi->volumes_lock);
1202 ubi_ro_mode(ubi);
1206 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, argument
1216 wl_entry_destroy(ubi, e);
1220 ret = __erase_worker(ubi, wl_wrk);
1227 * @ubi: UBI device description object
1238 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, argument
1246 ubi_assert(pnum < ubi->peb_count);
1248 down_read(&ubi->fm_protect);
1251 spin_lock(&ubi->wl_lock);
1252 e = ubi->lookuptbl[pnum];
1258 * ubi_wl_put_peb) will set ubi ro_mode at the same time,
1261 spin_unlock(&ubi->wl_lock);
1262 up_read(&ubi->fm_protect);
1265 if (e == ubi->move_from) {
1272 spin_unlock(&ubi->wl_lock);
1274 /* Wait for the WL worker by taking the @ubi->move_mutex */
1275 mutex_lock(&ubi->move_mutex);
1276 mutex_unlock(&ubi->move_mutex);
1278 } else if (e == ubi->move_to) {
1289 ubi_assert(!ubi->move_to_put);
1290 ubi->move_to_put = 1;
1291 spin_unlock(&ubi->wl_lock);
1292 up_read(&ubi->fm_protect);
1295 if (in_wl_tree(e, &ubi->used)) {
1296 self_check_in_wl_tree(ubi, e, &ubi->used);
1297 rb_erase(&e->u.rb, &ubi->used);
1298 } else if (in_wl_tree(e, &ubi->scrub)) {
1299 self_check_in_wl_tree(ubi, e, &ubi->scrub);
1300 rb_erase(&e->u.rb, &ubi->scrub);
1301 } else if (in_wl_tree(e, &ubi->erroneous)) {
1302 self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1303 rb_erase(&e->u.rb, &ubi->erroneous);
1304 ubi->erroneous_peb_count -= 1;
1305 ubi_assert(ubi->erroneous_peb_count >= 0);
1309 err = prot_queue_del(ubi, e->pnum);
1311 ubi_err(ubi, "PEB %d not found", pnum);
1312 ubi_ro_mode(ubi);
1313 spin_unlock(&ubi->wl_lock);
1314 up_read(&ubi->fm_protect);
1319 spin_unlock(&ubi->wl_lock);
1321 err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
1323 spin_lock(&ubi->wl_lock);
1324 wl_tree_add(e, &ubi->used);
1325 spin_unlock(&ubi->wl_lock);
1328 up_read(&ubi->fm_protect);
1334 * @ubi: UBI device description object
1342 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) argument
1346 ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
1349 spin_lock(&ubi->wl_lock);
1350 e = ubi->lookuptbl[pnum];
1351 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1352 in_wl_tree(e, &ubi->erroneous)) {
1353 spin_unlock(&ubi->wl_lock);
1357 if (e == ubi->move_to) {
1364 spin_unlock(&ubi->wl_lock);
1370 if (in_wl_tree(e, &ubi->used)) {
1371 self_check_in_wl_tree(ubi, e, &ubi->used);
1372 rb_erase(&e->u.rb, &ubi->used);
1376 err = prot_queue_del(ubi, e->pnum);
1378 ubi_err(ubi, "PEB %d not found", pnum);
1379 ubi_ro_mode(ubi);
1380 spin_unlock(&ubi->wl_lock);
1385 wl_tree_add(e, &ubi->scrub);
1386 spin_unlock(&ubi->wl_lock);
1392 return ensure_wear_leveling(ubi, 0);
1397 * @ubi: UBI device description object
1407 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) argument
1417 vol_id, lnum, ubi->works_count);
1423 down_read(&ubi->work_sem);
1424 spin_lock(&ubi->wl_lock);
1425 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1429 ubi->works_count -= 1;
1430 ubi_assert(ubi->works_count >= 0);
1431 spin_unlock(&ubi->wl_lock);
1433 err = wrk->func(ubi, wrk, 0);
1435 up_read(&ubi->work_sem);
1439 spin_lock(&ubi->wl_lock);
1444 spin_unlock(&ubi->wl_lock);
1445 up_read(&ubi->work_sem);
1452 down_write(&ubi->work_sem);
1453 up_write(&ubi->work_sem);
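[Editor's note] The last two ubi_wl_flush() hits show the empty write-lock section on @ubi->work_sem: workers hold the read side while a work item runs, so briefly taking the write side acts as a barrier that returns only after every in-flight work has finished. A self-contained model of that idiom using a POSIX rwlock; the names are illustrative and this is not the kernel's code.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;

static void *worker(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&work_sem);	/* a work item is in flight */
	puts("worker: running one work item");
	pthread_rwlock_unlock(&work_sem);
	return NULL;
}

static void flush_works(void)
{
	/* blocks until no worker holds the read side */
	pthread_rwlock_wrlock(&work_sem);
	pthread_rwlock_unlock(&work_sem);
	puts("flush: all in-flight works have finished");
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	flush_works();
	pthread_join(&t, NULL);
	return 0;
}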
1458 static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e) argument
1460 if (in_wl_tree(e, &ubi->scrub))
1462 else if (in_wl_tree(e, &ubi->erroneous))
1464 else if (ubi->move_from == e)
1466 else if (ubi->move_to == e)
1474 * @ubi: UBI device description object
1485 * %ENOENT, PEB is no longer used by UBI
1491 int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force) argument
1496 if (pnum < 0 || pnum >= ubi->peb_count) {
1505 down_write(&ubi->work_sem);
1511 spin_lock(&ubi->wl_lock);
1512 e = ubi->lookuptbl[pnum];
1514 spin_unlock(&ubi->wl_lock);
1522 if (!scrub_possible(ubi, e)) {
1523 spin_unlock(&ubi->wl_lock);
1527 spin_unlock(&ubi->wl_lock);
1530 mutex_lock(&ubi->buf_mutex);
1531 err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
1532 mutex_unlock(&ubi->buf_mutex);
1539 spin_lock(&ubi->wl_lock);
1542 * Recheck. We released wl_lock, UBI might have killed the
1545 e = ubi->lookuptbl[pnum];
1547 spin_unlock(&ubi->wl_lock);
1555 if (!scrub_possible(ubi, e)) {
1556 spin_unlock(&ubi->wl_lock);
1561 if (in_pq(ubi, e)) {
1562 prot_queue_del(ubi, e->pnum);
1563 wl_tree_add(e, &ubi->scrub);
1564 spin_unlock(&ubi->wl_lock);
1566 err = ensure_wear_leveling(ubi, 1);
1567 } else if (in_wl_tree(e, &ubi->used)) {
1568 rb_erase(&e->u.rb, &ubi->used);
1569 wl_tree_add(e, &ubi->scrub);
1570 spin_unlock(&ubi->wl_lock);
1572 err = ensure_wear_leveling(ubi, 1);
1573 } else if (in_wl_tree(e, &ubi->free)) {
1574 rb_erase(&e->u.rb, &ubi->free);
1575 ubi->free_count--;
1576 spin_unlock(&ubi->wl_lock);
1582 err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
1585 spin_unlock(&ubi->wl_lock);
1596 up_write(&ubi->work_sem);
1604 * @ubi: UBI device description object
1607 static void tree_destroy(struct ubi_device *ubi, struct rb_root *root) argument
1629 wl_entry_destroy(ubi, e);
1635 * ubi_thread - UBI background thread.
1636 * @u: the UBI device description object pointer
1641 struct ubi_device *ubi = u; local
1643 ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1644 ubi->bgt_name, task_pid_nr(current));
1656 spin_lock(&ubi->wl_lock);
1657 if (list_empty(&ubi->works) || ubi->ro_mode ||
1658 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1660 spin_unlock(&ubi->wl_lock);
1677 spin_unlock(&ubi->wl_lock);
1679 err = do_work(ubi);
1681 ubi_err(ubi, "%s: work failed with error code %d",
1682 ubi->bgt_name, err);
1688 ubi_msg(ubi, "%s: %d consecutive failures",
1689 ubi->bgt_name, WL_MAX_FAILURES);
1690 ubi_ro_mode(ubi);
1691 ubi->thread_enabled = 0;
1700 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1701 ubi->thread_enabled = 0;
1707 * @ubi: UBI device description object
1709 static void shutdown_work(struct ubi_device *ubi) argument
1711 while (!list_empty(&ubi->works)) {
1714 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1716 wrk->func(ubi, wrk, 1);
1717 ubi->works_count -= 1;
1718 ubi_assert(ubi->works_count >= 0);
1723 * erase_aeb - erase a PEB given in UBI attach info PEB
1724 * @ubi: UBI device description object
1725 * @aeb: UBI attach info PEB
1728 static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync) argument
1739 ubi->lookuptbl[e->pnum] = e;
1742 err = sync_erase(ubi, e, false);
1746 wl_tree_add(e, &ubi->free);
1747 ubi->free_count++;
1749 err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
1757 wl_entry_destroy(ubi, e);
1764 * @ubi: UBI device description object
1770 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) argument
1778 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1779 spin_lock_init(&ubi->wl_lock);
1780 mutex_init(&ubi->move_mutex);
1781 init_rwsem(&ubi->work_sem);
1782 ubi->max_ec = ai->max_ec;
1783 INIT_LIST_HEAD(&ubi->works);
1785 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1788 ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
1789 if (!ubi->lookuptbl)
1793 INIT_LIST_HEAD(&ubi->pq[i]);
1794 ubi->pq_head = 0;
1796 ubi->free_count = 0;
1800 err = erase_aeb(ubi, aeb, false);
1820 wl_tree_add(e, &ubi->free);
1821 ubi->free_count++;
1823 ubi->lookuptbl[e->pnum] = e;
1840 ubi->lookuptbl[e->pnum] = e;
1845 wl_tree_add(e, &ubi->used);
1849 wl_tree_add(e, &ubi->scrub);
1859 e = ubi_find_fm_block(ubi, aeb->pnum);
1862 ubi_assert(!ubi->lookuptbl[e->pnum]);
1863 ubi->lookuptbl[e->pnum] = e;
1873 if (ubi->lookuptbl[aeb->pnum])
1888 err = erase_aeb(ubi, aeb, sync);
1898 ubi_assert(ubi->good_peb_count == found_pebs);
1901 ubi_fastmap_init(ubi, &reserved_pebs);
1903 if (ubi->avail_pebs < reserved_pebs) {
1904 ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
1905 ubi->avail_pebs, reserved_pebs);
1906 if (ubi->corr_peb_count)
1907 ubi_err(ubi, "%d PEBs are corrupted and not used",
1908 ubi->corr_peb_count);
1912 ubi->avail_pebs -= reserved_pebs;
1913 ubi->rsvd_pebs += reserved_pebs;
1916 err = ensure_wear_leveling(ubi, 0);
1921 if (!ubi->ro_mode && !ubi->fm_disabled)
1922 ubi_ensure_anchor_pebs(ubi);
1927 shutdown_work(ubi);
1928 tree_destroy(ubi, &ubi->used);
1929 tree_destroy(ubi, &ubi->free);
1930 tree_destroy(ubi, &ubi->scrub);
1931 kfree(ubi->lookuptbl);
1937 * @ubi: UBI device description object
1939 static void protection_queue_destroy(struct ubi_device *ubi) argument
1945 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1947 wl_entry_destroy(ubi, e);
1954 * @ubi: UBI device description object
1956 void ubi_wl_close(struct ubi_device *ubi) argument
1959 ubi_fastmap_close(ubi);
1960 shutdown_work(ubi);
1961 protection_queue_destroy(ubi);
1962 tree_destroy(ubi, &ubi->used);
1963 tree_destroy(ubi, &ubi->erroneous);
1964 tree_destroy(ubi, &ubi->free);
1965 tree_destroy(ubi, &ubi->scrub);
1966 kfree(ubi->lookuptbl);
1971 * @ubi: UBI device description object
1979 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec) argument
1985 if (!ubi_dbg_chk_gen(ubi))
1988 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1992 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
2001 ubi_err(ubi, "self-check failed for PEB %d", pnum);
2002 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
2015 * @ubi: UBI device description object
2022 static int self_check_in_wl_tree(const struct ubi_device *ubi, argument
2025 if (!ubi_dbg_chk_gen(ubi))
2031 ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
2040 * @ubi: UBI device description object
2043 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
2045 static int self_check_in_pq(const struct ubi_device *ubi, argument
2048 if (!ubi_dbg_chk_gen(ubi))
2051 if (in_pq(ubi, e))
2054 ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
2060 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) argument
2064 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
2065 self_check_in_wl_tree(ubi, e, &ubi->free);
2066 ubi->free_count--;
2067 ubi_assert(ubi->free_count >= 0);
2068 rb_erase(&e->u.rb, &ubi->free);
2075 * @ubi: UBI device description object
2082 static int produce_free_peb(struct ubi_device *ubi) argument
2086 while (!ubi->free.rb_node && ubi->works_count) {
2087 spin_unlock(&ubi->wl_lock);
2090 err = do_work(ubi);
2092 spin_lock(&ubi->wl_lock);
2102 * @ubi: UBI device description object
2106 * Returns with ubi->fm_eba_sem held in read mode!
2108 int ubi_wl_get_peb(struct ubi_device *ubi) argument
2114 down_read(&ubi->fm_eba_sem);
2115 spin_lock(&ubi->wl_lock);
2116 if (!ubi->free.rb_node) {
2117 if (ubi->works_count == 0) {
2118 ubi_err(ubi, "no free eraseblocks");
2119 ubi_assert(list_empty(&ubi->works));
2120 spin_unlock(&ubi->wl_lock);
2124 err = produce_free_peb(ubi);
2126 spin_unlock(&ubi->wl_lock);
2129 spin_unlock(&ubi->wl_lock);
2130 up_read(&ubi->fm_eba_sem);
2134 e = wl_get_wle(ubi);
2135 prot_queue_add(ubi, e);
2136 spin_unlock(&ubi->wl_lock);
2138 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
2139 ubi->peb_size - ubi->vid_hdr_aloffset);
2141 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
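[Editor's note] The listing ends inside ubi_wl_get_peb(). Its overall flow, as the hits above suggest, is: if the free tree is empty, either fail when nothing is queued that could produce a free PEB, or run pending works until one appears; then take a free entry and park it in the protection queue. A compact sketch of that control flow under those assumptions; every helper here is an illustrative stand-in, not a kernel function.

#include <stdio.h>
#include <errno.h>

static int free_count;		/* PEBs currently in the free tree */
static int works_count;		/* queued works (erases etc.) */

static int run_one_work(void)
{
	works_count--;
	free_count++;		/* pretend the work produced a free PEB */
	return 0;
}

static int get_peb(void)
{
	while (!free_count) {
		if (!works_count) {
			fprintf(stderr, "no free eraseblocks\n");
			return -ENOSPC;
		}
		if (run_one_work())
			return -EIO;
	}

	free_count--;		/* wl_get_wle(): pick a mean-EC free entry */
	/* prot_queue_add() would protect the chosen PEB here */
	return 1;		/* some PEB number */
}

int main(void)
{
	works_count = 1;	/* one pending erase can produce a free PEB */
	printf("got PEB %d\n", get_peb());
	return 0;
}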