Lines Matching refs:ubi

137 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
138 static int self_check_in_wl_tree(const struct ubi_device *ubi,
140 static int self_check_in_pq(const struct ubi_device *ubi,
150 struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work); in update_fastmap_work_fn() local
151 ubi_update_fastmap(ubi); in update_fastmap_work_fn()
159 static int ubi_is_fm_block(struct ubi_device *ubi, int pnum) in ubi_is_fm_block() argument
163 if (!ubi->fm) in ubi_is_fm_block()
166 for (i = 0; i < ubi->fm->used_blocks; i++) in ubi_is_fm_block()
167 if (ubi->fm->e[i]->pnum == pnum) in ubi_is_fm_block()
173 static int ubi_is_fm_block(struct ubi_device *ubi, int pnum) in ubi_is_fm_block() argument
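
The two ubi_is_fm_block() variants above are the fastmap and non-fastmap
builds of the same helper: with fastmap support it is a linear scan over the
PEBs the attached fastmap occupies, without it it is a stub. A minimal
standalone sketch of that membership test, using made-up fm_model/pnums
names rather than the real fastmap layout structure:

    /* Hypothetical, simplified stand-in for the fastmap layout object. */
    struct fm_model {
        int used_blocks;   /* number of PEBs the fastmap occupies */
        int pnums[64];     /* their physical eraseblock numbers   */
    };

    /* Return 1 if @pnum is one of the PEBs holding the fastmap, else 0. */
    static int model_is_fm_block(const struct fm_model *fm, int pnum)
    {
        int i;

        if (!fm)           /* no fastmap attached yet */
            return 0;
        for (i = 0; i < fm->used_blocks; i++)
            if (fm->pnums[i] == pnum)
                return 1;
        return 0;
    }
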
222 static int do_work(struct ubi_device *ubi) in do_work() argument
235 down_read(&ubi->work_sem); in do_work()
236 spin_lock(&ubi->wl_lock); in do_work()
237 if (list_empty(&ubi->works)) { in do_work()
238 spin_unlock(&ubi->wl_lock); in do_work()
239 up_read(&ubi->work_sem); in do_work()
243 wrk = list_entry(ubi->works.next, struct ubi_work, list); in do_work()
245 ubi->works_count -= 1; in do_work()
246 ubi_assert(ubi->works_count >= 0); in do_work()
247 spin_unlock(&ubi->wl_lock); in do_work()
254 err = wrk->func(ubi, wrk, 0); in do_work()
257 up_read(&ubi->work_sem); in do_work()
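
do_work() above pops one item off ubi->works under wl_lock, then runs its
callback with the spinlock dropped while holding ubi->work_sem for reading,
which is what later lets ubi_wl_flush() wait for in-flight works. A
compressed userspace model of that pop-then-run shape (work_item,
work_queue and the field names are illustrative, not the kernel types):

    #include <pthread.h>

    struct work_item {
        struct work_item *next;
        int (*func)(struct work_item *w, int shutdown);
    };

    struct work_queue {
        pthread_mutex_t lock;    /* plays the role of ubi->wl_lock     */
        struct work_item *head;  /* plays the role of the works list   */
        int count;               /* plays the role of ubi->works_count */
    };

    /* Pop one work item and run it outside the lock; 0 if queue empty. */
    static int model_do_work(struct work_queue *q)
    {
        struct work_item *w;

        pthread_mutex_lock(&q->lock);
        w = q->head;
        if (!w) {
            pthread_mutex_unlock(&q->lock);
            return 0;
        }
        q->head = w->next;
        q->count--;
        pthread_mutex_unlock(&q->lock);

        /* The callback may sleep; in UBI it does the actual flash I/O. */
        return w->func(w, 0);
    }

produce_free_peb() (lines 271-281 above) is a thin loop around the same
helper: while the free tree is empty it releases wl_lock, calls do_work()
so a pending erase can finish, and re-takes the lock.
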
271 static int produce_free_peb(struct ubi_device *ubi) in produce_free_peb() argument
275 while (!ubi->free.rb_node) { in produce_free_peb()
276 spin_unlock(&ubi->wl_lock); in produce_free_peb()
279 err = do_work(ubi); in produce_free_peb()
281 spin_lock(&ubi->wl_lock); in produce_free_peb()
338 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) in prot_queue_add() argument
340 int pq_tail = ubi->pq_head - 1; in prot_queue_add()
345 list_add_tail(&e->u.list, &ubi->pq[pq_tail]); in prot_queue_add()
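
prot_queue_add() parks a freshly returned PEB in the protection queue so it
is not immediately grabbed for wear-leveling: the queue is an array of list
heads rotated by pq_head, and new entries are linked into the slot just
behind the head, giving them the longest possible protection time. A
self-contained sketch with simplified types (the array length and names
are made up; the wrap-around mirrors the pq_tail computation above):

    #define PROT_QUEUE_LEN 10   /* stands in for UBI_PROT_QUEUE_LEN */

    struct pq_entry {
        struct pq_entry *next, *prev;   /* doubly linked, list_head style */
        int pnum;
    };

    struct prot_queue {
        /* circular array of list heads; each slot must be initialised
         * so that its next and prev point back at the slot itself */
        struct pq_entry slots[PROT_QUEUE_LEN];
        int head;                       /* slot that ages out next */
    };

    /* Insert @e one slot behind the head, i.e. with maximum protection. */
    static void model_prot_queue_add(struct prot_queue *pq, struct pq_entry *e)
    {
        int tail = pq->head - 1;

        if (tail < 0)
            tail = PROT_QUEUE_LEN - 1;

        /* list_add_tail() equivalent on the slot's circular list */
        e->prev = pq->slots[tail].prev;
        e->next = &pq->slots[tail];
        pq->slots[tail].prev->next = e;
        pq->slots[tail].prev = e;
    }
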
358 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi, in find_wl_entry() argument
385 if (prev_e && !ubi->fm_disabled && in find_wl_entry()
386 !ubi->fm && e->pnum < UBI_FM_MAX_START) in find_wl_entry()
401 static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi, in find_mean_wl_entry() argument
416 if (e && !ubi->fm_disabled && !ubi->fm && in find_mean_wl_entry()
422 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2); in find_mean_wl_entry()
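
find_wl_entry() walks the free tree, which is ordered by erase counter, and
returns a PEB whose erase counter does not exceed that of the least-worn
free PEB by more than the given margin; find_mean_wl_entry() narrows the
margin to WL_FREE_MAX_DIFF/2, and the fm_disabled/UBI_FM_MAX_START checks
in the fragments keep low-numbered PEBs in reserve as fastmap anchor
candidates. A sketch of the selection rule over a sorted array instead of
an RB-tree (free_peb and the helper are stand-ins, not driver code):

    struct free_peb {
        int pnum;
        int ec;    /* erase counter */
    };

    /*
     * Pick a free PEB whose erase counter stays within @max_diff of the
     * smallest erase counter in the set.  @free[] is sorted by ascending
     * ec, mirroring the RB-tree ordering in the driver.  Returns the
     * index of the chosen PEB, or -1 if the set is empty.
     */
    static int model_find_wl_entry(const struct free_peb *free, int n,
                                   int max_diff)
    {
        int limit, i, pick = -1;

        if (n == 0)
            return -1;

        limit = free[0].ec + max_diff;
        for (i = 0; i < n; i++) {
            if (free[i].ec > limit)
                break;
            pick = i;    /* last entry still inside the window */
        }
        return pick;
    }
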
469 struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor) in ubi_wl_get_fm_peb() argument
473 if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1)) in ubi_wl_get_fm_peb()
477 e = find_anchor_wl_entry(&ubi->free); in ubi_wl_get_fm_peb()
479 e = find_mean_wl_entry(ubi, &ubi->free); in ubi_wl_get_fm_peb()
484 self_check_in_wl_tree(ubi, e, &ubi->free); in ubi_wl_get_fm_peb()
488 rb_erase(&e->u.rb, &ubi->free); in ubi_wl_get_fm_peb()
489 ubi->free_count--; in ubi_wl_get_fm_peb()
502 static int __wl_get_peb(struct ubi_device *ubi) in __wl_get_peb() argument
508 if (!ubi->free.rb_node) { in __wl_get_peb()
509 if (ubi->works_count == 0) { in __wl_get_peb()
511 ubi_assert(list_empty(&ubi->works)); in __wl_get_peb()
515 err = produce_free_peb(ubi); in __wl_get_peb()
521 e = find_mean_wl_entry(ubi, &ubi->free); in __wl_get_peb()
527 self_check_in_wl_tree(ubi, e, &ubi->free); in __wl_get_peb()
533 rb_erase(&e->u.rb, &ubi->free); in __wl_get_peb()
534 ubi->free_count--; in __wl_get_peb()
540 prot_queue_add(ubi, e); in __wl_get_peb()
551 static void return_unused_pool_pebs(struct ubi_device *ubi, in return_unused_pool_pebs() argument
558 e = ubi->lookuptbl[pool->pebs[i]]; in return_unused_pool_pebs()
559 wl_tree_add(e, &ubi->free); in return_unused_pool_pebs()
560 ubi->free_count++; in return_unused_pool_pebs()
569 static void refill_wl_pool(struct ubi_device *ubi) in refill_wl_pool() argument
572 struct ubi_fm_pool *pool = &ubi->fm_wl_pool; in refill_wl_pool()
574 return_unused_pool_pebs(ubi, pool); in refill_wl_pool()
577 if (!ubi->free.rb_node || in refill_wl_pool()
578 (ubi->free_count - ubi->beb_rsvd_pebs < 5)) in refill_wl_pool()
581 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); in refill_wl_pool()
582 self_check_in_wl_tree(ubi, e, &ubi->free); in refill_wl_pool()
583 rb_erase(&e->u.rb, &ubi->free); in refill_wl_pool()
584 ubi->free_count--; in refill_wl_pool()
595 static void refill_wl_user_pool(struct ubi_device *ubi) in refill_wl_user_pool() argument
597 struct ubi_fm_pool *pool = &ubi->fm_pool; in refill_wl_user_pool()
599 return_unused_pool_pebs(ubi, pool); in refill_wl_user_pool()
602 if (!ubi->free.rb_node || in refill_wl_user_pool()
603 (ubi->free_count - ubi->beb_rsvd_pebs < 1)) in refill_wl_user_pool()
606 pool->pebs[pool->size] = __wl_get_peb(ubi); in refill_wl_user_pool()
617 void ubi_refill_pools(struct ubi_device *ubi) in ubi_refill_pools() argument
619 spin_lock(&ubi->wl_lock); in ubi_refill_pools()
620 refill_wl_pool(ubi); in ubi_refill_pools()
621 refill_wl_user_pool(ubi); in ubi_refill_pools()
622 spin_unlock(&ubi->wl_lock); in ubi_refill_pools()
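
With fastmap, ubi_refill_pools() repopulates both pools under wl_lock:
fm_pool feeds ubi_wl_get_peb() callers and fm_wl_pool feeds the
wear-leveling code. Each refill first returns the PEBs that were never
handed out back to the free tree and then pulls fresh ones while a safety
margin above beb_rsvd_pebs remains (the fragments use different margins
for the two pools). A compact model of that refill step, with an
illustrative pool size and a callback standing in for the
find_wl_entry()/__wl_get_peb() paths:

    #define POOL_CAP 8    /* illustrative pool capacity */

    struct peb_pool {
        int pebs[POOL_CAP];
        int size;    /* number of valid entries  */
        int used;    /* how many were handed out */
    };

    /* Return the PEBs that were never handed out to the free counter. */
    static void model_return_unused(struct peb_pool *pool, int *free_count)
    {
        *free_count += pool->size - pool->used;
        pool->size = pool->used = 0;
    }

    /* Refill @pool while keeping @reserve PEBs back for bad-block handling. */
    static void model_refill(struct peb_pool *pool, int *free_count,
                             int reserve, int (*get_free_peb)(void))
    {
        model_return_unused(pool, free_count);

        while (pool->size < POOL_CAP && *free_count - reserve > 0) {
            pool->pebs[pool->size++] = get_free_peb();
            (*free_count)--;
        }
    }
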
628 int ubi_wl_get_peb(struct ubi_device *ubi) in ubi_wl_get_peb() argument
631 struct ubi_fm_pool *pool = &ubi->fm_pool; in ubi_wl_get_peb()
632 struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool; in ubi_wl_get_peb()
636 ubi_update_fastmap(ubi); in ubi_wl_get_peb()
642 spin_lock(&ubi->wl_lock); in ubi_wl_get_peb()
644 prot_queue_add(ubi, ubi->lookuptbl[ret]); in ubi_wl_get_peb()
645 spin_unlock(&ubi->wl_lock); in ubi_wl_get_peb()
655 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) in get_peb_for_wl() argument
657 struct ubi_fm_pool *pool = &ubi->fm_wl_pool; in get_peb_for_wl()
664 schedule_work(&ubi->fm_work); in get_peb_for_wl()
668 return ubi->lookuptbl[pnum]; in get_peb_for_wl()
672 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) in get_peb_for_wl() argument
676 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); in get_peb_for_wl()
677 self_check_in_wl_tree(ubi, e, &ubi->free); in get_peb_for_wl()
678 rb_erase(&e->u.rb, &ubi->free); in get_peb_for_wl()
683 int ubi_wl_get_peb(struct ubi_device *ubi) in ubi_wl_get_peb() argument
687 spin_lock(&ubi->wl_lock); in ubi_wl_get_peb()
688 peb = __wl_get_peb(ubi); in ubi_wl_get_peb()
689 spin_unlock(&ubi->wl_lock); in ubi_wl_get_peb()
691 err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset, in ubi_wl_get_peb()
692 ubi->peb_size - ubi->vid_hdr_aloffset); in ubi_wl_get_peb()
710 static int prot_queue_del(struct ubi_device *ubi, int pnum) in prot_queue_del() argument
714 e = ubi->lookuptbl[pnum]; in prot_queue_del()
718 if (self_check_in_pq(ubi, e)) in prot_queue_del()
735 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in sync_erase() argument
744 err = self_check_ec(ubi, e->pnum, e->ec); in sync_erase()
748 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); in sync_erase()
752 err = ubi_io_sync_erase(ubi, e->pnum, torture); in sync_erase()
772 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); in sync_erase()
777 spin_lock(&ubi->wl_lock); in sync_erase()
778 if (e->ec > ubi->max_ec) in sync_erase()
779 ubi->max_ec = e->ec; in sync_erase()
780 spin_unlock(&ubi->wl_lock); in sync_erase()
795 static void serve_prot_queue(struct ubi_device *ubi) in serve_prot_queue() argument
806 spin_lock(&ubi->wl_lock); in serve_prot_queue()
807 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { in serve_prot_queue()
812 wl_tree_add(e, &ubi->used); in serve_prot_queue()
818 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
824 ubi->pq_head += 1; in serve_prot_queue()
825 if (ubi->pq_head == UBI_PROT_QUEUE_LEN) in serve_prot_queue()
826 ubi->pq_head = 0; in serve_prot_queue()
827 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); in serve_prot_queue()
828 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
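
serve_prot_queue() is the ageing half of the protection queue: it empties
the slot at pq_head back into the used tree and then advances pq_head by
one, wrapping at UBI_PROT_QUEUE_LEN, so an entry added by prot_queue_add()
survives one full rotation before it becomes a wear-leveling candidate
again. Continuing the simplified prot_queue/pq_entry model from the
prot_queue_add sketch above, with the "move back to the used tree" step
reduced to a callback:

    /* Age the queue by one step: release everything in the head slot. */
    static void model_serve_prot_queue(struct prot_queue *pq,
                                       void (*move_to_used)(struct pq_entry *e))
    {
        struct pq_entry *head = &pq->slots[pq->head];

        while (head->next != head) {          /* slot not empty */
            struct pq_entry *e = head->next;

            /* list_del() equivalent */
            head->next = e->next;
            e->next->prev = head;

            move_to_used(e);                  /* back to the used RB-tree */
        }

        pq->head = (pq->head + 1) % PROT_QUEUE_LEN;
    }
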
839 static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) in __schedule_ubi_work() argument
841 spin_lock(&ubi->wl_lock); in __schedule_ubi_work()
842 list_add_tail(&wrk->list, &ubi->works); in __schedule_ubi_work()
843 ubi_assert(ubi->works_count >= 0); in __schedule_ubi_work()
844 ubi->works_count += 1; in __schedule_ubi_work()
845 if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi)) in __schedule_ubi_work()
846 wake_up_process(ubi->bgt_thread); in __schedule_ubi_work()
847 spin_unlock(&ubi->wl_lock); in __schedule_ubi_work()
858 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) in schedule_ubi_work() argument
860 down_read(&ubi->work_sem); in schedule_ubi_work()
861 __schedule_ubi_work(ubi, wrk); in schedule_ubi_work()
862 up_read(&ubi->work_sem); in schedule_ubi_work()
865 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
890 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in schedule_erase() argument
896 ubi_assert(!ubi_is_fm_block(ubi, e->pnum)); in schedule_erase()
911 schedule_ubi_work(ubi, wl_wrk); in schedule_erase()
924 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in do_sync_erase() argument
940 return erase_worker(ubi, wl_wrk, 0); in do_sync_erase()
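
schedule_erase() wraps a PEB in a ubi_work whose callback is erase_worker()
and hands it to schedule_ubi_work(), while do_sync_erase() builds the same
work item and invokes the callback immediately in the caller's context. A
sketch of that "same callback, two dispatch modes" shape, built entirely
from placeholder types and stubs (nothing here is the kernel API):

    #include <stdlib.h>

    struct wl_entry { int pnum; int ec; };

    struct erase_work {
        struct wl_entry *e;
        int vol_id, lnum, torture;
        int (*func)(struct erase_work *w, int shutdown);
    };

    /* Stubs standing in for erase_worker() and schedule_ubi_work(). */
    static int model_erase_worker(struct erase_work *w, int shutdown)
    {
        (void)shutdown;
        free(w);            /* a real worker erases w->e->pnum first */
        return 0;
    }

    static void model_queue_work(struct erase_work *w)
    {
        (void)w;            /* a real queue would take ownership of @w */
    }

    static struct erase_work *new_erase_work(struct wl_entry *e, int vol_id,
                                             int lnum, int torture)
    {
        struct erase_work *w = malloc(sizeof(*w));

        if (w) {
            w->e = e;
            w->vol_id = vol_id;
            w->lnum = lnum;
            w->torture = torture;
            w->func = model_erase_worker;
        }
        return w;
    }

    /* schedule_erase(): queue the work for the background thread. */
    static int model_schedule_erase(struct wl_entry *e, int v, int l, int t)
    {
        struct erase_work *w = new_erase_work(e, v, l, t);

        if (!w)
            return -1;
        model_queue_work(w);
        return 0;
    }

    /* do_sync_erase(): build the same work item but run it right away. */
    static int model_do_sync_erase(struct wl_entry *e, int v, int l, int t)
    {
        struct erase_work *w = new_erase_work(e, v, l, t);

        return w ? w->func(w, 0) : -1;
    }
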
954 int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e, in ubi_wl_put_fm_peb() argument
963 ubi_assert(pnum < ubi->peb_count); in ubi_wl_put_fm_peb()
965 spin_lock(&ubi->wl_lock); in ubi_wl_put_fm_peb()
966 e = ubi->lookuptbl[pnum]; in ubi_wl_put_fm_peb()
975 ubi->lookuptbl[pnum] = e; in ubi_wl_put_fm_peb()
981 spin_unlock(&ubi->wl_lock); in ubi_wl_put_fm_peb()
984 return schedule_erase(ubi, e, vol_id, lnum, torture); in ubi_wl_put_fm_peb()
998 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, argument
1013 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
1017 mutex_lock(&ubi->move_mutex);
1018 spin_lock(&ubi->wl_lock);
1019 ubi_assert(!ubi->move_from && !ubi->move_to);
1020 ubi_assert(!ubi->move_to_put);
1022 if (!ubi->free.rb_node ||
1023 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
1035 !ubi->free.rb_node, !ubi->used.rb_node);
1042 anchor = !anchor_pebs_avalible(&ubi->free);
1045 e1 = find_anchor_wl_entry(&ubi->used);
1048 e2 = get_peb_for_wl(ubi);
1052 self_check_in_wl_tree(ubi, e1, &ubi->used);
1053 rb_erase(&e1->u.rb, &ubi->used);
1055 } else if (!ubi->scrub.rb_node) {
1057 if (!ubi->scrub.rb_node) {
1064 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1065 e2 = get_peb_for_wl(ubi);
1074 self_check_in_wl_tree(ubi, e1, &ubi->used);
1075 rb_erase(&e1->u.rb, &ubi->used);
1081 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
1082 e2 = get_peb_for_wl(ubi);
1086 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
1087 rb_erase(&e1->u.rb, &ubi->scrub);
1091 ubi->move_from = e1;
1092 ubi->move_to = e2;
1093 spin_unlock(&ubi->wl_lock);
1106 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
1142 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
1177 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
1179 ubi->erroneous_peb_count);
1196 ubi_free_vid_hdr(ubi, vid_hdr);
1198 spin_lock(&ubi->wl_lock);
1199 if (!ubi->move_to_put) {
1200 wl_tree_add(e2, &ubi->used);
1203 ubi->move_from = ubi->move_to = NULL;
1204 ubi->move_to_put = ubi->wl_scheduled = 0;
1205 spin_unlock(&ubi->wl_lock);
1207 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
1222 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
1230 mutex_unlock(&ubi->move_mutex);
1245 spin_lock(&ubi->wl_lock);
1247 prot_queue_add(ubi, e1);
1249 wl_tree_add(e1, &ubi->erroneous);
1250 ubi->erroneous_peb_count += 1;
1252 wl_tree_add(e1, &ubi->scrub);
1254 wl_tree_add(e1, &ubi->used);
1255 ubi_assert(!ubi->move_to_put);
1256 ubi->move_from = ubi->move_to = NULL;
1257 ubi->wl_scheduled = 0;
1258 spin_unlock(&ubi->wl_lock);
1260 ubi_free_vid_hdr(ubi, vid_hdr);
1261 err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
1266 mutex_unlock(&ubi->move_mutex);
1276 spin_lock(&ubi->wl_lock);
1277 ubi->move_from = ubi->move_to = NULL;
1278 ubi->move_to_put = ubi->wl_scheduled = 0;
1279 spin_unlock(&ubi->wl_lock);
1281 ubi_free_vid_hdr(ubi, vid_hdr);
1286 ubi_ro_mode(ubi);
1287 mutex_unlock(&ubi->move_mutex);
1292 ubi->wl_scheduled = 0;
1293 spin_unlock(&ubi->wl_lock);
1294 mutex_unlock(&ubi->move_mutex);
1295 ubi_free_vid_hdr(ubi, vid_hdr);
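
wear_leveling_worker() selects a source PEB e1 (a fastmap anchor candidate,
the least-worn entry of the used tree, or the first scrub entry) and a
target e2 (a lightly worn free PEB from get_peb_for_wl()), copies the LEB
with ubi_eba_copy_leb(), and then erases e1; the long tail of fragments
above is the error handling that decides whether e1 ends up back in used,
scrub or erroneous. A heavily condensed sketch of just the source-selection
policy (the struct, the threshold value and the ordering of checks are a
simplification of the branches above, not a faithful copy):

    enum wl_source { WL_FROM_NONE, WL_FROM_ANCHOR, WL_FROM_SCRUB, WL_FROM_USED };

    struct wl_state {                 /* condensed view of the WL trees */
        int have_free, have_used, have_scrub;
        int need_anchor;              /* fastmap wants a low-numbered PEB  */
        int lowest_used_ec;           /* rb_first(&used), by erase counter */
        int free_cand_ec;             /* candidate from the free tree      */
    };

    #define MODEL_WL_THRESHOLD 4096   /* wear gap that justifies a move */

    static enum wl_source model_pick_source(const struct wl_state *s)
    {
        if (!s->have_free || (!s->have_used && !s->have_scrub))
            return WL_FROM_NONE;          /* nothing to move or nowhere to go */
        if (s->need_anchor && s->have_used)
            return WL_FROM_ANCHOR;        /* produce a fastmap anchor PEB */
        if (s->have_scrub)
            return WL_FROM_SCRUB;         /* bit-flips seen: scrub first */
        if (s->free_cand_ec - s->lowest_used_ec >= MODEL_WL_THRESHOLD)
            return WL_FROM_USED;          /* wear gap is large enough */
        return WL_FROM_NONE;              /* not worth a copy yet */
    }
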
1308 static int ensure_wear_leveling(struct ubi_device *ubi, int nested) argument
1315 spin_lock(&ubi->wl_lock);
1316 if (ubi->wl_scheduled)
1324 if (!ubi->scrub.rb_node) {
1325 if (!ubi->used.rb_node || !ubi->free.rb_node)
1335 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1336 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1344 ubi->wl_scheduled = 1;
1345 spin_unlock(&ubi->wl_lock);
1356 __schedule_ubi_work(ubi, wrk);
1358 schedule_ubi_work(ubi, wrk);
1362 spin_lock(&ubi->wl_lock);
1363 ubi->wl_scheduled = 0;
1365 spin_unlock(&ubi->wl_lock);
1374 int ubi_ensure_anchor_pebs(struct ubi_device *ubi) argument
1378 spin_lock(&ubi->wl_lock);
1379 if (ubi->wl_scheduled) {
1380 spin_unlock(&ubi->wl_lock);
1383 ubi->wl_scheduled = 1;
1384 spin_unlock(&ubi->wl_lock);
1388 spin_lock(&ubi->wl_lock);
1389 ubi->wl_scheduled = 0;
1390 spin_unlock(&ubi->wl_lock);
1396 schedule_ubi_work(ubi, wrk);
1412 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, argument
1431 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1433 err = sync_erase(ubi, e, wl_wrk->torture);
1438 spin_lock(&ubi->wl_lock);
1439 wl_tree_add(e, &ubi->free);
1440 ubi->free_count++;
1441 spin_unlock(&ubi->wl_lock);
1447 serve_prot_queue(ubi);
1450 err = ensure_wear_leveling(ubi, 1);
1462 err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
1481 if (!ubi->bad_allowed) {
1486 spin_lock(&ubi->volumes_lock);
1487 if (ubi->beb_rsvd_pebs == 0) {
1488 if (ubi->avail_pebs == 0) {
1489 spin_unlock(&ubi->volumes_lock);
1493 ubi->avail_pebs -= 1;
1496 spin_unlock(&ubi->volumes_lock);
1499 err = ubi_io_mark_bad(ubi, pnum);
1503 spin_lock(&ubi->volumes_lock);
1504 if (ubi->beb_rsvd_pebs > 0) {
1510 ubi->avail_pebs += 1;
1513 ubi->beb_rsvd_pebs -= 1;
1515 ubi->bad_peb_count += 1;
1516 ubi->good_peb_count -= 1;
1517 ubi_calculate_reserved(ubi);
1520 else if (ubi->beb_rsvd_pebs)
1521 ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
1524 spin_unlock(&ubi->volumes_lock);
1530 spin_lock(&ubi->volumes_lock);
1531 ubi->avail_pebs += 1;
1532 spin_unlock(&ubi->volumes_lock);
1534 ubi_ro_mode(ubi);
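
erase_worker() erases the PEB synchronously via sync_erase(); on success
the entry goes back to the free tree and the worker kicks
serve_prot_queue() and ensure_wear_leveling(), while a fatal I/O error
leads to marking the PEB bad, paid for out of the bad-block reserve or,
once that is empty, out of avail_pebs. A small model of just that
accounting step (field names are simplified and the later refill of the
reserve is omitted):

    struct peb_accounting {
        int beb_rsvd_pebs;    /* PEBs reserved for bad-block handling */
        int avail_pebs;       /* PEBs still available to volumes      */
        int bad_peb_count;
        int good_peb_count;
    };

    /*
     * Account for one PEB that has just been marked bad.  Returns 0 on
     * success or -1 when there is no spare capacity left, which is
     * roughly the point where the driver switches to read-only mode.
     */
    static int model_account_bad_peb(struct peb_accounting *a)
    {
        if (a->beb_rsvd_pebs > 0)
            a->beb_rsvd_pebs--;        /* pay from the reserve */
        else if (a->avail_pebs > 0)
            a->avail_pebs--;           /* reserve empty: shrink the pool */
        else
            return -1;                 /* nothing left to take the loss from */

        a->bad_peb_count++;
        a->good_peb_count--;
        return 0;
    }
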
1551 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, argument
1559 ubi_assert(pnum < ubi->peb_count);
1562 spin_lock(&ubi->wl_lock);
1563 e = ubi->lookuptbl[pnum];
1564 if (e == ubi->move_from) {
1571 spin_unlock(&ubi->wl_lock);
1574 mutex_lock(&ubi->move_mutex);
1575 mutex_unlock(&ubi->move_mutex);
1577 } else if (e == ubi->move_to) {
1588 ubi_assert(!ubi->move_to_put);
1589 ubi->move_to_put = 1;
1590 spin_unlock(&ubi->wl_lock);
1593 if (in_wl_tree(e, &ubi->used)) {
1594 self_check_in_wl_tree(ubi, e, &ubi->used);
1595 rb_erase(&e->u.rb, &ubi->used);
1596 } else if (in_wl_tree(e, &ubi->scrub)) {
1597 self_check_in_wl_tree(ubi, e, &ubi->scrub);
1598 rb_erase(&e->u.rb, &ubi->scrub);
1599 } else if (in_wl_tree(e, &ubi->erroneous)) {
1600 self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1601 rb_erase(&e->u.rb, &ubi->erroneous);
1602 ubi->erroneous_peb_count -= 1;
1603 ubi_assert(ubi->erroneous_peb_count >= 0);
1607 err = prot_queue_del(ubi, e->pnum);
1610 ubi_ro_mode(ubi);
1611 spin_unlock(&ubi->wl_lock);
1616 spin_unlock(&ubi->wl_lock);
1618 err = schedule_erase(ubi, e, vol_id, lnum, torture);
1620 spin_lock(&ubi->wl_lock);
1621 wl_tree_add(e, &ubi->used);
1622 spin_unlock(&ubi->wl_lock);
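
ubi_wl_put_peb() is how the EBA layer returns a PEB it no longer needs:
if the PEB is currently being moved (move_from) the function waits on
move_mutex and retries, if it is the move target (move_to) it only sets
move_to_put and lets the wear-leveling worker finish, and otherwise it
detaches the entry from whichever tree or queue holds it and schedules an
erase. A condensed decision table of those branches (the enums are
illustrative, not driver types):

    enum peb_home {
        IN_USED, IN_SCRUB, IN_ERRONEOUS, IN_PROT_QUEUE,
        IS_MOVE_FROM, IS_MOVE_TO
    };

    enum put_action { PUT_RETRY, PUT_DEFER_TO_MOVER, PUT_ERASE };

    /* What to do with a PEB handed back to the WL subsystem. */
    static enum put_action model_put_peb(enum peb_home home)
    {
        switch (home) {
        case IS_MOVE_FROM:
            /* Being copied away right now: wait for the wear-leveling
             * worker (move_mutex) and look the PEB up again. */
            return PUT_RETRY;
        case IS_MOVE_TO:
            /* The worker erases or re-queues it when the copy ends. */
            return PUT_DEFER_TO_MOVER;
        case IN_USED:
        case IN_SCRUB:
        case IN_ERRONEOUS:
        case IN_PROT_QUEUE:
        default:
            /* Detach from the tree or protection queue, then erase. */
            return PUT_ERASE;
        }
    }
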
1638 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) argument
1645 spin_lock(&ubi->wl_lock);
1646 e = ubi->lookuptbl[pnum];
1647 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1648 in_wl_tree(e, &ubi->erroneous)) {
1649 spin_unlock(&ubi->wl_lock);
1653 if (e == ubi->move_to) {
1660 spin_unlock(&ubi->wl_lock);
1666 if (in_wl_tree(e, &ubi->used)) {
1667 self_check_in_wl_tree(ubi, e, &ubi->used);
1668 rb_erase(&e->u.rb, &ubi->used);
1672 err = prot_queue_del(ubi, e->pnum);
1675 ubi_ro_mode(ubi);
1676 spin_unlock(&ubi->wl_lock);
1681 wl_tree_add(e, &ubi->scrub);
1682 spin_unlock(&ubi->wl_lock);
1688 return ensure_wear_leveling(ubi, 0);
1703 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) argument
1713 vol_id, lnum, ubi->works_count);
1719 down_read(&ubi->work_sem);
1720 spin_lock(&ubi->wl_lock);
1721 list_for_each_entry(wrk, &ubi->works, list) {
1725 ubi->works_count -= 1;
1726 ubi_assert(ubi->works_count >= 0);
1727 spin_unlock(&ubi->wl_lock);
1729 err = wrk->func(ubi, wrk, 0);
1731 up_read(&ubi->work_sem);
1735 spin_lock(&ubi->wl_lock);
1740 spin_unlock(&ubi->wl_lock);
1741 up_read(&ubi->work_sem);
1748 down_write(&ubi->work_sem);
1749 up_write(&ubi->work_sem);
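
ubi_wl_flush() first runs, in the caller's context, every pending work that
targets the given volume and LEB, and then takes work_sem for writing.
Because each worker holds work_sem for reading while it runs (see do_work()
and schedule_ubi_work() above), the down_write()/up_write() pair acts as a
barrier that returns only after all in-flight works have completed. A tiny
pthread illustration of that reader/writer barrier idiom (not UBI code,
just the synchronization shape):

    #include <pthread.h>

    static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;

    /* Each worker wraps its job in a read lock, like do_work() does. */
    static void run_one_work(void (*job)(void))
    {
        pthread_rwlock_rdlock(&work_sem);
        job();
        pthread_rwlock_unlock(&work_sem);
    }

    /*
     * Flush barrier: the write lock cannot be taken while any reader
     * (i.e. any running worker) still holds the lock, so returning from
     * here means every work that was in flight has finished.
     */
    static void wait_for_running_works(void)
    {
        pthread_rwlock_wrlock(&work_sem);
        pthread_rwlock_unlock(&work_sem);
    }
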
1792 struct ubi_device *ubi = u; local
1795 ubi->bgt_name, task_pid_nr(current));
1807 spin_lock(&ubi->wl_lock);
1808 if (list_empty(&ubi->works) || ubi->ro_mode ||
1809 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1811 spin_unlock(&ubi->wl_lock);
1815 spin_unlock(&ubi->wl_lock);
1817 err = do_work(ubi);
1820 ubi->bgt_name, err);
1827 ubi->bgt_name, WL_MAX_FAILURES);
1828 ubi_ro_mode(ubi);
1829 ubi->thread_enabled = 0;
1838 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1846 static void cancel_pending(struct ubi_device *ubi) argument
1848 while (!list_empty(&ubi->works)) {
1851 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1853 wrk->func(ubi, wrk, 1);
1854 ubi->works_count -= 1;
1855 ubi_assert(ubi->works_count >= 0);
1867 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) argument
1875 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1876 spin_lock_init(&ubi->wl_lock);
1877 mutex_init(&ubi->move_mutex);
1878 init_rwsem(&ubi->work_sem);
1879 ubi->max_ec = ai->max_ec;
1880 INIT_LIST_HEAD(&ubi->works);
1882 INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
1885 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1888 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1889 if (!ubi->lookuptbl)
1893 INIT_LIST_HEAD(&ubi->pq[i]);
1894 ubi->pq_head = 0;
1905 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1906 ubi->lookuptbl[e->pnum] = e;
1907 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
1915 ubi->free_count = 0;
1926 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1928 wl_tree_add(e, &ubi->free);
1929 ubi->free_count++;
1931 ubi->lookuptbl[e->pnum] = e;
1946 ubi->lookuptbl[e->pnum] = e;
1951 wl_tree_add(e, &ubi->used);
1955 wl_tree_add(e, &ubi->scrub);
1964 if (ubi->fm)
1965 ubi_assert(ubi->good_peb_count == \
1966 found_pebs + ubi->fm->used_blocks);
1968 ubi_assert(ubi->good_peb_count == found_pebs);
1973 reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
1976 if (ubi->avail_pebs < reserved_pebs) {
1978 ubi->avail_pebs, reserved_pebs);
1979 if (ubi->corr_peb_count)
1981 ubi->corr_peb_count);
1984 ubi->avail_pebs -= reserved_pebs;
1985 ubi->rsvd_pebs += reserved_pebs;
1988 err = ensure_wear_leveling(ubi, 0);
1995 cancel_pending(ubi);
1996 tree_destroy(&ubi->used);
1997 tree_destroy(&ubi->free);
1998 tree_destroy(&ubi->scrub);
1999 kfree(ubi->lookuptbl);
2007 static void protection_queue_destroy(struct ubi_device *ubi) argument
2013 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
2024 void ubi_wl_close(struct ubi_device *ubi) argument
2027 cancel_pending(ubi);
2028 protection_queue_destroy(ubi);
2029 tree_destroy(&ubi->used);
2030 tree_destroy(&ubi->erroneous);
2031 tree_destroy(&ubi->free);
2032 tree_destroy(&ubi->scrub);
2033 kfree(ubi->lookuptbl);
2046 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec) argument
2052 if (!ubi_dbg_chk_gen(ubi))
2055 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
2059 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
2089 static int self_check_in_wl_tree(const struct ubi_device *ubi, argument
2092 if (!ubi_dbg_chk_gen(ubi))
2112 static int self_check_in_pq(const struct ubi_device *ubi, argument
2118 if (!ubi_dbg_chk_gen(ubi))
2122 list_for_each_entry(p, &ubi->pq[i], u.list)