
Lines matching "+full:0 +full:d" in drivers/mtd/mtdswap.c

75 #define EBLOCK_BAD		(1 << 0)
148 #define MTDSWAP_MAGIC_CLEAN 0x2095
150 #define MTDSWAP_TYPE_CLEAN 0
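/* OOB marker constants: a magic number identifying mtdswap-managed
 * blocks, plus type codes (clean/dirty) stored next to it. */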
173 #define TREE_ROOT(d, name) (&d->trees[MTDSWAP_ ## name].root)
174 #define TREE_EMPTY(d, name) (TREE_ROOT(d, name)->rb_node == NULL)
175 #define TREE_NONEMPTY(d, name) (!TREE_EMPTY(d, name))
176 #define TREE_COUNT(d, name) (d->trees[MTDSWAP_ ## name].count)
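/* The TREE_* helpers use token pasting (##) on the tree name, so
 * TREE_ROOT(d, CLEAN) expands to &d->trees[MTDSWAP_CLEAN].root. */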
193 "Include builtin swap header (default 0, without header)");
195 static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background);
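/* mtdswap_eb_offset() below computes a block's byte offset from its
 * index in the eb_data array times the MTD erase size. */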
197 static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb)
199 return (loff_t)(eb - d->eb_data) * d->mtd->erasesize;
202 static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb)
209 oldidx = tp - &d->trees[0];
211 d->trees[oldidx].count--;
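/* mtdswap_rb_add() files a block into the tree selected by idx,
 * detaching it from its current tree first and keeping the per-tree
 * block counts in sync. */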
235 static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx)
239 if (eb->root == &d->trees[idx].root)
242 mtdswap_eb_detach(d, eb);
243 root = &d->trees[idx].root;
246 d->trees[idx].count++;
/* mtdswap_rb_index() */
255 i = 0;
264 static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
269 d->spare_eblks--;
271 mtdswap_eb_detach(d, eb);
275 if (!mtd_can_have_bb(d->mtd))
278 offset = mtdswap_eb_offset(d, eb);
279 dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
280 ret = mtd_block_markbad(d->mtd, offset);
283 dev_warn(d->dev, "Mark block bad failed for block at %08llx "
284 "error %d\n", offset, ret);
292 static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
295 struct swap_eb *curr_write = d->curr_write;
299 d->curr_write = NULL;
301 if (!marked && d->curr_write_pos != 0) {
302 mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
303 return 0;
307 return mtdswap_handle_badblock(d, eb);
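/* A failed write on a partially filled block demotes it to the FAILING
 * tree for later scrubbing; otherwise it is marked bad right away. */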
310 static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
313 int ret = mtd_read_oob(d->mtd, from, ops);
319 dev_warn(d->dev, "Read OOB failed %d for block at %08llx\n",
325 dev_warn(d->dev, "Read OOB returned short read (%zd bytes not "
331 return 0;
334 static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
341 offset = mtdswap_eb_offset(d, eb);
344 if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
347 ops.ooblen = 2 * d->mtd->oobavail;
348 ops.oobbuf = d->oob_buf;
349 ops.ooboffs = 0;
353 ret = mtdswap_read_oob(d, offset, &ops);
358 data = (struct mtdswap_oobdata *)d->oob_buf;
360 (d->oob_buf + d->mtd->oobavail);
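/* Markers sit in the OOB area: the read above fetches two pages' worth
 * of OOB data, since the clean marker is written to the first page and
 * the dirty marker to the second (see mtdswap_write_marker below). */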
380 static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
388 ops.ooboffs = 0;
397 offset = mtdswap_eb_offset(d, eb);
401 offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
404 ret = mtd_write_oob(d->mtd, offset, &ops);
407 dev_warn(d->dev, "Write OOB failed for block at %08llx "
408 "error %d\n", offset, ret);
410 mtdswap_handle_write_error(d, eb);
415 dev_warn(d->dev, "Short OOB write for block at %08llx: "
421 return 0;
429 static void mtdswap_check_counts(struct mtdswap_dev *d)
436 cnt = 0;
437 for (i = 0; i < d->eblks; i++) {
438 eb = d->eb_data + i;
447 if (cnt == 0)
453 d->max_erase_count = MTDSWAP_ECNT_MAX(&hist_root);
455 for (i = 0; i < d->eblks; i++) {
456 eb = d->eb_data + i;
468 static void mtdswap_scan_eblks(struct mtdswap_dev *d)
474 for (i = 0; i < d->eblks; i++) {
475 eb = d->eb_data + i;
477 status = mtdswap_read_markers(d, eb);
478 if (status < 0)
500 mtdswap_check_counts(d);
502 for (i = 0; i < d->eblks; i++) {
503 eb = d->eb_data + i;
509 mtdswap_rb_add(d, eb, idx);
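/* Initial scan: read every block's markers, cross-check the erase
 * counts, then file each block into the matching tree. */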
517 static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb)
520 unsigned int maxweight = d->pages_per_eblk;
522 if (eb == d->curr_write)
526 mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
528 mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
530 mtdswap_rb_add(d, eb, MTDSWAP_USED);
531 else if (weight == 0)
532 mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
534 mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG);
536 mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG);
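/* Classification by weight (the count of live pages): blocks with
 * bitflips or errors go to BITFLIP/FAILING, fully live blocks to USED,
 * fully stale ones to DIRTY, and the rest to LOWFRAG or HIFRAG. */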
539 static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb)
541 struct mtd_info *mtd = d->mtd;
543 unsigned int retries = 0;
547 if (eb->erase_count > d->max_erase_count)
548 d->max_erase_count = eb->erase_count;
551 memset(&erase, 0, sizeof(struct erase_info));
552 erase.addr = mtdswap_eb_offset(d, eb);
558 dev_warn(d->dev,
565 dev_err(d->dev, "Cannot erase erase block %#llx on %s\n",
568 mtdswap_handle_badblock(d, eb);
572 return 0;
575 static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
579 struct swap_eb *old_eb = d->curr_write;
583 if (old_eb == NULL || d->curr_write_pos >= d->pages_per_eblk) {
585 if (TREE_EMPTY(d, CLEAN))
588 clean_root = TREE_ROOT(d, CLEAN);
592 TREE_COUNT(d, CLEAN)--;
594 ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
600 d->curr_write_pos = 0;
601 d->curr_write = eb;
603 mtdswap_store_eb(d, old_eb);
606 *block = (d->curr_write - d->eb_data) * d->pages_per_eblk +
607 d->curr_write_pos;
609 d->curr_write->active_count++;
610 d->revmap[*block] = page;
611 d->curr_write_pos++;
613 return 0;
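/* Pages are appended sequentially to the current write block; when it
 * fills, a fresh block is pulled from the CLEAN tree and marked dirty
 * before use. revmap records which swap page owns each physical slot. */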
616 static unsigned int mtdswap_free_page_cnt(struct mtdswap_dev *d)
618 return TREE_COUNT(d, CLEAN) * d->pages_per_eblk +
619 d->pages_per_eblk - d->curr_write_pos;
622 static unsigned int mtdswap_enough_free_pages(struct mtdswap_dev *d)
624 return mtdswap_free_page_cnt(d) > d->pages_per_eblk;
627 static int mtdswap_write_block(struct mtdswap_dev *d, char *buf,
630 struct mtd_info *mtd = d->mtd;
638 while (!mtdswap_enough_free_pages(d))
639 if (mtdswap_gc(d, 0) > 0)
642 ret = mtdswap_map_free_block(d, page, bp);
643 eb = d->eb_data + (*bp / d->pages_per_eblk);
646 d->curr_write = NULL;
648 d->revmap[*bp] = PAGE_UNDEF;
652 if (ret < 0)
658 d->curr_write_pos--;
660 d->revmap[*bp] = PAGE_UNDEF;
661 mtdswap_handle_write_error(d, eb);
665 if (ret < 0) {
666 dev_err(d->dev, "Write to MTD device failed: %d (%zd written)",
672 dev_err(d->dev, "Short write to MTD device: %zd written",
681 d->curr_write_pos--;
683 d->revmap[*bp] = PAGE_UNDEF;
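/* Foreground GC runs here until enough free pages exist. On a write
 * error the slot is unwound (curr_write_pos and revmap rolled back)
 * before the error path is taken. */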
688 static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
691 struct mtd_info *mtd = d->mtd;
698 page = d->revmap[oldblock];
700 retries = 0;
703 ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
705 if (ret < 0 && !mtd_is_bitflip(ret)) {
706 oldeb = d->eb_data + oldblock / d->pages_per_eblk;
709 dev_err(d->dev, "Read Error: %d (block %u)\n", ret,
719 dev_err(d->dev, "Short read: %zd (block %u)\n", retlen,
725 ret = mtdswap_write_block(d, d->page_buf, page, newblock, 1);
726 if (ret < 0) {
727 d->page_data[page] = BLOCK_ERROR;
728 dev_err(d->dev, "Write error: %d\n", ret);
732 eb = d->eb_data + *newblock / d->pages_per_eblk;
733 d->page_data[page] = *newblock;
734 d->revmap[oldblock] = PAGE_UNDEF;
735 eb = d->eb_data + oldblock / d->pages_per_eblk;
738 return 0;
741 d->page_data[page] = BLOCK_ERROR;
742 d->revmap[oldblock] = PAGE_UNDEF;
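/* Moving a live page during GC updates both directions of the mapping:
 * page_data (swap page -> physical slot) and revmap (slot -> page). */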
746 static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb)
751 errcode = 0;
752 eblk_base = (eb - d->eb_data) * d->pages_per_eblk;
754 for (i = 0; i < d->pages_per_eblk; i++) {
755 if (d->spare_eblks < MIN_SPARE_EBLOCKS)
759 if (d->revmap[block] == PAGE_UNDEF)
762 ret = mtdswap_move_block(d, block, &newblock);
763 if (ret < 0 && !errcode)
763 if (ret < 0 && !errcode) in mtdswap_gc_eblock()
770 static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d) in __mtdswap_choose_gc_tree() argument
774 if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_THRESHOLD) in __mtdswap_choose_gc_tree()
780 if (d->trees[idx].root.rb_node != NULL) in __mtdswap_choose_gc_tree()
814 static int mtdswap_choose_wl_tree(struct mtdswap_dev *d)
820 max = 0;
821 for (i = 0; i <= MTDSWAP_DIRTY; i++) {
822 root = &d->trees[i].root;
826 wear = d->max_erase_count - MTDSWAP_ECNT_MIN(root);
834 pick_cnt = 0;
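/* Wear leveling: wear is the gap between a tree's least-erased block
 * and the global maximum; a tree with excessive wear can be picked for
 * GC ahead of the usual fragmentation-based choice. */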
842 static int mtdswap_choose_gc_tree(struct mtdswap_dev *d,
847 if (TREE_NONEMPTY(d, FAILING) &&
848 (background || (TREE_EMPTY(d, CLEAN) && TREE_EMPTY(d, DIRTY))))
851 idx = mtdswap_choose_wl_tree(d);
855 return __mtdswap_choose_gc_tree(d);
858 static struct swap_eb *mtdswap_pick_gc_eblk(struct mtdswap_dev *d,
865 if (background && TREE_COUNT(d, CLEAN) > CLEAN_BLOCK_THRESHOLD &&
866 TREE_EMPTY(d, DIRTY) && TREE_EMPTY(d, FAILING))
869 idx = mtdswap_choose_gc_tree(d, background);
870 if (idx < 0)
873 rp = &d->trees[idx].root;
878 d->trees[idx].count--;
/* mtdswap_test_patt() */
884 return i % 2 ? 0x55555555 : 0xAAAAAAAA;
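/* Alternating 0x55/0xAA patterns flip every bit between passes, so the
 * torture test below exercises each cell in both states. */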
887 static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
890 struct mtd_info *mtd = d->mtd;
893 unsigned int *p1 = (unsigned int *)d->page_buf;
894 unsigned char *p2 = (unsigned char *)d->oob_buf;
901 ops.ooboffs = 0;
902 ops.datbuf = d->page_buf;
903 ops.oobbuf = d->oob_buf;
904 base = mtdswap_eb_offset(d, eb);
905 mtd_pages = d->pages_per_eblk * PAGE_SIZE / mtd->writesize;
907 for (test = 0; test < 2; test++) {
909 for (i = 0; i < mtd_pages; i++) {
911 memset(d->page_buf, patt, mtd->writesize);
912 memset(d->oob_buf, patt, mtd->oobavail);
921 for (i = 0; i < mtd_pages; i++) {
927 for (j = 0; j < mtd->writesize/sizeof(int); j++)
931 for (j = 0; j < mtd->oobavail; j++)
938 ret = mtdswap_erase_block(d, eb);
947 mtdswap_handle_badblock(d, eb);
948 return 0;
951 static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)
956 if (d->spare_eblks < MIN_SPARE_EBLOCKS)
959 eb = mtdswap_pick_gc_eblk(d, background);
963 ret = mtdswap_gc_eblock(d, eb);
968 mtdswap_handle_badblock(d, eb);
969 return 0;
973 ret = mtdswap_erase_block(d, eb);
975 (ret || !mtdswap_eblk_passes(d, eb)))
976 return 0;
978 if (ret == 0)
979 ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN);
981 if (ret == 0)
982 mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
984 mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
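/* One GC pass: pick a victim, move its live pages out, erase it, and
 * return it to the CLEAN tree; if rewriting the clean marker fails the
 * block is filed as DIRTY instead. */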
/* mtdswap_background() */
991 struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
995 ret = mtdswap_gc(d, 1);
1001 static void mtdswap_cleanup(struct mtdswap_dev *d)
1003 vfree(d->eb_data);
1004 vfree(d->revmap);
1005 vfree(d->page_data);
1006 kfree(d->oob_buf);
1007 kfree(d->page_buf);
/* mtdswap_flush() */
1012 struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
1014 mtd_sync(d->mtd);
1015 return 0;

/* mtdswap_badblocks() */
1023 badcnt = 0;
1026 for (offset = 0; offset < size; offset += mtd->erasesize)
/* mtdswap_writesect() */
1036 struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
1041 d->sect_write_count++;
1043 if (d->spare_eblks < MIN_SPARE_EBLOCKS)
1048 if (unlikely(page == 0))
1049 return 0;
1054 mapped = d->page_data[page];
1056 eb = d->eb_data + (mapped / d->pages_per_eblk);
1058 mtdswap_store_eb(d, eb);
1059 d->page_data[page] = BLOCK_UNDEF;
1060 d->revmap[mapped] = PAGE_UNDEF;
1063 ret = mtdswap_write_block(d, buf, page, &newblock, 0);
1064 d->mtd_write_count++;
1066 if (ret < 0)
1069 eb = d->eb_data + (newblock / d->pages_per_eblk);
1070 d->page_data[page] = newblock;
1072 return 0;
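/* A rewrite invalidates the page's old slot first (mapping cleared, the
 * old block re-filed via mtdswap_store_eb), then writes the new copy.
 * Page 0 is reserved for the swap header and never written to flash. */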
1076 static int mtdswap_auto_header(struct mtdswap_dev *d, char *buf)
1080 memset(buf, 0, PAGE_SIZE - 10);
1083 hd->info.last_page = d->mbd_dev->size - 1;
1084 hd->info.nr_badpages = 0;
1088 return 0;
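/* With the header option enabled, reads of page 0 are satisfied by
 * synthesizing a swap header rather than reading flash. */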
/* mtdswap_readsect() */
1094 struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
1095 struct mtd_info *mtd = d->mtd;
1102 d->sect_read_count++;
1105 if (unlikely(page == 0))
1106 return mtdswap_auto_header(d, buf);
1111 realblock = d->page_data[page];
1113 memset(buf, 0x0, PAGE_SIZE);
1115 return 0;
1120 eb = d->eb_data + (realblock / d->pages_per_eblk);
1121 BUG_ON(d->revmap[realblock] == PAGE_UNDEF);
1124 retries = 0;
1129 d->mtd_read_count++;
1132 mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
1133 ret = 0;
1136 if (ret < 0) {
1137 dev_err(d->dev, "Read error %d\n", ret);
1139 mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
1148 dev_err(d->dev, "Short read %zd\n", retlen);
1152 return 0;
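/* Unmapped pages read back as zeroes. A corrected bitflip is treated as
 * success but moves the block to the BITFLIP tree so GC scrubs it. */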
/* mtdswap_discard() */
1158 struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
1163 d->discard_count++;
1166 mapped = d->page_data[page];
1168 eb = d->eb_data + (mapped / d->pages_per_eblk);
1170 mtdswap_store_eb(d, eb);
1171 d->page_data[page] = BLOCK_UNDEF;
1172 d->revmap[mapped] = PAGE_UNDEF;
1173 d->discard_page_count++;
1175 d->page_data[page] = BLOCK_UNDEF;
1176 d->discard_page_count++;
1180 return 0;
/* mtdswap_show() */
1185 struct mtdswap_dev *d = (struct mtdswap_dev *) s->private;
1190 unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
1196 mutex_lock(&d->mbd_dev->lock);
1198 for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
1199 struct rb_root *root = &d->trees[i].root;
1202 count[i] = d->trees[i].count;
1206 count[i] = 0;
1209 if (d->curr_write) {
1211 cwp = d->curr_write_pos;
1212 cwecount = d->curr_write->erase_count;
1215 sum = 0;
1216 for (i = 0; i < d->eblks; i++)
1217 sum += d->eb_data[i].erase_count;
1219 use_size = (uint64_t)d->eblks * d->mtd->erasesize;
1220 bb_cnt = mtdswap_badblocks(d->mtd, use_size);
1222 mapped = 0;
1223 pages = d->mbd_dev->size;
1224 for (i = 0; i < pages; i++)
1225 if (d->page_data[i] != BLOCK_UNDEF)
1228 mutex_unlock(&d->mbd_dev->lock);
1230 for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
1235 seq_printf(s, "%s:\t%5d erase blocks, erased min %d, "
1236 "max %d times\n",
1239 seq_printf(s, "%s:\t%5d erase blocks, all erased %d "
1249 cwp, d->pages_per_eblk - cwp, cwecount);
1255 seq_printf(s, "mtdswap_readsect count: %llu\n", d->sect_read_count);
1256 seq_printf(s, "mtdswap_writesect count: %llu\n", d->sect_write_count);
1257 seq_printf(s, "mtdswap_discard count: %llu\n", d->discard_count);
1258 seq_printf(s, "mtd read count: %llu\n", d->mtd_read_count);
1259 seq_printf(s, "mtd write count: %llu\n", d->mtd_write_count);
1260 seq_printf(s, "discarded pages count: %llu\n", d->discard_page_count);
1266 return 0;
1281 static int mtdswap_add_debugfs(struct mtdswap_dev *d)
1283 struct dentry *root = d->mtd->dbg.dfs_dir;
1287 return 0;
1292 dent = debugfs_create_file("mtdswap_stats", S_IRUSR, root, d,
1295 dev_err(d->dev, "debugfs_create_file failed\n");
1299 return 0;
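/* The statistics above are exposed owner-read-only (S_IRUSR) as
 * "mtdswap_stats" under the MTD device's debugfs directory. */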
1302 static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
1305 struct mtd_info *mtd = d->mbd_dev->mtd;
1309 d->mtd = mtd;
1310 d->eblks = eblocks;
1311 d->spare_eblks = spare_cnt;
1312 d->pages_per_eblk = mtd->erasesize >> PAGE_SHIFT;
1314 pages = d->mbd_dev->size;
1315 blocks = eblocks * d->pages_per_eblk;
1317 for (i = 0; i < MTDSWAP_TREE_CNT; i++)
1318 d->trees[i].root = RB_ROOT;
1320 d->page_data = vmalloc(array_size(pages, sizeof(int)));
1321 if (!d->page_data)
1324 d->revmap = vmalloc(array_size(blocks, sizeof(int)));
1325 if (!d->revmap)
1328 eblk_bytes = sizeof(struct swap_eb) * d->eblks;
1329 d->eb_data = vzalloc(eblk_bytes);
1330 if (!d->eb_data)
1333 for (i = 0; i < pages; i++)
1334 d->page_data[i] = BLOCK_UNDEF;
1336 for (i = 0; i < blocks; i++)
1337 d->revmap[i] = PAGE_UNDEF;
1339 d->page_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1340 if (!d->page_buf)
1343 d->oob_buf = kmalloc_array(2, mtd->oobavail, GFP_KERNEL);
1344 if (!d->oob_buf)
1347 mtdswap_scan_eblks(d);
1349 return 0;
1352 kfree(d->page_buf);
1354 vfree(d->eb_data);
1356 vfree(d->revmap);
1358 vfree(d->page_data);
1360 printk(KERN_ERR "%s: init failed (%d)\n", MTDSWAP_PREFIX, ret);
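/* page_data and revmap are plain int arrays indexed by swap page and
 * physical slot respectively; oob_buf holds two pages' worth of
 * available OOB bytes, matching the two-marker layout read earlier. */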
/* mtdswap_add_mtd() */
1366 struct mtdswap_dev *d;
1375 parts = &partitions[0];
1380 if (kstrtoul(this_opt, 0, &part) < 0)
1404 "%d available, %zu needed.\n",
1428 "%d needed\n", MTDSWAP_PREFIX, eavailable,
1442 (header ? PAGE_SIZE : 0);
1448 d = kzalloc(sizeof(struct mtdswap_dev), GFP_KERNEL);
1449 if (!d)
1454 kfree(d);
1458 d->mbd_dev = mbd_dev;
1459 mbd_dev->priv = d;
1469 if (mtdswap_init(d, eblocks, spare_cnt) < 0)
1472 if (add_mtd_blktrans_dev(mbd_dev) < 0)
1475 d->dev = disk_to_dev(mbd_dev->disk);
1477 ret = mtdswap_add_debugfs(d);
1478 if (ret < 0)
1487 mtdswap_cleanup(d);
1491 kfree(d);
/* mtdswap_remove_dev() */
1496 struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
1499 mtdswap_cleanup(d);
1500 kfree(d);
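/* The fields below are presumably from the driver's mtd_blktrans_ops
 * initializer; .major = 0 requests a dynamically allocated block-device
 * major at registration time. */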
1505 .major = 0,
1506 .part_bits = 0,