Lines Matching refs:et

26 	struct extent_tree *et = fi->extent_tree[EX_READ];  in sanity_check_extent_cache()  local
29 if (!et) in sanity_check_extent_cache()
32 ei = &et->largest; in sanity_check_extent_cache()
39 et->largest_updated = true; in sanity_check_extent_cache()
115 static void __try_update_largest_extent(struct extent_tree *et, in __try_update_largest_extent() argument
118 if (et->type != EX_READ) in __try_update_largest_extent()
120 if (en->ei.len <= et->largest.len) in __try_update_largest_extent()
123 et->largest = en->ei; in __try_update_largest_extent()
124 et->largest_updated = true; in __try_update_largest_extent()
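
The matches at source lines 115-124 show how an extent tree tracks its single largest extent: only EX_READ trees keep one, and it is replaced only by a strictly longer extent. Below is a minimal userspace sketch of that policy; the EX_READ check and the length comparison mirror the listing, while the standalone types, EX_OTHER and main() are simplifications for illustration, not the kernel code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the structures named in the listing. */
    enum extent_type { EX_READ, EX_OTHER /* any non-read tree */ };

    struct extent_info {
        unsigned int fofs;  /* start offset in the file, in blocks */
        unsigned int len;   /* extent length in blocks */
    };

    struct extent_tree {
        enum extent_type type;
        struct extent_info largest;
        bool largest_updated;
    };

    /* Mirrors the policy at source lines 115-124: only read extent trees
     * track a largest extent, and it is replaced only by a longer one. */
    static void try_update_largest_extent(struct extent_tree *et,
                                          const struct extent_info *ei)
    {
        if (et->type != EX_READ)
            return;
        if (ei->len <= et->largest.len)
            return;
        et->largest = *ei;
        et->largest_updated = true;
    }

    int main(void)
    {
        struct extent_tree et = { .type = EX_READ };
        struct extent_info a = { .fofs = 0, .len = 8 };
        struct extent_info b = { .fofs = 16, .len = 4 };

        try_update_largest_extent(&et, &a);
        try_update_largest_extent(&et, &b);  /* shorter: ignored */
        printf("largest: fofs=%u len=%u\n", et.largest.fofs, et.largest.len);
        return 0;
    }
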
269 struct extent_tree *et, struct extent_info *ei, in __attach_extent_node() argument
273 struct extent_tree_info *eti = &sbi->extent_tree[et->type]; in __attach_extent_node()
282 en->et = et; in __attach_extent_node()
285 rb_insert_color_cached(&en->rb_node, &et->root, leftmost); in __attach_extent_node()
286 atomic_inc(&et->node_cnt); in __attach_extent_node()
292 struct extent_tree *et, struct extent_node *en) in __detach_extent_node() argument
294 struct extent_tree_info *eti = &sbi->extent_tree[et->type]; in __detach_extent_node()
296 rb_erase_cached(&en->rb_node, &et->root); in __detach_extent_node()
297 atomic_dec(&et->node_cnt); in __detach_extent_node()
300 if (et->cached_en == en) in __detach_extent_node()
301 et->cached_en = NULL; in __detach_extent_node()
312 struct extent_tree *et, struct extent_node *en) in __release_extent_node() argument
314 struct extent_tree_info *eti = &sbi->extent_tree[et->type]; in __release_extent_node()
321 __detach_extent_node(sbi, et, en); in __release_extent_node()
329 struct extent_tree *et; in __grab_extent_tree() local
333 et = radix_tree_lookup(&eti->extent_tree_root, ino); in __grab_extent_tree()
334 if (!et) { in __grab_extent_tree()
335 et = f2fs_kmem_cache_alloc(extent_tree_slab, in __grab_extent_tree()
337 f2fs_radix_tree_insert(&eti->extent_tree_root, ino, et); in __grab_extent_tree()
338 memset(et, 0, sizeof(struct extent_tree)); in __grab_extent_tree()
339 et->ino = ino; in __grab_extent_tree()
340 et->type = type; in __grab_extent_tree()
341 et->root = RB_ROOT_CACHED; in __grab_extent_tree()
342 et->cached_en = NULL; in __grab_extent_tree()
343 rwlock_init(&et->lock); in __grab_extent_tree()
344 INIT_LIST_HEAD(&et->list); in __grab_extent_tree()
345 atomic_set(&et->node_cnt, 0); in __grab_extent_tree()
349 list_del_init(&et->list); in __grab_extent_tree()
354 F2FS_I(inode)->extent_tree[type] = et; in __grab_extent_tree()
356 return et; in __grab_extent_tree()
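
Source lines 329-356 outline the lookup-or-create shape of __grab_extent_tree(): look the inode up in a per-type radix tree, allocate and zero-initialise a fresh tree on a miss, and reuse (and unhook from the zombie list) an existing one on a hit. The sketch below models only that shape with a toy linear table; the radix tree, slab cache, mutex and zombie list are deliberately left out.

    #include <stdbool.h>
    #include <string.h>

    /* Toy stand-ins: the kernel keys extent trees by inode number in a
     * radix tree and allocates them from a slab cache; a small linear
     * table is enough to show the lookup-or-create shape. */
    struct extent_tree {
        unsigned long ino;
        int type;
        int node_cnt;
        bool in_use;
    };

    #define MAX_TREES 16
    static struct extent_tree tree_table[MAX_TREES];

    static struct extent_tree *lookup_tree(unsigned long ino, int type)
    {
        for (int i = 0; i < MAX_TREES; i++)
            if (tree_table[i].in_use && tree_table[i].ino == ino &&
                tree_table[i].type == type)
                return &tree_table[i];
        return NULL;
    }

    /* Lookup-or-create, in the spirit of source lines 333-356: reuse an
     * existing tree for this inode if there is one, otherwise allocate
     * and zero-initialise a fresh tree. */
    static struct extent_tree *grab_extent_tree(unsigned long ino, int type)
    {
        struct extent_tree *et = lookup_tree(ino, type);

        if (et)
            return et;  /* the kernel also unhooks it from the zombie list */

        for (int i = 0; i < MAX_TREES; i++) {
            if (!tree_table[i].in_use) {
                et = &tree_table[i];
                memset(et, 0, sizeof(*et));
                et->ino = ino;
                et->type = type;
                et->in_use = true;
                return et;
            }
        }
        return NULL;  /* table full */
    }

    int main(void)
    {
        struct extent_tree *a = grab_extent_tree(3, 0);
        struct extent_tree *b = grab_extent_tree(3, 0);

        return (a == b) ? 0 : 1;  /* the second grab reuses the first tree */
    }
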
360 struct extent_tree *et) in __free_extent_tree() argument
364 unsigned int count = atomic_read(&et->node_cnt); in __free_extent_tree()
366 node = rb_first_cached(&et->root); in __free_extent_tree()
370 __release_extent_node(sbi, et, en); in __free_extent_tree()
374 return count - atomic_read(&et->node_cnt); in __free_extent_tree()
377 static void __drop_largest_extent(struct extent_tree *et, in __drop_largest_extent() argument
380 if (fofs < et->largest.fofs + et->largest.len && in __drop_largest_extent()
381 fofs + len > et->largest.fofs) { in __drop_largest_extent()
382 et->largest.len = 0; in __drop_largest_extent()
383 et->largest_updated = true; in __drop_largest_extent()
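
Source lines 377-383 give the overlap test used to invalidate the cached largest extent when a file range is dropped. A self-contained sketch of that half-open interval check, with the locking and the callers omitted:

    #include <assert.h>
    #include <stdbool.h>

    struct extent_info { unsigned int fofs, len; };

    struct extent_tree {
        struct extent_info largest;
        bool largest_updated;
    };

    /* Mirrors source lines 377-383: invalidate the cached largest extent
     * whenever the dropped range [fofs, fofs + len) overlaps it. */
    static void drop_largest_extent(struct extent_tree *et,
                                    unsigned int fofs, unsigned int len)
    {
        if (fofs < et->largest.fofs + et->largest.len &&
            fofs + len > et->largest.fofs) {
            et->largest.len = 0;
            et->largest_updated = true;
        }
    }

    int main(void)
    {
        struct extent_tree et = { .largest = { .fofs = 10, .len = 5 } };

        drop_largest_extent(&et, 0, 10);  /* [0,10) misses [10,15) */
        assert(et.largest.len == 5);

        drop_largest_extent(&et, 14, 2);  /* [14,16) overlaps [10,15) */
        assert(et.largest.len == 0);
        return 0;
    }
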
392 struct extent_tree *et; in f2fs_init_read_extent_tree() local
406 et = __grab_extent_tree(inode, EX_READ); in f2fs_init_read_extent_tree()
413 write_lock(&et->lock); in f2fs_init_read_extent_tree()
414 if (atomic_read(&et->node_cnt)) in f2fs_init_read_extent_tree()
417 en = __attach_extent_node(sbi, et, &ei, NULL, in f2fs_init_read_extent_tree()
418 &et->root.rb_root.rb_node, true); in f2fs_init_read_extent_tree()
420 et->largest = en->ei; in f2fs_init_read_extent_tree()
421 et->cached_en = en; in f2fs_init_read_extent_tree()
428 write_unlock(&et->lock); in f2fs_init_read_extent_tree()
457 struct extent_tree *et = F2FS_I(inode)->extent_tree[type]; in __lookup_extent_tree() local
461 if (!et) in __lookup_extent_tree()
466 read_lock(&et->lock); in __lookup_extent_tree()
469 et->largest.fofs <= pgofs && in __lookup_extent_tree()
470 et->largest.fofs + et->largest.len > pgofs) { in __lookup_extent_tree()
471 *ei = et->largest; in __lookup_extent_tree()
477 en = __lookup_extent_node(&et->root, et->cached_en, pgofs); in __lookup_extent_tree()
481 if (en == et->cached_en) in __lookup_extent_tree()
490 et->cached_en = en; in __lookup_extent_tree()
496 read_unlock(&et->lock); in __lookup_extent_tree()
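
Source lines 457-496 show the fast path of __lookup_extent_tree(): before walking the rb-tree it checks whether the requested page offset already falls inside the cached largest extent. The sketch below models only that fast path; the rb-tree walk, the cached_en update and the read lock are omitted, and the helper name is invented here.

    #include <assert.h>
    #include <stdbool.h>

    struct extent_info { unsigned int fofs, blk, len; };

    /* Fast path of the lookup (source lines 466-471): if the page offset
     * falls inside the cached largest extent, answer from it without
     * walking the rb-tree. */
    static bool lookup_largest(const struct extent_info *largest,
                               unsigned int pgofs, struct extent_info *ei)
    {
        if (largest->fofs <= pgofs &&
            largest->fofs + largest->len > pgofs) {
            *ei = *largest;
            return true;
        }
        return false;  /* caller falls back to the rb-tree walk */
    }

    int main(void)
    {
        struct extent_info largest = { .fofs = 100, .blk = 5000, .len = 32 };
        struct extent_info ei;

        assert(lookup_largest(&largest, 100, &ei));   /* first block: hit */
        assert(lookup_largest(&largest, 131, &ei));   /* last block: hit  */
        assert(!lookup_largest(&largest, 132, &ei));  /* past end: miss   */
        return 0;
    }
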
506 struct extent_tree *et, struct extent_info *ei, in __try_merge_extent_node() argument
510 struct extent_tree_info *eti = &sbi->extent_tree[et->type]; in __try_merge_extent_node()
513 if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, et->type)) { in __try_merge_extent_node()
519 if (next_ex && __is_front_mergeable(ei, &next_ex->ei, et->type)) { in __try_merge_extent_node()
522 if (et->type == EX_READ) in __try_merge_extent_node()
525 __release_extent_node(sbi, et, prev_ex); in __try_merge_extent_node()
533 __try_update_largest_extent(et, en); in __try_merge_extent_node()
538 et->cached_en = en; in __try_merge_extent_node()
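
Source lines 506-538 show a new extent being merged into an existing neighbour instead of inserted as a separate node. The sketch below assumes the usual mergeability rule for read extents, namely that two extents merge when they are adjacent both in file offset and in on-disk block address; it models the checks named at lines 513 and 519 rather than reproducing the kernel's __is_back_mergeable()/__is_front_mergeable(), and the case where both neighbours collapse into a single node is left out.

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct extent_info { unsigned int fofs, blk, len; };

    /* Assumed rule for read extents: "back" is mergeable with "cur" when
     * it ends exactly where "cur" begins, in both file offset and block
     * address. */
    static bool back_mergeable(const struct extent_info *cur,
                               const struct extent_info *back)
    {
        return back->fofs + back->len == cur->fofs &&
               back->blk + back->len == cur->blk;
    }

    /* Grow an existing neighbour instead of inserting a new node,
     * roughly what __try_merge_extent_node() does. */
    static bool try_merge(struct extent_info *prev, struct extent_info *next,
                          const struct extent_info *ei)
    {
        if (prev && back_mergeable(ei, prev)) {
            prev->len += ei->len;       /* extend prev forwards */
            return true;
        }
        if (next && back_mergeable(next, ei)) {
            next->fofs = ei->fofs;      /* extend next backwards */
            next->blk = ei->blk;
            next->len += ei->len;
            return true;
        }
        return false;
    }

    int main(void)
    {
        struct extent_info prev = { .fofs = 0, .blk = 100, .len = 4 };
        struct extent_info ei   = { .fofs = 4, .blk = 104, .len = 2 };

        assert(try_merge(&prev, NULL, &ei));
        assert(prev.len == 6);
        return 0;
    }
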
545 struct extent_tree *et, struct extent_info *ei, in __insert_extent_tree() argument
550 struct extent_tree_info *eti = &sbi->extent_tree[et->type]; in __insert_extent_tree()
551 struct rb_node **p = &et->root.rb_root.rb_node; in __insert_extent_tree()
579 en = __attach_extent_node(sbi, et, ei, parent, p, leftmost); in __insert_extent_tree()
583 __try_update_largest_extent(et, en); in __insert_extent_tree()
588 et->cached_en = en; in __insert_extent_tree()
597 struct extent_tree *et = F2FS_I(inode)->extent_tree[type]; in __update_extent_tree_range() local
607 if (!et) in __update_extent_tree_range()
617 write_lock(&et->lock); in __update_extent_tree_range()
621 write_unlock(&et->lock); in __update_extent_tree_range()
625 prev = et->largest; in __update_extent_tree_range()
632 __drop_largest_extent(et, fofs, len); in __update_extent_tree_range()
636 en = __lookup_extent_node_ret(&et->root, in __update_extent_tree_range()
637 et->cached_en, fofs, in __update_extent_tree_range()
670 en1 = __insert_extent_tree(sbi, et, &ei, in __update_extent_tree_range()
692 __try_update_largest_extent(et, en); in __update_extent_tree_range()
694 __release_extent_node(sbi, et, en); in __update_extent_tree_range()
717 if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en)) in __update_extent_tree_range()
718 __insert_extent_tree(sbi, et, &ei, in __update_extent_tree_range()
724 et->largest.len < F2FS_MIN_EXTENT_LEN) { in __update_extent_tree_range()
725 et->largest.len = 0; in __update_extent_tree_range()
726 et->largest_updated = true; in __update_extent_tree_range()
732 __free_extent_tree(sbi, et); in __update_extent_tree_range()
734 if (et->largest_updated) { in __update_extent_tree_range()
735 et->largest_updated = false; in __update_extent_tree_range()
745 if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en)) in __update_extent_tree_range()
746 __insert_extent_tree(sbi, et, &ei, in __update_extent_tree_range()
749 write_unlock(&et->lock); in __update_extent_tree_range()
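
Source lines 723-735 hint at a give-up heuristic in __update_extent_tree_range(): if an update leaves only short extents, the largest extent is dropped and the tree can be freed rather than maintained. The listing shows only a fragment of the condition, so the sketch below assumes it amounts to "both the previous and the current largest extent are shorter than F2FS_MIN_EXTENT_LEN", and assumes a threshold of 64 blocks.

    #include <stdbool.h>
    #include <stdio.h>

    /* Assumed threshold; in f2fs this is F2FS_MIN_EXTENT_LEN. */
    #define MIN_EXTENT_LEN 64

    struct extent_info { unsigned int fofs, len; };

    /* Sketch of the give-up heuristic around source lines 723-735: when
     * an update leaves the tree with only short extents, caching them is
     * not worth the maintenance cost, so the largest extent is dropped
     * and the caller stops using the read extent cache for this inode. */
    static bool should_give_up_extent_cache(const struct extent_info *old_largest,
                                            const struct extent_info *new_largest)
    {
        return old_largest->len < MIN_EXTENT_LEN &&
               new_largest->len < MIN_EXTENT_LEN;
    }

    int main(void)
    {
        struct extent_info before = { .fofs = 0, .len = 12 };
        struct extent_info after  = { .fofs = 0, .len = 3 };

        printf("give up read extent cache: %s\n",
               should_give_up_extent_cache(&before, &after) ? "yes" : "no");
        return 0;
    }
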
761 struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ]; in f2fs_update_read_extent_tree_range_compressed() local
775 write_lock(&et->lock); in f2fs_update_read_extent_tree_range_compressed()
777 en = __lookup_extent_node_ret(&et->root, in f2fs_update_read_extent_tree_range_compressed()
778 et->cached_en, fofs, in f2fs_update_read_extent_tree_range_compressed()
788 if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en)) in f2fs_update_read_extent_tree_range_compressed()
789 __insert_extent_tree(sbi, et, &ei, in f2fs_update_read_extent_tree_range_compressed()
792 write_unlock(&et->lock); in f2fs_update_read_extent_tree_range_compressed()
900 struct extent_tree *et, *next; in __shrink_extent_tree() local
912 list_for_each_entry_safe(et, next, &eti->zombie_list, list) { in __shrink_extent_tree()
913 if (atomic_read(&et->node_cnt)) { in __shrink_extent_tree()
914 write_lock(&et->lock); in __shrink_extent_tree()
915 node_cnt += __free_extent_tree(sbi, et); in __shrink_extent_tree()
916 write_unlock(&et->lock); in __shrink_extent_tree()
918 f2fs_bug_on(sbi, atomic_read(&et->node_cnt)); in __shrink_extent_tree()
919 list_del_init(&et->list); in __shrink_extent_tree()
920 radix_tree_delete(&eti->extent_tree_root, et->ino); in __shrink_extent_tree()
921 kmem_cache_free(extent_tree_slab, et); in __shrink_extent_tree()
945 et = en->et; in __shrink_extent_tree()
946 if (!write_trylock(&et->lock)) { in __shrink_extent_tree()
955 __detach_extent_node(sbi, et, en); in __shrink_extent_tree()
957 write_unlock(&et->lock); in __shrink_extent_tree()
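
Source lines 945-957 show the shrinker using write_trylock() so that reclaim never blocks behind a busy extent tree; contended trees are simply skipped. A userspace sketch of that trylock-and-skip pattern (build with -pthread), using POSIX rwlocks in place of the kernel's rwlock_t and a plain counter in place of the rb-tree nodes:

    #include <pthread.h>
    #include <stdio.h>

    /* Stand-in for the kernel structure: POSIX rwlock instead of
     * rwlock_t, and a plain counter instead of the rb-tree of nodes. */
    struct extent_tree {
        pthread_rwlock_t lock;
        int node_cnt;
    };

    /* Trylock-and-skip reclaim, as at source lines 945-957: only shrink
     * a tree whose write lock can be taken immediately, and skip busy
     * trees so the shrinker never stalls behind readers or writers. */
    static int shrink_one(struct extent_tree *et)
    {
        int freed;

        if (pthread_rwlock_trywrlock(&et->lock) != 0)
            return 0;  /* contended: skip, try again on the next pass */

        freed = et->node_cnt;  /* stand-in for detaching the rb-tree nodes */
        et->node_cnt = 0;

        pthread_rwlock_unlock(&et->lock);
        return freed;
    }

    int main(void)
    {
        struct extent_tree et = { .node_cnt = 3 };

        pthread_rwlock_init(&et.lock, NULL);
        printf("freed %d nodes\n", shrink_one(&et));
        pthread_rwlock_destroy(&et.lock);
        return 0;
    }
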
1061 struct extent_tree *et = F2FS_I(inode)->extent_tree[type]; in __destroy_extent_node() local
1064 if (!et || !atomic_read(&et->node_cnt)) in __destroy_extent_node()
1067 write_lock(&et->lock); in __destroy_extent_node()
1068 node_cnt = __free_extent_tree(sbi, et); in __destroy_extent_node()
1069 write_unlock(&et->lock); in __destroy_extent_node()
1083 struct extent_tree *et = F2FS_I(inode)->extent_tree[type]; in __drop_extent_tree() local
1089 write_lock(&et->lock); in __drop_extent_tree()
1090 __free_extent_tree(sbi, et); in __drop_extent_tree()
1093 if (et->largest.len) { in __drop_extent_tree()
1094 et->largest.len = 0; in __drop_extent_tree()
1098 write_unlock(&et->lock); in __drop_extent_tree()
1113 struct extent_tree *et = F2FS_I(inode)->extent_tree[type]; in __destroy_extent_tree() local
1116 if (!et) in __destroy_extent_tree()
1120 atomic_read(&et->node_cnt)) { in __destroy_extent_tree()
1122 list_add_tail(&et->list, &eti->zombie_list); in __destroy_extent_tree()
1133 f2fs_bug_on(sbi, atomic_read(&et->node_cnt)); in __destroy_extent_tree()
1135 kmem_cache_free(extent_tree_slab, et); in __destroy_extent_tree()