Lines matching "em" in fs/btrfs/extent_map.c

/* Allocate a new extent map; the caller holds the only reference. */
struct extent_map *alloc_extent_map(void)
{
        struct extent_map *em;

        em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
        if (!em)
                return NULL;
        RB_CLEAR_NODE(&em->rb_node);
        em->compress_type = BTRFS_COMPRESS_NONE;
        refcount_set(&em->refs, 1);
        INIT_LIST_HEAD(&em->list);
        return em;
}
/*
 * Drop the reference count on @em by one and free the structure if the
 * reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
        if (!em)
                return;
        if (refcount_dec_and_test(&em->refs)) {
                WARN_ON(extent_map_in_tree(em));
                WARN_ON(!list_empty(&em->list));
                if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
                        kfree(em->map_lookup);
                kmem_cache_free(extent_map_cache, em);
        }
}
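
Taken together, the two functions above give an extent map its lifecycle: it is born with one reference and dies when the last reference is dropped. A minimal caller-side sketch, assuming only the functions listed here (field values are illustrative):

static int example_alloc_and_free(void)
{
        struct extent_map *em;

        em = alloc_extent_map();        /* em->refs == 1 */
        if (!em)
                return -ENOMEM;
        em->start = 0;                  /* illustrative values */
        em->len = 4096;
        free_extent_map(em);            /* refs drop to 0, em is freed */
        return 0;
}
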
static int tree_insert(struct rb_root_cached *root, struct extent_map *em)
{
        u64 end = range_end(em->start, em->len);
        /* ... walk down from the root to find the insertion point ... */
        if (em->start < entry->start) {
                /* descend left */
        } else if (em->start >= extent_map_end(entry)) {
                /* descend right */
        }
        /* Reject the insert if a neighbouring entry overlaps [start, end). */
        while (parent && em->start >= extent_map_end(entry))
                /* ... step forward ... */;
        if (end > entry->start && em->start < extent_map_end(entry))
                return -EEXIST;
        while (parent && em->start < entry->start)
                /* ... step backward ... */;
        if (end > entry->start && em->start < extent_map_end(entry))
                return -EEXIST;
        rb_link_node(&em->rb_node, orig_parent, p);
        rb_insert_color_cached(&em->rb_node, root, leftmost);
        return 0;
}
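
tree_insert() decides overlap with two small range helpers whose bodies are not part of this listing. Below is a sketch of their likely shape, reconstructed from how they are used above (saturating end-of-range arithmetic); treat the bodies as assumptions, not verbatim source:

static u64 range_end(u64 start, u64 len)
{
        if (start + len < start)
                return (u64)-1;         /* saturate on u64 overflow */
        return start + len;
}

static u64 extent_map_end(struct extent_map *em)
{
        if (em->start + em->len < em->start)
                return (u64)-1;
        return em->start + em->len;
}
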
static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
{
        struct extent_map *merge = NULL;
        struct rb_node *rb;

        /* Never merge a map that something else still holds references to. */
        if (refcount_read(&em->refs) > 2)
                return;

        /* Try to merge with the previous adjacent map. */
        if (em->start != 0) {
                rb = rb_prev(&em->rb_node);
                if (rb)
                        merge = rb_entry(rb, struct extent_map, rb_node);
                if (rb && mergable_maps(merge, em)) {
                        em->start = merge->start;
                        em->orig_start = merge->orig_start;
                        em->len += merge->len;
                        em->block_len += merge->block_len;
                        em->block_start = merge->block_start;
                        em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
                        em->mod_start = merge->mod_start;
                        em->generation = max(em->generation, merge->generation);
                        set_bit(EXTENT_FLAG_MERGED, &em->flags);
                        /* ... unlink and free @merge ... */
                }
        }

        /* Try to merge with the next adjacent map. */
        rb = rb_next(&em->rb_node);
        if (rb)
                merge = rb_entry(rb, struct extent_map, rb_node);
        if (rb && mergable_maps(em, merge)) {
                em->len += merge->len;
                em->block_len += merge->block_len;
                em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
                em->generation = max(em->generation, merge->generation);
                set_bit(EXTENT_FLAG_MERGED, &em->flags);
                /* ... unlink and free @merge ... */
        }
}
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen)
{
        int ret = 0;
        struct extent_map *em;
        bool prealloc = false;

        write_lock(&tree->lock);
        em = lookup_extent_mapping(tree, start, len);
        WARN_ON(!em || em->start != start);
        if (!em)
                goto out;

        em->generation = gen;
        clear_bit(EXTENT_FLAG_PINNED, &em->flags);
        em->mod_start = em->start;
        em->mod_len = em->len;
        if (test_bit(EXTENT_FLAG_FILLING, &em->flags)) {
                prealloc = true;
                clear_bit(EXTENT_FLAG_FILLING, &em->flags);
        }

        try_merge_map(tree, em);
        if (prealloc) {
                em->mod_start = em->start;
                em->mod_len = em->len;
        }
        free_extent_map(em);
out:
        write_unlock(&tree->lock);
        return ret;
}
void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
{
        clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
        if (extent_map_in_tree(em))
                try_merge_map(tree, em);
}
static void setup_extent_mapping(struct extent_map_tree *tree,
                                 struct extent_map *em, int modified)
{
        refcount_inc(&em->refs);
        em->mod_start = em->start;
        em->mod_len = em->len;
        if (modified)
                list_move(&em->list, &tree->modified_extents);
        else
                try_merge_map(tree, em);
}
static void extent_map_device_set_bits(struct extent_map *em, unsigned bits)
{
        struct map_lookup *map = em->map_lookup;
        u64 stripe_size = em->orig_block_len;
        /* ... mark a @stripe_size range on each device of the chunk ... */
}

static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits)
{
        struct map_lookup *map = em->map_lookup;
        u64 stripe_size = em->orig_block_len;
        /* ... clear the same per-device range state ... */
}
/*
 * Insert @em into @tree, or perform a simple forward/backward merge with
 * existing mappings. @modified indicates whether the given @em should be
 * added to the modified list, which means the extent needs to be logged.
 * The caller must hold the tree's write lock.
 */
int add_extent_mapping(struct extent_map_tree *tree, struct extent_map *em,
                       int modified)
{
        int ret;

        ret = tree_insert(&tree->map, em);
        if (ret)
                return ret;
        setup_extent_mapping(tree, em, modified);
        if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
                extent_map_device_set_bits(em, CHUNK_ALLOCATED);
                extent_map_device_clear_bits(em, CHUNK_TRIMMED);
        }
        return 0;
}
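
The tree is protected by a reader/writer lock, and add_extent_mapping() expects the caller to hold it for writing. A hedged caller-side sketch (example_insert() is a made-up helper name):

static int example_insert(struct extent_map_tree *tree, struct extent_map *em)
{
        int ret;

        write_lock(&tree->lock);
        ret = add_extent_mapping(tree, em, 0);  /* 0: don't track for logging */
        write_unlock(&tree->lock);
        return ret;
}
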
static struct extent_map *__lookup_extent_mapping(struct extent_map_tree *tree,
                                                  u64 start, u64 len, int strict)
{
        struct extent_map *em;
        /* ... find the rb-tree node at or nearest to @start ... */
        em = rb_entry(rb_node, struct extent_map, rb_node);
        if (strict && !(end > em->start && start < extent_map_end(em)))
                return NULL;
        refcount_inc(&em->refs);        /* the returned map carries a reference */
        return em;
}
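
Because __lookup_extent_mapping() bumps the reference count before returning, every successful lookup must be paired with free_extent_map(). A sketch of the usual pattern through the public lookup_extent_mapping() wrapper (the helper itself is hypothetical):

static u64 example_lookup_len(struct extent_map_tree *tree, u64 start, u64 len)
{
        struct extent_map *em;
        u64 found = 0;

        read_lock(&tree->lock);
        em = lookup_extent_mapping(tree, start, len);
        read_unlock(&tree->lock);
        if (em) {
                found = em->len;
                free_extent_map(em);    /* drop the lookup's reference */
        }
        return found;
}
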
/*
 * Remove @em from @tree. No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
void remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
        WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
        rb_erase_cached(&em->rb_node, &tree->map);
        if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
                list_del_init(&em->list);
        if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
                extent_map_device_clear_bits(em, CHUNK_ALLOCATED);
        RB_CLEAR_NODE(&em->rb_node);
}
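
Since remove_extent_mapping() drops no references, the caller releases the reference the tree was holding. A sketch under that assumption (example_remove() is a made-up name):

static void example_remove(struct extent_map_tree *tree, struct extent_map *em)
{
        write_lock(&tree->lock);
        remove_extent_mapping(tree, em);
        write_unlock(&tree->lock);
        free_extent_map(em);            /* the reference the tree was holding */
}
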
static struct extent_map *next_extent_map(const struct extent_map *em)
{
        struct rb_node *next = rb_next(&em->rb_node);

        return next ? rb_entry(next, struct extent_map, rb_node) : NULL;
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
        struct rb_node *prev = rb_prev(&em->rb_node);

        return prev ? rb_entry(prev, struct extent_map, rb_node) : NULL;
}
static noinline int merge_extent_mapping(struct extent_map_tree *em_tree,
                                         struct extent_map *existing,
                                         struct extent_map *em, u64 map_start)
{
        struct extent_map *prev, *next;
        u64 start, end, start_diff;

        BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

        /* ... pick @prev/@next as the neighbours of @existing around @map_start ... */

        /* Clamp @em to the gap between its neighbours. */
        start = prev ? extent_map_end(prev) : em->start;
        start = max_t(u64, start, em->start);
        end = next ? next->start : extent_map_end(em);
        end = min_t(u64, end, extent_map_end(em));
        start_diff = start - em->start;
        em->start = start;
        em->len = end - start;
        if (em->block_start < EXTENT_MAP_LAST_BYTE &&
            !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
                em->block_start += start_diff;
                em->block_len = em->len;
        }
        return add_extent_mapping(em_tree, em, 0);
}
/*
 * btrfs_add_extent_mapping(): insert @em_in into @em_tree. On an
 * overlapping range (-EEXIST), either return the existing extent in
 * @em_in if @start is within the existing em, or merge the existing
 * extent with the one passed in.
 */
        struct extent_map *em = *em_in;

        /* Inline extents must start at file offset 0. */
        if (em->block_start == EXTENT_MAP_INLINE)
                ASSERT(em->start == 0);

        ret = add_extent_mapping(em_tree, em, 0);
        /* On -EEXIST, look up the overlapping map as @existing: */
        trace_btrfs_handle_em_exist(fs_info, existing, em, start, len);
        /* If @start falls inside @existing, hand it back and drop @em: */
        free_extent_map(em);
        /* Otherwise clamp @em into the gap next to @existing and merge: */
        u64 orig_start = em->start;
        u64 orig_len = em->len;

        ret = merge_extent_mapping(em_tree, existing, em, start);
        if (ret) {
                free_extent_map(em);
                WARN_ONCE(ret,
"unexpected error %d: merge existing(start %llu len %llu) with em(start %llu len %llu)\n",
                          ret, existing->start, existing->len, orig_start, orig_len);
        }
static void drop_all_extent_maps_fast(struct extent_map_tree *tree)
{
        struct extent_map *em;
        /* ... pop the first rb-tree node until the tree is empty ... */
        em = rb_entry(node, struct extent_map, rb_node);
        clear_bit(EXTENT_FLAG_PINNED, &em->flags);
        clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
        remove_extent_mapping(tree, em);
        free_extent_map(em);
}
/* btrfs_drop_extent_map_range(): drop all extent maps intersecting
 * [start, end], splitting any map that straddles a range boundary. */
void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
                                 bool skip_pinned)
{
        struct extent_map *em;
        /* ... allocate the split maps, take the tree's write lock ... */

        em = lookup_extent_mapping(em_tree, start, len);
        while (em) {
                /* extent_map_end() is exclusive (last byte + 1). */
                const u64 em_end = extent_map_end(em);

                /* Grab the successor before @em is modified or freed. */
                next_em = next_extent_map(em);

                if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
                        start = em_end;
                        goto next;
                }

                flags = em->flags;
                clear_bit(EXTENT_FLAG_PINNED, &em->flags);
                modified = !list_empty(&em->list);

                /* Fully inside the range: no split needed, remove directly. */
                if (em->start >= start && em_end <= end)
                        goto remove_em;

                gen = em->generation;
                compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

                /* Front split: keep [em->start, start). */
                if (em->start < start) {
                        split->start = em->start;
                        split->len = start - em->start;
                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
                                split->orig_start = em->orig_start;
                                split->block_start = em->block_start;
                                split->block_len = compressed ? em->block_len : split->len;
                                split->orig_block_len = max(split->block_len,
                                                            em->orig_block_len);
                                split->ram_bytes = em->ram_bytes;
                        } else {
                                split->block_start = em->block_start;
                                /* ... */
                        }
                        split->compress_type = em->compress_type;
                        replace_extent_mapping(em_tree, em, split, modified);
                        /* ... */
                }
                /* Tail split: keep [end, em_end). */
                if (em_end > end) {
                        split->start = end;
                        split->len = em_end - end;
                        split->block_start = em->block_start;
                        split->compress_type = em->compress_type;
                        if (em->block_start < EXTENT_MAP_LAST_BYTE) {
                                split->orig_block_len = max(em->block_len,
                                                            em->orig_block_len);
                                split->ram_bytes = em->ram_bytes;
                                if (compressed) {
                                        split->block_len = em->block_len;
                                        split->orig_start = em->orig_start;
                                } else {
                                        const u64 diff = end - em->start;

                                        split->block_len = split->len;
                                        split->block_start += diff;
                                        split->orig_start = em->orig_start;
                                }
                        }
                        if (extent_map_in_tree(em))
                                replace_extent_mapping(em_tree, em, split, modified);
                        /* ... */
                }
remove_em:
                if (extent_map_in_tree(em)) {
                        /* A partially dropped extent map must not be logged. */
                        if ((em->start < start || em_end > end) && modified)
                                WARN_ON(1);
                        remove_extent_mapping(em_tree, em);
                        free_extent_map(em);    /* once for the tree's ref */
                }
next:
                free_extent_map(em);            /* once for the lookup's ref */
                em = next_em;
        }
        /* ... */
}
/*
 * split_extent_map(): split the pinned extent map covering
 * [start, start + len) into a front part of @pre bytes and a remainder.
 */
        struct extent_map *em;

        /* ... allocate split_pre/split_mid and take the write lock ... */
        em = lookup_extent_mapping(em_tree, start, len);
        if (!em) {
                ret = -EIO;
                goto out_unlock;
        }

        /* Only an exact-length, pinned, uncompressed, listed map qualifies. */
        ASSERT(em->len == len);
        ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
        ASSERT(em->block_start < EXTENT_MAP_LAST_BYTE);
        ASSERT(test_bit(EXTENT_FLAG_PINNED, &em->flags));
        ASSERT(!test_bit(EXTENT_FLAG_LOGGING, &em->flags));
        ASSERT(!list_empty(&em->list));

        flags = em->flags;
        clear_bit(EXTENT_FLAG_PINNED, &em->flags);

        /* First, replace the em with a new extent_map starting from em->start. */
        split_pre->start = em->start;
        split_pre->len = pre;
        /* ... */
        split_pre->compress_type = em->compress_type;
        split_pre->generation = em->generation;
        replace_extent_mapping(em_tree, em, split_pre, 1);

        /*
         * Now we only have an extent_map at:
         *     [em->start, em->start + pre]
         */

        /* Insert the middle extent_map. */
        split_mid->start = em->start + pre;
        split_mid->len = em->len - pre;
        split_mid->block_start = em->block_start + pre;
        /* ... */
        split_mid->compress_type = em->compress_type;
        split_mid->generation = em->generation;
        /* ... add the middle map to the tree ... */

        free_extent_map(em);    /* once for the lookup's reference */
        free_extent_map(em);    /* once for the tree's reference */