1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * f2fs extent cache support
4 *
5 * Copyright (c) 2015 Motorola Mobility
6 * Copyright (c) 2015 Samsung Electronics
7 * Authors: Jaegeuk Kim <jaegeuk@kernel.org>
8 * Chao Yu <chao2.yu@samsung.com>
9 *
10 * block_age-based extent cache added by:
11 * Copyright (c) 2022 xiaomi Co., Ltd.
12 * http://www.xiaomi.com/
13 */
14
15 #include <linux/fs.h>
16 #include <linux/f2fs_fs.h>
17
18 #include "f2fs.h"
19 #include "node.h"
20 #include <trace/events/f2fs.h>
21
22 static void __set_extent_info(struct extent_info *ei,
23 unsigned int fofs, unsigned int len,
24 block_t blk, bool keep_clen,
25 unsigned long age, unsigned long last_blocks,
26 enum extent_type type)
27 {
28 ei->fofs = fofs;
29 ei->len = len;
30
31 if (type == EX_READ) {
32 ei->blk = blk;
33 if (keep_clen)
34 return;
35 #ifdef CONFIG_F2FS_FS_COMPRESSION
36 ei->c_len = 0;
37 #endif
38 } else if (type == EX_BLOCK_AGE) {
39 ei->age = age;
40 ei->last_blocks = last_blocks;
41 }
42 }
43
44 static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
45 {
46 if (type == EX_READ)
47 return test_opt(F2FS_I_SB(inode), READ_EXTENT_CACHE) &&
48 S_ISREG(inode->i_mode);
49 if (type == EX_BLOCK_AGE)
50 return test_opt(F2FS_I_SB(inode), AGE_EXTENT_CACHE) &&
51 (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode));
52 return false;
53 }
54
55 static bool __may_extent_tree(struct inode *inode, enum extent_type type)
56 {
57 /*
58 * for files recovered during mount, do not create extents
59 * if the shrinker is not registered.
60 */
61 if (list_empty(&F2FS_I_SB(inode)->s_list))
62 return false;
63
64 if (!__init_may_extent_tree(inode, type))
65 return false;
66
67 if (type == EX_READ) {
68 if (is_inode_flag_set(inode, FI_NO_EXTENT))
69 return false;
70 if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
71 !f2fs_sb_has_readonly(F2FS_I_SB(inode)))
72 return false;
73 } else if (type == EX_BLOCK_AGE) {
74 if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
75 return false;
76 if (file_is_cold(inode))
77 return false;
78 }
79 return true;
80 }
81
82 static void __try_update_largest_extent(struct extent_tree *et,
83 struct extent_node *en)
84 {
85 if (et->type != EX_READ)
86 return;
87 if (en->ei.len <= et->largest.len)
88 return;
89
90 et->largest = en->ei;
91 et->largest_updated = true;
92 }
93
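/*
 * Two extents are mergeable when they are adjacent in file offset; read
 * extents additionally need contiguous block addresses and must not be
 * partially compressed, while block-age extents need their age and
 * last_blocks values to differ by no more than SAME_AGE_REGION.
 */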
94 static bool __is_extent_mergeable(struct extent_info *back,
95 struct extent_info *front, enum extent_type type)
96 {
97 if (type == EX_READ) {
98 #ifdef CONFIG_F2FS_FS_COMPRESSION
99 if (back->c_len && back->len != back->c_len)
100 return false;
101 if (front->c_len && front->len != front->c_len)
102 return false;
103 #endif
104 return (back->fofs + back->len == front->fofs &&
105 back->blk + back->len == front->blk);
106 } else if (type == EX_BLOCK_AGE) {
107 return (back->fofs + back->len == front->fofs &&
108 abs(back->age - front->age) <= SAME_AGE_REGION &&
109 abs(back->last_blocks - front->last_blocks) <=
110 SAME_AGE_REGION);
111 }
112 return false;
113 }
114
115 static bool __is_back_mergeable(struct extent_info *cur,
116 struct extent_info *back, enum extent_type type)
117 {
118 return __is_extent_mergeable(back, cur, type);
119 }
120
121 static bool __is_front_mergeable(struct extent_info *cur,
122 struct extent_info *front, enum extent_type type)
123 {
124 return __is_extent_mergeable(cur, front, type);
125 }
126
127 static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re,
128 unsigned int ofs)
129 {
130 if (cached_re) {
131 if (cached_re->ofs <= ofs &&
132 cached_re->ofs + cached_re->len > ofs) {
133 return cached_re;
134 }
135 }
136 return NULL;
137 }
138
139 static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root,
140 unsigned int ofs)
141 {
142 struct rb_node *node = root->rb_root.rb_node;
143 struct rb_entry *re;
144
145 while (node) {
146 re = rb_entry(node, struct rb_entry, rb_node);
147
148 if (ofs < re->ofs)
149 node = node->rb_left;
150 else if (ofs >= re->ofs + re->len)
151 node = node->rb_right;
152 else
153 return re;
154 }
155 return NULL;
156 }
157
158 struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
159 struct rb_entry *cached_re, unsigned int ofs)
160 {
161 struct rb_entry *re;
162
163 re = __lookup_rb_tree_fast(cached_re, ofs);
164 if (!re)
165 return __lookup_rb_tree_slow(root, ofs);
166
167 return re;
168 }
169
170 struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
171 struct rb_root_cached *root,
172 struct rb_node **parent,
173 unsigned long long key, bool *leftmost)
174 {
175 struct rb_node **p = &root->rb_root.rb_node;
176 struct rb_entry *re;
177
178 while (*p) {
179 *parent = *p;
180 re = rb_entry(*parent, struct rb_entry, rb_node);
181
182 if (key < re->key) {
183 p = &(*p)->rb_left;
184 } else {
185 p = &(*p)->rb_right;
186 *leftmost = false;
187 }
188 }
189
190 return p;
191 }
192
193 struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
194 struct rb_root_cached *root,
195 struct rb_node **parent,
196 unsigned int ofs, bool *leftmost)
197 {
198 struct rb_node **p = &root->rb_root.rb_node;
199 struct rb_entry *re;
200
201 while (*p) {
202 *parent = *p;
203 re = rb_entry(*parent, struct rb_entry, rb_node);
204
205 if (ofs < re->ofs) {
206 p = &(*p)->rb_left;
207 } else if (ofs >= re->ofs + re->len) {
208 p = &(*p)->rb_right;
209 *leftmost = false;
210 } else {
211 f2fs_bug_on(sbi, 1);
212 }
213 }
214
215 return p;
216 }
217
218 /*
219 * lookup the rb entry at position @ofs in the rb-tree,
220 * if hit, return the entry, otherwise, return NULL
221 * @prev_ex: extent before ofs
222 * @next_ex: extent after ofs
223 * @insert_p: insert point for new extent at ofs
224 * in order to simplify the insertion afterwards.
225 * tree must stay unchanged between lookup and insertion.
226 */
227 struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
228 struct rb_entry *cached_re,
229 unsigned int ofs,
230 struct rb_entry **prev_entry,
231 struct rb_entry **next_entry,
232 struct rb_node ***insert_p,
233 struct rb_node **insert_parent,
234 bool force, bool *leftmost)
235 {
236 struct rb_node **pnode = &root->rb_root.rb_node;
237 struct rb_node *parent = NULL, *tmp_node;
238 struct rb_entry *re = cached_re;
239
240 *insert_p = NULL;
241 *insert_parent = NULL;
242 *prev_entry = NULL;
243 *next_entry = NULL;
244
245 if (RB_EMPTY_ROOT(&root->rb_root))
246 return NULL;
247
248 if (re) {
249 if (re->ofs <= ofs && re->ofs + re->len > ofs)
250 goto lookup_neighbors;
251 }
252
253 if (leftmost)
254 *leftmost = true;
255
256 while (*pnode) {
257 parent = *pnode;
258 re = rb_entry(*pnode, struct rb_entry, rb_node);
259
260 if (ofs < re->ofs) {
261 pnode = &(*pnode)->rb_left;
262 } else if (ofs >= re->ofs + re->len) {
263 pnode = &(*pnode)->rb_right;
264 if (leftmost)
265 *leftmost = false;
266 } else {
267 goto lookup_neighbors;
268 }
269 }
270
271 *insert_p = pnode;
272 *insert_parent = parent;
273
274 re = rb_entry(parent, struct rb_entry, rb_node);
275 tmp_node = parent;
276 if (parent && ofs > re->ofs)
277 tmp_node = rb_next(parent);
278 *next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
279
280 tmp_node = parent;
281 if (parent && ofs < re->ofs)
282 tmp_node = rb_prev(parent);
283 *prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
284 return NULL;
285
286 lookup_neighbors:
287 if (ofs == re->ofs || force) {
288 /* lookup prev node for merging backward later */
289 tmp_node = rb_prev(&re->rb_node);
290 *prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
291 }
292 if (ofs == re->ofs + re->len - 1 || force) {
293 /* lookup next node for merging frontward later */
294 tmp_node = rb_next(&re->rb_node);
295 *next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node);
296 }
297 return re;
298 }
299
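/*
 * Debug check (CONFIG_F2FS_CHECK_FS only): walk the whole tree and verify
 * that entries are sorted by key, or sorted by offset with no overlapping
 * ranges.
 */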
300 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
301 struct rb_root_cached *root, bool check_key)
302 {
303 #ifdef CONFIG_F2FS_CHECK_FS
304 struct rb_node *cur = rb_first_cached(root), *next;
305 struct rb_entry *cur_re, *next_re;
306
307 if (!cur)
308 return true;
309
310 while (cur) {
311 next = rb_next(cur);
312 if (!next)
313 return true;
314
315 cur_re = rb_entry(cur, struct rb_entry, rb_node);
316 next_re = rb_entry(next, struct rb_entry, rb_node);
317
318 if (check_key) {
319 if (cur_re->key > next_re->key) {
320 f2fs_info(sbi, "inconsistent rbtree, "
321 "cur(%llu) next(%llu)",
322 cur_re->key, next_re->key);
323 return false;
324 }
325 goto next;
326 }
327
328 if (cur_re->ofs + cur_re->len > next_re->ofs) {
329 f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
330 cur_re->ofs, cur_re->len,
331 next_re->ofs, next_re->len);
332 return false;
333 }
334 next:
335 cur = next;
336 }
337 #endif
338 return true;
339 }
340
341 static struct kmem_cache *extent_tree_slab;
342 static struct kmem_cache *extent_node_slab;
343
344 static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
345 struct extent_tree *et, struct extent_info *ei,
346 struct rb_node *parent, struct rb_node **p,
347 bool leftmost)
348 {
349 struct extent_tree_info *eti = &sbi->extent_tree[et->type];
350 struct extent_node *en;
351
352 en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
353 if (!en)
354 return NULL;
355
356 en->ei = *ei;
357 INIT_LIST_HEAD(&en->list);
358 en->et = et;
359
360 rb_link_node(&en->rb_node, parent, p);
361 rb_insert_color_cached(&en->rb_node, &et->root, leftmost);
362 atomic_inc(&et->node_cnt);
363 atomic_inc(&eti->total_ext_node);
364 return en;
365 }
366
367 static void __detach_extent_node(struct f2fs_sb_info *sbi,
368 struct extent_tree *et, struct extent_node *en)
369 {
370 struct extent_tree_info *eti = &sbi->extent_tree[et->type];
371
372 rb_erase_cached(&en->rb_node, &et->root);
373 atomic_dec(&et->node_cnt);
374 atomic_dec(&eti->total_ext_node);
375
376 if (et->cached_en == en)
377 et->cached_en = NULL;
378 kmem_cache_free(extent_node_slab, en);
379 }
380
381 /*
382 * Flow to release an extent_node:
383 * 1. list_del_init
384 * 2. __detach_extent_node
385 * 3. kmem_cache_free.
386 */
387 static void __release_extent_node(struct f2fs_sb_info *sbi,
388 struct extent_tree *et, struct extent_node *en)
389 {
390 struct extent_tree_info *eti = &sbi->extent_tree[et->type];
391
392 spin_lock(&eti->extent_lock);
393 f2fs_bug_on(sbi, list_empty(&en->list));
394 list_del_init(&en->list);
395 spin_unlock(&eti->extent_lock);
396
397 __detach_extent_node(sbi, et, en);
398 }
399
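/*
 * Look up the per-inode extent tree of @type in the radix tree, or allocate
 * and initialize a new one; a tree found on the zombie list is revived by
 * taking it back off that list.
 */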
400 static struct extent_tree *__grab_extent_tree(struct inode *inode,
401 enum extent_type type)
402 {
403 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
404 struct extent_tree_info *eti = &sbi->extent_tree[type];
405 struct extent_tree *et;
406 nid_t ino = inode->i_ino;
407
408 mutex_lock(&eti->extent_tree_lock);
409 et = radix_tree_lookup(&eti->extent_tree_root, ino);
410 if (!et) {
411 et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
412 f2fs_radix_tree_insert(&eti->extent_tree_root, ino, et);
413 memset(et, 0, sizeof(struct extent_tree));
414 et->ino = ino;
415 et->type = type;
416 et->root = RB_ROOT_CACHED;
417 et->cached_en = NULL;
418 rwlock_init(&et->lock);
419 INIT_LIST_HEAD(&et->list);
420 atomic_set(&et->node_cnt, 0);
421 atomic_inc(&eti->total_ext_tree);
422 } else {
423 atomic_dec(&eti->total_zombie_tree);
424 list_del_init(&et->list);
425 }
426 mutex_unlock(&eti->extent_tree_lock);
427
428 /* never dies until evict_inode */
429 F2FS_I(inode)->extent_tree[type] = et;
430
431 return et;
432 }
433
434 static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
435 struct extent_tree *et)
436 {
437 struct rb_node *node, *next;
438 struct extent_node *en;
439 unsigned int count = atomic_read(&et->node_cnt);
440
441 node = rb_first_cached(&et->root);
442 while (node) {
443 next = rb_next(node);
444 en = rb_entry(node, struct extent_node, rb_node);
445 __release_extent_node(sbi, et, en);
446 node = next;
447 }
448
449 return count - atomic_read(&et->node_cnt);
450 }
451
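/* invalidate the cached largest extent if [fofs, fofs + len) overlaps it */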
452 static void __drop_largest_extent(struct extent_tree *et,
453 pgoff_t fofs, unsigned int len)
454 {
455 if (fofs < et->largest.fofs + et->largest.len &&
456 fofs + len > et->largest.fofs) {
457 et->largest.len = 0;
458 et->largest_updated = true;
459 }
460 }
461
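/*
 * Rebuild the in-memory read extent tree from the on-disk i_ext when the
 * inode page is loaded; if the read extent cache is not allowed for this
 * inode, the on-disk largest extent is dropped instead.
 */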
462 void f2fs_init_read_extent_tree(struct inode *inode, struct page *ipage)
463 {
464 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
465 struct extent_tree_info *eti = &sbi->extent_tree[EX_READ];
466 struct f2fs_extent *i_ext = &F2FS_INODE(ipage)->i_ext;
467 struct extent_tree *et;
468 struct extent_node *en;
469 struct extent_info ei;
470
471 if (!__may_extent_tree(inode, EX_READ)) {
472 /* drop largest read extent */
473 if (i_ext && i_ext->len) {
474 f2fs_wait_on_page_writeback(ipage, NODE, true, true);
475 i_ext->len = 0;
476 set_page_dirty(ipage);
477 }
478 goto out;
479 }
480
481 et = __grab_extent_tree(inode, EX_READ);
482
483 if (!i_ext || !i_ext->len)
484 goto out;
485
486 get_read_extent_info(&ei, i_ext);
487
488 write_lock(&et->lock);
489 if (atomic_read(&et->node_cnt))
490 goto unlock_out;
491
492 en = __attach_extent_node(sbi, et, &ei, NULL,
493 &et->root.rb_root.rb_node, true);
494 if (en) {
495 et->largest = en->ei;
496 et->cached_en = en;
497
498 spin_lock(&eti->extent_lock);
499 list_add_tail(&en->list, &eti->extent_list);
500 spin_unlock(&eti->extent_lock);
501 }
502 unlock_out:
503 write_unlock(&et->lock);
504 out:
505 if (!F2FS_I(inode)->extent_tree[EX_READ])
506 set_inode_flag(inode, FI_NO_EXTENT);
507 }
508
509 void f2fs_init_age_extent_tree(struct inode *inode)
510 {
511 if (!__init_may_extent_tree(inode, EX_BLOCK_AGE))
512 return;
513 __grab_extent_tree(inode, EX_BLOCK_AGE);
514 }
515
516 void f2fs_init_extent_tree(struct inode *inode)
517 {
518 /* initialize read cache */
519 if (__init_may_extent_tree(inode, EX_READ))
520 __grab_extent_tree(inode, EX_READ);
521
522 /* initialize block age cache */
523 if (__init_may_extent_tree(inode, EX_BLOCK_AGE))
524 __grab_extent_tree(inode, EX_BLOCK_AGE);
525 }
526
527 static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
528 struct extent_info *ei, enum extent_type type)
529 {
530 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
531 struct extent_tree_info *eti = &sbi->extent_tree[type];
532 struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
533 struct extent_node *en;
534 bool ret = false;
535
536 if (!et)
537 return false;
538
539 trace_f2fs_lookup_extent_tree_start(inode, pgofs, type);
540
541 read_lock(&et->lock);
542
543 if (type == EX_READ &&
544 et->largest.fofs <= pgofs &&
545 et->largest.fofs + et->largest.len > pgofs) {
546 *ei = et->largest;
547 ret = true;
548 stat_inc_largest_node_hit(sbi);
549 goto out;
550 }
551
552 en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root,
553 (struct rb_entry *)et->cached_en, pgofs);
554 if (!en)
555 goto out;
556
557 if (en == et->cached_en)
558 stat_inc_cached_node_hit(sbi, type);
559 else
560 stat_inc_rbtree_node_hit(sbi, type);
561
562 *ei = en->ei;
563 spin_lock(&eti->extent_lock);
564 if (!list_empty(&en->list)) {
565 list_move_tail(&en->list, &eti->extent_list);
566 et->cached_en = en;
567 }
568 spin_unlock(&eti->extent_lock);
569 ret = true;
570 out:
571 stat_inc_total_hit(sbi, type);
572 read_unlock(&et->lock);
573
574 if (type == EX_READ)
575 trace_f2fs_lookup_read_extent_tree_end(inode, pgofs, ei);
576 else if (type == EX_BLOCK_AGE)
577 trace_f2fs_lookup_age_extent_tree_end(inode, pgofs, ei);
578 return ret;
579 }
580
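/*
 * Try to merge @ei into its neighbors: extend @prev_ex to cover @ei and/or
 * pull the start of @next_ex back to @ei->fofs; if both merges apply,
 * @prev_ex becomes redundant and is released. Returns the merged node,
 * or NULL if nothing could be merged.
 */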
581 static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
582 struct extent_tree *et, struct extent_info *ei,
583 struct extent_node *prev_ex,
584 struct extent_node *next_ex)
585 {
586 struct extent_tree_info *eti = &sbi->extent_tree[et->type];
587 struct extent_node *en = NULL;
588
589 if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei, et->type)) {
590 prev_ex->ei.len += ei->len;
591 ei = &prev_ex->ei;
592 en = prev_ex;
593 }
594
595 if (next_ex && __is_front_mergeable(ei, &next_ex->ei, et->type)) {
596 next_ex->ei.fofs = ei->fofs;
597 next_ex->ei.len += ei->len;
598 if (et->type == EX_READ)
599 next_ex->ei.blk = ei->blk;
600 if (en)
601 __release_extent_node(sbi, et, prev_ex);
602
603 en = next_ex;
604 }
605
606 if (!en)
607 return NULL;
608
609 __try_update_largest_extent(et, en);
610
611 spin_lock(&eti->extent_lock);
612 if (!list_empty(&en->list)) {
613 list_move_tail(&en->list, &eti->extent_list);
614 et->cached_en = en;
615 }
616 spin_unlock(&eti->extent_lock);
617 return en;
618 }
619
620 static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
621 struct extent_tree *et, struct extent_info *ei,
622 struct rb_node **insert_p,
623 struct rb_node *insert_parent,
624 bool leftmost)
625 {
626 struct extent_tree_info *eti = &sbi->extent_tree[et->type];
627 struct rb_node **p;
628 struct rb_node *parent = NULL;
629 struct extent_node *en = NULL;
630
631 if (insert_p && insert_parent) {
632 parent = insert_parent;
633 p = insert_p;
634 goto do_insert;
635 }
636
637 leftmost = true;
638
639 p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent,
640 ei->fofs, &leftmost);
641 do_insert:
642 en = __attach_extent_node(sbi, et, ei, parent, p, leftmost);
643 if (!en)
644 return NULL;
645
646 __try_update_largest_extent(et, en);
647
648 /* update in global extent list */
649 spin_lock(&eti->extent_lock);
650 list_add_tail(&en->list, &eti->extent_list);
651 et->cached_en = en;
652 spin_unlock(&eti->extent_lock);
653 return en;
654 }
655
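/*
 * Apply extent @tei to the tree of @type for the range [fofs, fofs + len):
 * nodes overlapping the range are trimmed, split or removed first, then the
 * new extent is merged with its neighbors or inserted as a new node.
 */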
656 static void __update_extent_tree_range(struct inode *inode,
657 struct extent_info *tei, enum extent_type type)
658 {
659 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
660 struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
661 struct extent_node *en = NULL, *en1 = NULL;
662 struct extent_node *prev_en = NULL, *next_en = NULL;
663 struct extent_info ei, dei, prev;
664 struct rb_node **insert_p = NULL, *insert_parent = NULL;
665 unsigned int fofs = tei->fofs, len = tei->len;
666 unsigned int end = fofs + len;
667 bool updated = false;
668 bool leftmost = false;
669
670 if (!et)
671 return;
672
673 if (type == EX_READ)
674 trace_f2fs_update_read_extent_tree_range(inode, fofs, len,
675 tei->blk, 0);
676 else if (type == EX_BLOCK_AGE)
677 trace_f2fs_update_age_extent_tree_range(inode, fofs, len,
678 tei->age, tei->last_blocks);
679
680 write_lock(&et->lock);
681
682 if (type == EX_READ) {
683 if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
684 write_unlock(&et->lock);
685 return;
686 }
687
688 prev = et->largest;
689 dei.len = 0;
690
691 /*
692 * drop largest extent before lookup, in case it's already
693 * been shrunk from extent tree
694 */
695 __drop_largest_extent(et, fofs, len);
696 }
697
698 /* 1. lookup first extent node in range [fofs, fofs + len - 1] */
699 en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
700 (struct rb_entry *)et->cached_en, fofs,
701 (struct rb_entry **)&prev_en,
702 (struct rb_entry **)&next_en,
703 &insert_p, &insert_parent, false,
704 &leftmost);
705 if (!en)
706 en = next_en;
707
708 /* 2. invalidate all extent nodes in range [fofs, fofs + len - 1] */
709 while (en && en->ei.fofs < end) {
710 unsigned int org_end;
711 int parts = 0; /* # of parts current extent split into */
712
713 next_en = en1 = NULL;
714
715 dei = en->ei;
716 org_end = dei.fofs + dei.len;
717 f2fs_bug_on(sbi, fofs >= org_end);
718
719 if (fofs > dei.fofs && (type != EX_READ ||
720 fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN)) {
721 en->ei.len = fofs - en->ei.fofs;
722 prev_en = en;
723 parts = 1;
724 }
725
726 if (end < org_end && (type != EX_READ ||
727 org_end - end >= F2FS_MIN_EXTENT_LEN)) {
728 if (parts) {
729 __set_extent_info(&ei,
730 end, org_end - end,
731 end - dei.fofs + dei.blk, false,
732 dei.age, dei.last_blocks,
733 type);
734 en1 = __insert_extent_tree(sbi, et, &ei,
735 NULL, NULL, true);
736 next_en = en1;
737 } else {
738 __set_extent_info(&en->ei,
739 end, en->ei.len - (end - dei.fofs),
740 en->ei.blk + (end - dei.fofs), true,
741 dei.age, dei.last_blocks,
742 type);
743 next_en = en;
744 }
745 parts++;
746 }
747
748 if (!next_en) {
749 struct rb_node *node = rb_next(&en->rb_node);
750
751 next_en = rb_entry_safe(node, struct extent_node,
752 rb_node);
753 }
754
755 if (parts)
756 __try_update_largest_extent(et, en);
757 else
758 __release_extent_node(sbi, et, en);
759
760 /*
761 * if the original extent is split into zero or two parts, the extent
762 * tree has been altered by deletion or insertion, therefore
763 * invalidate the cached insert position pointers into the tree.
764 */
765 if (parts != 1) {
766 insert_p = NULL;
767 insert_parent = NULL;
768 }
769 en = next_en;
770 }
771
772 if (type == EX_BLOCK_AGE)
773 goto update_age_extent_cache;
774
775 /* 3. update extent in read extent cache */
776 BUG_ON(type != EX_READ);
777
778 if (tei->blk) {
779 __set_extent_info(&ei, fofs, len, tei->blk, false,
780 0, 0, EX_READ);
781 if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
782 __insert_extent_tree(sbi, et, &ei,
783 insert_p, insert_parent, leftmost);
784
785 /* give up extent_cache, if split and small updates happen */
786 if (dei.len >= 1 &&
787 prev.len < F2FS_MIN_EXTENT_LEN &&
788 et->largest.len < F2FS_MIN_EXTENT_LEN) {
789 et->largest.len = 0;
790 et->largest_updated = true;
791 set_inode_flag(inode, FI_NO_EXTENT);
792 }
793 }
794
795 if (is_inode_flag_set(inode, FI_NO_EXTENT))
796 __free_extent_tree(sbi, et);
797
798 if (et->largest_updated) {
799 et->largest_updated = false;
800 updated = true;
801 }
802 goto out_read_extent_cache;
803 update_age_extent_cache:
804 if (!tei->last_blocks)
805 goto out_read_extent_cache;
806
807 __set_extent_info(&ei, fofs, len, 0, false,
808 tei->age, tei->last_blocks, EX_BLOCK_AGE);
809 if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
810 __insert_extent_tree(sbi, et, &ei,
811 insert_p, insert_parent, leftmost);
812 out_read_extent_cache:
813 write_unlock(&et->lock);
814
815 if (updated)
816 f2fs_mark_inode_dirty_sync(inode, true);
817 }
818
819 #ifdef CONFIG_F2FS_FS_COMPRESSION
820 void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
821 pgoff_t fofs, block_t blkaddr, unsigned int llen,
822 unsigned int c_len)
823 {
824 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
825 struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];
826 struct extent_node *en = NULL;
827 struct extent_node *prev_en = NULL, *next_en = NULL;
828 struct extent_info ei;
829 struct rb_node **insert_p = NULL, *insert_parent = NULL;
830 bool leftmost = false;
831
832 trace_f2fs_update_read_extent_tree_range(inode, fofs, llen,
833 blkaddr, c_len);
834
835 /* it is safe here to check FI_NO_EXTENT w/o et->lock in ro image */
836 if (is_inode_flag_set(inode, FI_NO_EXTENT))
837 return;
838
839 write_lock(&et->lock);
840
841 en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root,
842 (struct rb_entry *)et->cached_en, fofs,
843 (struct rb_entry **)&prev_en,
844 (struct rb_entry **)&next_en,
845 &insert_p, &insert_parent, false,
846 &leftmost);
847 if (en)
848 goto unlock_out;
849
850 __set_extent_info(&ei, fofs, llen, blkaddr, true, 0, 0, EX_READ);
851 ei.c_len = c_len;
852
853 if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
854 __insert_extent_tree(sbi, et, &ei,
855 insert_p, insert_parent, leftmost);
856 unlock_out:
857 write_unlock(&et->lock);
858 }
859 #endif
860
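/*
 * Blend the new and old ages using the configurable weight, roughly
 * new * (100 - weight) / 100 + old * weight / 100. div_u64_rem() divides
 * before multiplying so large ages don't overflow, and the remainders are
 * added back with their scaled share. For example, with weight = 30,
 * new = 1000 and old = 500: 10 * 70 + 5 * 30 = 850.
 */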
861 static unsigned long long __calculate_block_age(struct f2fs_sb_info *sbi,
862 unsigned long long new,
863 unsigned long long old)
864 {
865 unsigned int rem_old, rem_new;
866 unsigned long long res;
867 unsigned int weight = sbi->last_age_weight;
868
869 res = div_u64_rem(new, 100, &rem_new) * (100 - weight)
870 + div_u64_rem(old, 100, &rem_old) * weight;
871
872 if (rem_new)
873 res += rem_new * (100 - weight) / 100;
874 if (rem_old)
875 res += rem_old * weight / 100;
876
877 return res;
878 }
879
880 /* This returns a new age and allocated blocks in ei */
881 static int __get_new_block_age(struct inode *inode, struct extent_info *ei,
882 block_t blkaddr)
883 {
884 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
885 loff_t f_size = i_size_read(inode);
886 unsigned long long cur_blocks =
887 atomic64_read(&sbi->allocated_data_blocks);
888 struct extent_info tei = *ei; /* only fofs and len are valid */
889
890 /*
891 * When I/O is not aligned to PAGE_SIZE, an update will hit the last
892 * file block even in sequential writes, so don't record an age for
893 * the newly written last file block here.
894 */
895 if ((f_size >> PAGE_SHIFT) == ei->fofs && f_size & (PAGE_SIZE - 1) &&
896 blkaddr == NEW_ADDR)
897 return -EINVAL;
898
899 if (__lookup_extent_tree(inode, ei->fofs, &tei, EX_BLOCK_AGE)) {
900 unsigned long long cur_age;
901
902 if (cur_blocks >= tei.last_blocks)
903 cur_age = cur_blocks - tei.last_blocks;
904 else
905 /* allocated_data_blocks overflow */
906 cur_age = ULLONG_MAX - tei.last_blocks + cur_blocks;
907
908 if (tei.age)
909 ei->age = __calculate_block_age(sbi, cur_age, tei.age);
910 else
911 ei->age = cur_age;
912 ei->last_blocks = cur_blocks;
913 WARN_ON(ei->age > cur_blocks);
914 return 0;
915 }
916
917 f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
918
919 /* the data block was allocated for the first time */
920 if (blkaddr == NEW_ADDR)
921 goto out;
922
923 if (__is_valid_data_blkaddr(blkaddr) &&
924 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
925 f2fs_bug_on(sbi, 1);
926 return -EINVAL;
927 }
928 out:
929 /*
930 * init block age with zero, this can happen when the block age extent
931 * was reclaimed due to memory pressure or a system reboot
932 */
933 ei->age = 0;
934 ei->last_blocks = cur_blocks;
935 return 0;
936 }
937
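/*
 * Record a one-block extent for the block described by @dn: the read cache
 * stores its block address (NULL_ADDR for a preallocated NEW_ADDR block),
 * while the block-age cache computes a fresh age for it first.
 */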
938 static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
939 {
940 struct extent_info ei = {};
941
942 if (!__may_extent_tree(dn->inode, type))
943 return;
944
945 ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
946 dn->ofs_in_node;
947 ei.len = 1;
948
949 if (type == EX_READ) {
950 if (dn->data_blkaddr == NEW_ADDR)
951 ei.blk = NULL_ADDR;
952 else
953 ei.blk = dn->data_blkaddr;
954 } else if (type == EX_BLOCK_AGE) {
955 if (__get_new_block_age(dn->inode, &ei, dn->data_blkaddr))
956 return;
957 }
958 __update_extent_tree_range(dn->inode, &ei, type);
959 }
960
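/*
 * Reclaim up to @nr_shrink objects in two passes: first whole unreferenced
 * (zombie) trees, then individual extent nodes from the head (coldest end)
 * of the global LRU list.
 */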
961 static unsigned int __shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink,
962 enum extent_type type)
963 {
964 struct extent_tree_info *eti = &sbi->extent_tree[type];
965 struct extent_tree *et, *next;
966 struct extent_node *en;
967 unsigned int node_cnt = 0, tree_cnt = 0;
968 int remained;
969
970 if (!atomic_read(&eti->total_zombie_tree))
971 goto free_node;
972
973 if (!mutex_trylock(&eti->extent_tree_lock))
974 goto out;
975
976 /* 1. remove unreferenced extent tree */
977 list_for_each_entry_safe(et, next, &eti->zombie_list, list) {
978 if (atomic_read(&et->node_cnt)) {
979 write_lock(&et->lock);
980 node_cnt += __free_extent_tree(sbi, et);
981 write_unlock(&et->lock);
982 }
983 f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
984 list_del_init(&et->list);
985 radix_tree_delete(&eti->extent_tree_root, et->ino);
986 kmem_cache_free(extent_tree_slab, et);
987 atomic_dec(&eti->total_ext_tree);
988 atomic_dec(&eti->total_zombie_tree);
989 tree_cnt++;
990
991 if (node_cnt + tree_cnt >= nr_shrink)
992 goto unlock_out;
993 cond_resched();
994 }
995 mutex_unlock(&eti->extent_tree_lock);
996
997 free_node:
998 /* 2. remove LRU extent entries */
999 if (!mutex_trylock(&eti->extent_tree_lock))
1000 goto out;
1001
1002 remained = nr_shrink - (node_cnt + tree_cnt);
1003
1004 spin_lock(&eti->extent_lock);
1005 for (; remained > 0; remained--) {
1006 if (list_empty(&eti->extent_list))
1007 break;
1008 en = list_first_entry(&eti->extent_list,
1009 struct extent_node, list);
1010 et = en->et;
1011 if (!write_trylock(&et->lock)) {
1012 /* refresh this extent node's position in extent list */
1013 list_move_tail(&en->list, &eti->extent_list);
1014 continue;
1015 }
1016
1017 list_del_init(&en->list);
1018 spin_unlock(&eti->extent_lock);
1019
1020 __detach_extent_node(sbi, et, en);
1021
1022 write_unlock(&et->lock);
1023 node_cnt++;
1024 spin_lock(&eti->extent_lock);
1025 }
1026 spin_unlock(&eti->extent_lock);
1027
1028 unlock_out:
1029 mutex_unlock(&eti->extent_tree_lock);
1030 out:
1031 trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt, type);
1032
1033 return node_cnt + tree_cnt;
1034 }
1035
1036 /* read extent cache operations */
1037 bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
1038 struct extent_info *ei)
1039 {
1040 if (!__may_extent_tree(inode, EX_READ))
1041 return false;
1042
1043 return __lookup_extent_tree(inode, pgofs, ei, EX_READ);
1044 }
1045
1046 void f2fs_update_read_extent_cache(struct dnode_of_data *dn)
1047 {
1048 return __update_extent_cache(dn, EX_READ);
1049 }
1050
1051 void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
1052 pgoff_t fofs, block_t blkaddr, unsigned int len)
1053 {
1054 struct extent_info ei = {
1055 .fofs = fofs,
1056 .len = len,
1057 .blk = blkaddr,
1058 };
1059
1060 if (!__may_extent_tree(dn->inode, EX_READ))
1061 return;
1062
1063 __update_extent_tree_range(dn->inode, &ei, EX_READ);
1064 }
1065
1066 unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
1067 {
1068 if (!test_opt(sbi, READ_EXTENT_CACHE))
1069 return 0;
1070
1071 return __shrink_extent_tree(sbi, nr_shrink, EX_READ);
1072 }
1073
1074 /* block age extent cache operations */
1075 bool f2fs_lookup_age_extent_cache(struct inode *inode, pgoff_t pgofs,
1076 struct extent_info *ei)
1077 {
1078 if (!__may_extent_tree(inode, EX_BLOCK_AGE))
1079 return false;
1080
1081 return __lookup_extent_tree(inode, pgofs, ei, EX_BLOCK_AGE);
1082 }
1083
1084 void f2fs_update_age_extent_cache(struct dnode_of_data *dn)
1085 {
1086 return __update_extent_cache(dn, EX_BLOCK_AGE);
1087 }
1088
1089 void f2fs_update_age_extent_cache_range(struct dnode_of_data *dn,
1090 pgoff_t fofs, unsigned int len)
1091 {
1092 struct extent_info ei = {
1093 .fofs = fofs,
1094 .len = len,
1095 };
1096
1097 if (!__may_extent_tree(dn->inode, EX_BLOCK_AGE))
1098 return;
1099
1100 __update_extent_tree_range(dn->inode, &ei, EX_BLOCK_AGE);
1101 }
1102
1103 unsigned int f2fs_shrink_age_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
1104 {
1105 if (!test_opt(sbi, AGE_EXTENT_CACHE))
1106 return 0;
1107
1108 return __shrink_extent_tree(sbi, nr_shrink, EX_BLOCK_AGE);
1109 }
1110
1111 static unsigned int __destroy_extent_node(struct inode *inode,
1112 enum extent_type type)
1113 {
1114 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1115 struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
1116 unsigned int node_cnt = 0;
1117
1118 if (!et || !atomic_read(&et->node_cnt))
1119 return 0;
1120
1121 write_lock(&et->lock);
1122 node_cnt = __free_extent_tree(sbi, et);
1123 write_unlock(&et->lock);
1124
1125 return node_cnt;
1126 }
1127
1128 void f2fs_destroy_extent_node(struct inode *inode)
1129 {
1130 __destroy_extent_node(inode, EX_READ);
1131 __destroy_extent_node(inode, EX_BLOCK_AGE);
1132 }
1133
1134 static void __drop_extent_tree(struct inode *inode, enum extent_type type)
1135 {
1136 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1137 struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
1138 bool updated = false;
1139
1140 if (!__may_extent_tree(inode, type))
1141 return;
1142
1143 write_lock(&et->lock);
1145 __free_extent_tree(sbi, et);
1146 if (type == EX_READ) {
1147 set_inode_flag(inode, FI_NO_EXTENT);
1148 if (et->largest.len) {
1149 et->largest.len = 0;
1150 updated = true;
1151 }
1152 }
1153 write_unlock(&et->lock);
1154 if (updated)
1155 f2fs_mark_inode_dirty_sync(inode, true);
1156 }
1157
1158 void f2fs_drop_extent_tree(struct inode *inode)
1159 {
1160 __drop_extent_tree(inode, EX_READ);
1161 __drop_extent_tree(inode, EX_BLOCK_AGE);
1162 }
1163
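/*
 * Called on inode eviction: a still-linked, healthy inode that has cached
 * nodes only parks its tree on the zombie list for the shrinker; otherwise
 * the nodes and the tree itself are freed immediately.
 */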
1164 static void __destroy_extent_tree(struct inode *inode, enum extent_type type)
1165 {
1166 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1167 struct extent_tree_info *eti = &sbi->extent_tree[type];
1168 struct extent_tree *et = F2FS_I(inode)->extent_tree[type];
1169 unsigned int node_cnt = 0;
1170
1171 if (!et)
1172 return;
1173
1174 if (inode->i_nlink && !is_bad_inode(inode) &&
1175 atomic_read(&et->node_cnt)) {
1176 mutex_lock(&eti->extent_tree_lock);
1177 list_add_tail(&et->list, &eti->zombie_list);
1178 atomic_inc(&eti->total_zombie_tree);
1179 mutex_unlock(&eti->extent_tree_lock);
1180 return;
1181 }
1182
1183 /* free all extent info belonging to this extent tree */
1184 node_cnt = __destroy_extent_node(inode, type);
1185
1186 /* delete extent tree entry in radix tree */
1187 mutex_lock(&eti->extent_tree_lock);
1188 f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
1189 radix_tree_delete(&eti->extent_tree_root, inode->i_ino);
1190 kmem_cache_free(extent_tree_slab, et);
1191 atomic_dec(&eti->total_ext_tree);
1192 mutex_unlock(&eti->extent_tree_lock);
1193
1194 F2FS_I(inode)->extent_tree[type] = NULL;
1195
1196 trace_f2fs_destroy_extent_tree(inode, node_cnt, type);
1197 }
1198
1199 void f2fs_destroy_extent_tree(struct inode *inode)
1200 {
1201 __destroy_extent_tree(inode, EX_READ);
1202 __destroy_extent_tree(inode, EX_BLOCK_AGE);
1203 }
1204
1205 static void __init_extent_tree_info(struct extent_tree_info *eti)
1206 {
1207 INIT_RADIX_TREE(&eti->extent_tree_root, GFP_NOIO);
1208 mutex_init(&eti->extent_tree_lock);
1209 INIT_LIST_HEAD(&eti->extent_list);
1210 spin_lock_init(&eti->extent_lock);
1211 atomic_set(&eti->total_ext_tree, 0);
1212 INIT_LIST_HEAD(&eti->zombie_list);
1213 atomic_set(&eti->total_zombie_tree, 0);
1214 atomic_set(&eti->total_ext_node, 0);
1215 }
1216
1217 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi)
1218 {
1219 __init_extent_tree_info(&sbi->extent_tree[EX_READ]);
1220 __init_extent_tree_info(&sbi->extent_tree[EX_BLOCK_AGE]);
1221
1222 /* initialize for block age extents */
1223 atomic64_set(&sbi->allocated_data_blocks, 0);
1224 sbi->hot_data_age_threshold = DEF_HOT_DATA_AGE_THRESHOLD;
1225 sbi->warm_data_age_threshold = DEF_WARM_DATA_AGE_THRESHOLD;
1226 sbi->last_age_weight = LAST_AGE_WEIGHT;
1227 }
1228
1229 int __init f2fs_create_extent_cache(void)
1230 {
1231 extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
1232 sizeof(struct extent_tree));
1233 if (!extent_tree_slab)
1234 return -ENOMEM;
1235 extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
1236 sizeof(struct extent_node));
1237 if (!extent_node_slab) {
1238 kmem_cache_destroy(extent_tree_slab);
1239 return -ENOMEM;
1240 }
1241 return 0;
1242 }
1243
1244 void f2fs_destroy_extent_cache(void)
1245 {
1246 kmem_cache_destroy(extent_node_slab);
1247 kmem_cache_destroy(extent_tree_slab);
1248 }
1249