// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

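/* true when a free nid build is in flight, i.e. build_lock is held */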
#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
		return -EFSCORRUPTED;
	}
	return 0;
}

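/*
 * Decide whether a cache of the given type may keep growing: each type
 * is compared against its share of low memory, scaled by
 * nm_i->ram_thresh (see the per-type branches below).
 */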
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	if (!nm_i)
		return true;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% memory for each component respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
				sizeof(struct nat_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INMEM_PAGES) {
		/* it allows 20% / total_ram for inmemory pages */
		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
		res = mem_size < (val.totalram / 5);
	} else if (type == DISCARD_CACHE) {
		mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
				sizeof(struct discard_cmd)) >> PAGE_SHIFT;
		res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
	} else if (type == COMPRESS_PAGE) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		unsigned long free_ram = val.freeram;

		/*
		 * If free memory is lower than the watermark or the cached
		 * page count exceeds the threshold, deny caching the
		 * compressed page.
		 */
		res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
			(COMPRESS_MAPPING(sbi)->nrpages <
			 free_ram * sbi->compress_percent / 100);
#else
		res = false;
#endif
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_page_cache_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
}

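/*
 * Copy the current NAT block into its alternate location and flip the
 * NAT version bitmap, so that the pending NAT updates land in the other
 * block of the NAT pair at the next checkpoint.
 */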
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
						nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab,
					GFP_F2FS_ZERO, no_fail, sbi);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt[TOTAL_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* for a recently accessed nat entry, move it to the tail of the lru list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt[TOTAL_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	__free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
						GFP_NOFS, true, NULL);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}

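/*
 * Move a nat entry onto the dirty set of its NAT block. Entries whose
 * block address is still NEW_ADDR are preallocated nodes; they stay off
 * both the dirty sets and the clean LRU until they get a real address.
 */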
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt in the following conditions:
	 * 1. update NEW_ADDR to a valid block address;
	 * 2. update an old block address to a new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->nat_cnt[DIRTY_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->nat_cnt[DIRTY_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
					GFP_NOFS, true, NULL);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}

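/*
 * Drop the fsync entry tracking @page. The entry must exist; falling
 * off the end of the list indicates a bookkeeping bug, hence the
 * f2fs_bug_on() below.
 */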
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* caller must not hold nat_tree_lock; it is taken here */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	/* Let's mitigate lock contention of nat_tree_lock during checkpoint */
	if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
		return;

	new = __alloc_nat_entry(sbi, nid, false);
	if (!new)
		return;

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	f2fs_up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}

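/*
 * Record the new block address of a node in the nat cache and mark the
 * entry dirty so the change reaches the NAT area at checkpoint. When
 * fsync_done is set, the owning inode's entry is also flagged so that
 * a later fsync can be skipped (see f2fs_need_inode_block_update()).
 */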
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So, reinitialize it with new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);

		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	f2fs_up_write(&nm_i->nat_tree_lock);
}

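/*
 * Shrinker helper: drop up to nr_shrink clean nat entries from the head
 * of the LRU list. Returns the number actually freed, or 0 immediately
 * if nat_tree_lock is contended.
 */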
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	f2fs_up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

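/*
 * Resolve nid -> node_info. Lookup order: the in-memory nat cache, then
 * the NAT journal kept in the hot data curseg, then the on-disk NAT
 * block. The result is added to the nat cache on the way out.
 */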
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
				struct node_info *ni, bool checkpoint_context)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	block_t blkaddr;
	int i;

	ni->nid = nid;
retry:
	/* Check nat cache */
	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		f2fs_up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	/*
	 * Check the current segment summary by trying to grab
	 * journal_rwsem first. This sem is on the critical path of the
	 * checkpoint, which requires the above nat_tree_lock. Therefore,
	 * if we fail to grab it here, we should retry rather than
	 * bothering the checkpoint.
	 */
	if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
		down_read(&curseg->journal_rwsem);
	} else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
				!down_read_trylock(&curseg->journal_rwsem)) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto retry;
	}

	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	f2fs_up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}

/*
 * Readahead MAX_RA_NODE node pages.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		fallthrough;
	case 2:
		base += 2 * direct_blks;
		fallthrough;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}

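/*
 * Node path geometry: an inode addresses direct_index block pointers
 * inline, followed by two direct node blocks, two indirect node blocks,
 * and one double-indirect node block, in that order.
 */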
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = f2fs_data_blkaddr(dn);

	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
					f2fs_sb_has_readonly(sbi)) {
		unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
		block_t blkaddr;

		if (!c_len)
			goto out;

		blkaddr = f2fs_data_blkaddr(dn);
		if (blkaddr == COMPRESS_ADDR)
			blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + 1);

		f2fs_update_extent_tree_range_compressed(dn->inode,
					index, blkaddr,
					F2FS_I(dn->inode)->i_cluster_size,
					c_len);
	}
out:
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}

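/*
 * Free the node block referenced by dn: invalidate its block address,
 * clear its nat entry, and drop its page from the node mapping. For an
 * inode node this also drops the orphan record and the inode count.
 */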
static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
	if (err)
		return err;

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks(dn);
	err = truncate_node(dn);
	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}

	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count will be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0) {
		trace_f2fs_truncate_inode_blocks_exit(inode, level);
		return level;
	}

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}

struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
		err = -EFSCORRUPTED;
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		goto fail;
	}
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Callers should put the page according to the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (PageUptodate(page)) {
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			ClearPageUptodate(page);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, page->index, &ni, false);
	if (err)
		return err;

	/* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

	err = f2fs_submit_page_bio(&fio);

	if (!err)
		f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE);

	return err;
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

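/*
 * Common node page lookup: grab the page from the node mapping, read it
 * from disk if needed (with readahead of its siblings when @parent is
 * given), then verify the checksum and the footer nid before returning
 * the page locked.
 */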
static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		goto out_put_err;
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	if (likely(nid == nid_of_node(page)))
		return page;

	f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			  nid, nid_of_node(page), ino_of_node(page),
			  ofs_of_node(page), cpver_of_node(page),
			  next_blkaddr_of_node(page));
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	err = -EINVAL;
out_err:
	ClearPageUptodate(page);
out_put_err:
	/* ENOENT comes from read_node_page, which is not an error. */
	if (err != -ENOENT)
		f2fs_handle_page_eio(sbi, page->index, NODE);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

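/*
 * Write an inode's inline data back from its first data page, so that
 * the node page about to be written out carries up-to-date content.
 */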
static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

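/*
 * Scan the dirty node pages of @ino and return a referenced pointer to
 * the last dirty direct node page; fsync uses it as the barrier page
 * that carries the fsync mark.
 */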
static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct pagevec pvec;
	struct page *last_page = NULL;
	int nr_pages;

	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}

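/*
 * Write back one node page: look up the old block address in the nat
 * cache, write the block through f2fs_do_write_node_page(), and record
 * the new address. Returns 0, or AOP_WRITEPAGE_ACTIVATE after
 * redirtying the page.
 */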
static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(page, NODE);

	if (unlikely(f2fs_cp_error(sbi))) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
		goto redirty_out;

	if (wbc->for_reclaim) {
		if (!f2fs_down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		f2fs_down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		f2fs_up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		f2fs_up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	set_page_writeback(page);
	ClearPageError(page);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	f2fs_up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		submitted = NULL;
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
}

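/*
 * Write the dirty direct node pages that belong to @ino for fsync. In
 * atomic mode, only the last dirty dnode carries the fsync mark, and
 * the walk is retried until that marked page actually gets written.
 */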
f2fs_fsync_node_pages(struct f2fs_sb_info * sbi,struct inode * inode,struct writeback_control * wbc,bool atomic,unsigned int * seq_id)1729 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
1730 			struct writeback_control *wbc, bool atomic,
1731 			unsigned int *seq_id)
1732 {
1733 	pgoff_t index;
1734 	struct pagevec pvec;
1735 	int ret = 0;
1736 	struct page *last_page = NULL;
1737 	bool marked = false;
1738 	nid_t ino = inode->i_ino;
1739 	int nr_pages;
1740 	int nwritten = 0;
1741 
1742 	if (atomic) {
1743 		last_page = last_fsync_dnode(sbi, ino);
1744 		if (IS_ERR_OR_NULL(last_page))
1745 			return PTR_ERR_OR_ZERO(last_page);
1746 	}
1747 retry:
1748 	pagevec_init(&pvec);
1749 	index = 0;
1750 
1751 	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1752 				PAGECACHE_TAG_DIRTY))) {
1753 		int i;
1754 
1755 		for (i = 0; i < nr_pages; i++) {
1756 			struct page *page = pvec.pages[i];
1757 			bool submitted = false;
1758 
1759 			if (unlikely(f2fs_cp_error(sbi))) {
1760 				f2fs_put_page(last_page, 0);
1761 				pagevec_release(&pvec);
1762 				ret = -EIO;
1763 				goto out;
1764 			}
1765 
1766 			if (!IS_DNODE(page) || !is_cold_node(page))
1767 				continue;
1768 			if (ino_of_node(page) != ino)
1769 				continue;
1770 
1771 			lock_page(page);
1772 
1773 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1774 continue_unlock:
1775 				unlock_page(page);
1776 				continue;
1777 			}
1778 			if (ino_of_node(page) != ino)
1779 				goto continue_unlock;
1780 
1781 			if (!PageDirty(page) && page != last_page) {
1782 				/* someone wrote it for us */
1783 				goto continue_unlock;
1784 			}
1785 
1786 			f2fs_wait_on_page_writeback(page, NODE, true, true);
1787 
1788 			set_fsync_mark(page, 0);
1789 			set_dentry_mark(page, 0);
1790 
1791 			if (!atomic || page == last_page) {
1792 				set_fsync_mark(page, 1);
1793 				percpu_counter_inc(&sbi->rf_node_block_count);
1794 				if (IS_INODE(page)) {
1795 					if (is_inode_flag_set(inode,
1796 								FI_DIRTY_INODE))
1797 						f2fs_update_inode(inode, page);
1798 					set_dentry_mark(page,
1799 						f2fs_need_dentry_mark(sbi, ino));
1800 				}
1801 				/* may be written by other thread */
1802 				if (!PageDirty(page))
1803 					set_page_dirty(page);
1804 			}
1805 
1806 			if (!clear_page_dirty_for_io(page))
1807 				goto continue_unlock;
1808 
1809 			ret = __write_node_page(page, atomic &&
1810 						page == last_page,
1811 						&submitted, wbc, true,
1812 						FS_NODE_IO, seq_id);
1813 			if (ret) {
1814 				unlock_page(page);
1815 				f2fs_put_page(last_page, 0);
1816 				break;
1817 			} else if (submitted) {
1818 				nwritten++;
1819 			}
1820 
1821 			if (page == last_page) {
1822 				f2fs_put_page(page, 0);
1823 				marked = true;
1824 				break;
1825 			}
1826 		}
1827 		pagevec_release(&pvec);
1828 		cond_resched();
1829 
1830 		if (ret || marked)
1831 			break;
1832 	}
1833 	if (!ret && atomic && !marked) {
1834 		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
1835 			   ino, last_page->index);
1836 		lock_page(last_page);
1837 		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
1838 		set_page_dirty(last_page);
1839 		unlock_page(last_page);
1840 		goto retry;
1841 	}
1842 out:
1843 	if (nwritten)
1844 		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
1845 	return ret ? -EIO : 0;
1846 }
1847 
f2fs_match_ino(struct inode * inode,unsigned long ino,void * data)1848 static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
1849 {
1850 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1851 	bool clean;
1852 
1853 	if (inode->i_ino != ino)
1854 		return 0;
1855 
1856 	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
1857 		return 0;
1858 
1859 	spin_lock(&sbi->inode_lock[DIRTY_META]);
1860 	clean = list_empty(&F2FS_I(inode)->gdirty_list);
1861 	spin_unlock(&sbi->inode_lock[DIRTY_META]);
1862 
1863 	if (clean)
1864 		return 0;
1865 
1866 	inode = igrab(inode);
1867 	if (!inode)
1868 		return 0;
1869 	return 1;
1870 }
1871 
flush_dirty_inode(struct page * page)1872 static bool flush_dirty_inode(struct page *page)
1873 {
1874 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1875 	struct inode *inode;
1876 	nid_t ino = ino_of_node(page);
1877 
1878 	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
1879 	if (!inode)
1880 		return false;
1881 
1882 	f2fs_update_inode(inode, page);
1883 	unlock_page(page);
1884 
1885 	iput(inode);
1886 	return true;
1887 }
1888 
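/*
 * Scan all dirty node pages and, for each dnode still carrying the inline
 * flag, clear it and flush the owner's inline data.
 */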
1889 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
1890 {
1891 	pgoff_t index = 0;
1892 	struct pagevec pvec;
1893 	int nr_pages;
1894 
1895 	pagevec_init(&pvec);
1896 
1897 	while ((nr_pages = pagevec_lookup_tag(&pvec,
1898 			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
1899 		int i;
1900 
1901 		for (i = 0; i < nr_pages; i++) {
1902 			struct page *page = pvec.pages[i];
1903 
1904 			if (!IS_DNODE(page))
1905 				continue;
1906 
1907 			lock_page(page);
1908 
1909 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1910 continue_unlock:
1911 				unlock_page(page);
1912 				continue;
1913 			}
1914 
1915 			if (!PageDirty(page)) {
1916 				/* someone wrote it for us */
1917 				goto continue_unlock;
1918 			}
1919 
1920 			/* flush inline_data, if it's an async context. */
1921 			if (page_private_inline(page)) {
1922 				clear_page_private_inline(page);
1923 				unlock_page(page);
1924 				flush_inline_data(sbi, ino_of_node(page));
1925 				continue;
1926 			}
1927 			unlock_page(page);
1928 		}
1929 		pagevec_release(&pvec);
1930 		cond_resched();
1931 	}
1932 }
1933 
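/*
 * Write back dirty node pages in three passes (indirect nodes, then
 * dentry dnodes, then file dnodes). Returns -EIO on checkpoint error,
 * otherwise the last error seen from __write_node_page().
 */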
1934 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
1935 				struct writeback_control *wbc,
1936 				bool do_balance, enum iostat_type io_type)
1937 {
1938 	pgoff_t index;
1939 	struct pagevec pvec;
1940 	int step = 0;
1941 	int nwritten = 0;
1942 	int ret = 0;
1943 	int nr_pages, done = 0;
1944 
1945 	pagevec_init(&pvec);
1946 
1947 next_step:
1948 	index = 0;
1949 
1950 	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
1951 			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
1952 		int i;
1953 
1954 		for (i = 0; i < nr_pages; i++) {
1955 			struct page *page = pvec.pages[i];
1956 			bool submitted = false;
1957 			bool may_dirty = true;
1958 
1959 			/* give priority to WB_SYNC threads */
1960 			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
1961 					wbc->sync_mode == WB_SYNC_NONE) {
1962 				done = 1;
1963 				break;
1964 			}
1965 
1966 			/*
1967 			 * flushing sequence with step:
1968 			 * 0. indirect nodes
1969 			 * 1. dentry dnodes
1970 			 * 2. file dnodes
1971 			 */
1972 			if (step == 0 && IS_DNODE(page))
1973 				continue;
1974 			if (step == 1 && (!IS_DNODE(page) ||
1975 						is_cold_node(page)))
1976 				continue;
1977 			if (step == 2 && (!IS_DNODE(page) ||
1978 						!is_cold_node(page)))
1979 				continue;
1980 lock_node:
1981 			if (wbc->sync_mode == WB_SYNC_ALL)
1982 				lock_page(page);
1983 			else if (!trylock_page(page))
1984 				continue;
1985 
1986 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1987 continue_unlock:
1988 				unlock_page(page);
1989 				continue;
1990 			}
1991 
1992 			if (!PageDirty(page)) {
1993 				/* someone wrote it for us */
1994 				goto continue_unlock;
1995 			}
1996 
1997 			/* flush inline_data/inode, if it's an async context. */
1998 			if (!do_balance)
1999 				goto write_node;
2000 
2001 			/* flush inline_data */
2002 			if (page_private_inline(page)) {
2003 				clear_page_private_inline(page);
2004 				unlock_page(page);
2005 				flush_inline_data(sbi, ino_of_node(page));
2006 				goto lock_node;
2007 			}
2008 
2009 			/* flush dirty inode */
2010 			if (IS_INODE(page) && may_dirty) {
2011 				may_dirty = false;
2012 				if (flush_dirty_inode(page))
2013 					goto lock_node;
2014 			}
2015 write_node:
2016 			f2fs_wait_on_page_writeback(page, NODE, true, true);
2017 
2018 			if (!clear_page_dirty_for_io(page))
2019 				goto continue_unlock;
2020 
2021 			set_fsync_mark(page, 0);
2022 			set_dentry_mark(page, 0);
2023 
2024 			ret = __write_node_page(page, false, &submitted,
2025 						wbc, do_balance, io_type, NULL);
2026 			if (ret)
2027 				unlock_page(page);
2028 			else if (submitted)
2029 				nwritten++;
2030 
2031 			if (--wbc->nr_to_write == 0)
2032 				break;
2033 		}
2034 		pagevec_release(&pvec);
2035 		cond_resched();
2036 
2037 		if (wbc->nr_to_write == 0) {
2038 			step = 2;
2039 			break;
2040 		}
2041 	}
2042 
2043 	if (step < 2) {
2044 		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2045 				wbc->sync_mode == WB_SYNC_NONE && step == 1)
2046 			goto out;
2047 		step++;
2048 		goto next_step;
2049 	}
2050 out:
2051 	if (nwritten)
2052 		f2fs_submit_merged_write(sbi, NODE);
2053 
2054 	if (unlikely(f2fs_cp_error(sbi)))
2055 		return -EIO;
2056 	return ret;
2057 }
2058 
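/*
 * Wait for writeback of the pages queued on sbi->fsync_node_list, up to
 * and including @seq_id, then fold in any error recorded on the node
 * mapping by filemap_check_errors().
 */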
2059 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
2060 						unsigned int seq_id)
2061 {
2062 	struct fsync_node_entry *fn;
2063 	struct page *page;
2064 	struct list_head *head = &sbi->fsync_node_list;
2065 	unsigned long flags;
2066 	unsigned int cur_seq_id = 0;
2067 	int ret2, ret = 0;
2068 
2069 	while (seq_id && cur_seq_id < seq_id) {
2070 		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
2071 		if (list_empty(head)) {
2072 			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2073 			break;
2074 		}
2075 		fn = list_first_entry(head, struct fsync_node_entry, list);
2076 		if (fn->seq_id > seq_id) {
2077 			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2078 			break;
2079 		}
2080 		cur_seq_id = fn->seq_id;
2081 		page = fn->page;
2082 		get_page(page);
2083 		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2084 
2085 		f2fs_wait_on_page_writeback(page, NODE, true, false);
2086 		if (TestClearPageError(page))
2087 			ret = -EIO;
2088 
2089 		put_page(page);
2090 
2091 		if (ret)
2092 			break;
2093 	}
2094 
2095 	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
2096 	if (!ret)
2097 		ret = ret2;
2098 
2099 	return ret;
2100 }
2101 
2102 static int f2fs_write_node_pages(struct address_space *mapping,
2103 			    struct writeback_control *wbc)
2104 {
2105 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2106 	struct blk_plug plug;
2107 	long diff;
2108 
2109 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2110 		goto skip_write;
2111 
2112 	/* balancing f2fs's metadata in background */
2113 	f2fs_balance_fs_bg(sbi, true);
2114 
2115 	/* collect a number of dirty node pages and write them together */
2116 	if (wbc->sync_mode != WB_SYNC_ALL &&
2117 			get_pages(sbi, F2FS_DIRTY_NODES) <
2118 					nr_pages_to_skip(sbi, NODE))
2119 		goto skip_write;
2120 
2121 	if (wbc->sync_mode == WB_SYNC_ALL)
2122 		atomic_inc(&sbi->wb_sync_req[NODE]);
2123 	else if (atomic_read(&sbi->wb_sync_req[NODE])) {
2124 		/* to avoid potential deadlock */
2125 		if (current->plug)
2126 			blk_finish_plug(current->plug);
2127 		goto skip_write;
2128 	}
2129 
2130 	trace_f2fs_writepages(mapping->host, wbc, NODE);
2131 
2132 	diff = nr_pages_to_write(sbi, NODE, wbc);
2133 	blk_start_plug(&plug);
2134 	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
2135 	blk_finish_plug(&plug);
2136 	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
2137 
2138 	if (wbc->sync_mode == WB_SYNC_ALL)
2139 		atomic_dec(&sbi->wb_sync_req[NODE]);
2140 	return 0;
2141 
2142 skip_write:
2143 	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
2144 	trace_f2fs_writepages(mapping->host, wbc, NODE);
2145 	return 0;
2146 }
2147 
2148 static int f2fs_set_node_page_dirty(struct page *page)
2149 {
2150 	trace_f2fs_set_page_dirty(page, NODE);
2151 
2152 	if (!PageUptodate(page))
2153 		SetPageUptodate(page);
2154 #ifdef CONFIG_F2FS_CHECK_FS
2155 	if (IS_INODE(page))
2156 		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
2157 #endif
2158 	if (!PageDirty(page)) {
2159 		__set_page_dirty_nobuffers(page);
2160 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
2161 		set_page_private_reference(page);
2162 		return 1;
2163 	}
2164 	return 0;
2165 }
2166 
2167 /*
2168  * Structure of the f2fs node operations
2169  */
2170 const struct address_space_operations f2fs_node_aops = {
2171 	.writepage	= f2fs_write_node_page,
2172 	.writepages	= f2fs_write_node_pages,
2173 	.set_page_dirty	= f2fs_set_node_page_dirty,
2174 	.invalidatepage	= f2fs_invalidate_page,
2175 	.releasepage	= f2fs_release_page,
2176 #ifdef CONFIG_MIGRATION
2177 	.migratepage	= f2fs_migrate_page,
2178 #endif
2179 };
2180 
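/*
 * Free nid bookkeeping: every cached nid lives in the free_nid_root radix
 * tree, and FREE_NID entries are additionally linked on free_nid_list;
 * PREALLOC_NID entries stay in the tree only. nid_cnt[] tracks both states.
 */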
2181 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
2182 						nid_t n)
2183 {
2184 	return radix_tree_lookup(&nm_i->free_nid_root, n);
2185 }
2186 
2187 static int __insert_free_nid(struct f2fs_sb_info *sbi,
2188 				struct free_nid *i)
2189 {
2190 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2191 	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
2192 
2193 	if (err)
2194 		return err;
2195 
2196 	nm_i->nid_cnt[FREE_NID]++;
2197 	list_add_tail(&i->list, &nm_i->free_nid_list);
2198 	return 0;
2199 }
2200 
2201 static void __remove_free_nid(struct f2fs_sb_info *sbi,
2202 			struct free_nid *i, enum nid_state state)
2203 {
2204 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2205 
2206 	f2fs_bug_on(sbi, state != i->state);
2207 	nm_i->nid_cnt[state]--;
2208 	if (state == FREE_NID)
2209 		list_del(&i->list);
2210 	radix_tree_delete(&nm_i->free_nid_root, i->nid);
2211 }
2212 
2213 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
2214 			enum nid_state org_state, enum nid_state dst_state)
2215 {
2216 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2217 
2218 	f2fs_bug_on(sbi, org_state != i->state);
2219 	i->state = dst_state;
2220 	nm_i->nid_cnt[org_state]--;
2221 	nm_i->nid_cnt[dst_state]++;
2222 
2223 	switch (dst_state) {
2224 	case PREALLOC_NID:
2225 		list_del(&i->list);
2226 		break;
2227 	case FREE_NID:
2228 		list_add_tail(&i->list, &nm_i->free_nid_list);
2229 		break;
2230 	default:
2231 		BUG_ON(1);
2232 	}
2233 }
2234 
2235 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
2236 {
2237 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2238 	unsigned int i;
2239 	bool ret = true;
2240 
2241 	f2fs_down_read(&nm_i->nat_tree_lock);
2242 	for (i = 0; i < nm_i->nat_blocks; i++) {
2243 		if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
2244 			ret = false;
2245 			break;
2246 		}
2247 	}
2248 	f2fs_up_read(&nm_i->nat_tree_lock);
2249 
2250 	return ret;
2251 }
2252 
2253 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
2254 							bool set, bool build)
2255 {
2256 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2257 	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
2258 	unsigned int nid_ofs = nid - START_NID(nid);
2259 
2260 	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2261 		return;
2262 
2263 	if (set) {
2264 		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2265 			return;
2266 		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2267 		nm_i->free_nid_count[nat_ofs]++;
2268 	} else {
2269 		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2270 			return;
2271 		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2272 		if (!build)
2273 			nm_i->free_nid_count[nat_ofs]--;
2274 	}
2275 }
2276 
2277 /* return whether the nid is recognized as free */
2278 static bool add_free_nid(struct f2fs_sb_info *sbi,
2279 				nid_t nid, bool build, bool update)
2280 {
2281 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2282 	struct free_nid *i, *e;
2283 	struct nat_entry *ne;
2284 	int err = -EINVAL;
2285 	bool ret = false;
2286 
2287 	/* 0 nid should not be used */
2288 	if (unlikely(nid == 0))
2289 		return false;
2290 
2291 	if (unlikely(f2fs_check_nid_range(sbi, nid)))
2292 		return false;
2293 
2294 	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
2295 	i->nid = nid;
2296 	i->state = FREE_NID;
2297 
2298 	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2299 
2300 	spin_lock(&nm_i->nid_list_lock);
2301 
2302 	if (build) {
2303 		/*
2304 		 *   Thread A             Thread B
2305 		 *  - f2fs_create
2306 		 *   - f2fs_new_inode
2307 		 *    - f2fs_alloc_nid
2308 		 *     - __insert_nid_to_list(PREALLOC_NID)
2309 		 *                     - f2fs_balance_fs_bg
2310 		 *                      - f2fs_build_free_nids
2311 		 *                       - __f2fs_build_free_nids
2312 		 *                        - scan_nat_page
2313 		 *                         - add_free_nid
2314 		 *                          - __lookup_nat_cache
2315 		 *  - f2fs_add_link
2316 		 *   - f2fs_init_inode_metadata
2317 		 *    - f2fs_new_inode_page
2318 		 *     - f2fs_new_node_page
2319 		 *      - set_node_addr
2320 		 *  - f2fs_alloc_nid_done
2321 		 *   - __remove_nid_from_list(PREALLOC_NID)
2322 		 *                         - __insert_nid_to_list(FREE_NID)
2323 		 */
2324 		ne = __lookup_nat_cache(nm_i, nid);
2325 		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2326 				nat_get_blkaddr(ne) != NULL_ADDR))
2327 			goto err_out;
2328 
2329 		e = __lookup_free_nid_list(nm_i, nid);
2330 		if (e) {
2331 			if (e->state == FREE_NID)
2332 				ret = true;
2333 			goto err_out;
2334 		}
2335 	}
2336 	ret = true;
2337 	err = __insert_free_nid(sbi, i);
2338 err_out:
2339 	if (update) {
2340 		update_free_nid_bitmap(sbi, nid, ret, build);
2341 		if (!build)
2342 			nm_i->available_nids++;
2343 	}
2344 	spin_unlock(&nm_i->nid_list_lock);
2345 	radix_tree_preload_end();
2346 
2347 	if (err)
2348 		kmem_cache_free(free_nid_slab, i);
2349 	return ret;
2350 }
2351 
2352 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2353 {
2354 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2355 	struct free_nid *i;
2356 	bool need_free = false;
2357 
2358 	spin_lock(&nm_i->nid_list_lock);
2359 	i = __lookup_free_nid_list(nm_i, nid);
2360 	if (i && i->state == FREE_NID) {
2361 		__remove_free_nid(sbi, i, FREE_NID);
2362 		need_free = true;
2363 	}
2364 	spin_unlock(&nm_i->nid_list_lock);
2365 
2366 	if (need_free)
2367 		kmem_cache_free(free_nid_slab, i);
2368 }
2369 
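/*
 * Scan a single NAT page: each NULL_ADDR entry becomes a free nid
 * candidate, while a NEW_ADDR entry means the NAT is corrupted and the
 * scan must be aborted.
 */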
2370 static int scan_nat_page(struct f2fs_sb_info *sbi,
2371 			struct page *nat_page, nid_t start_nid)
2372 {
2373 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2374 	struct f2fs_nat_block *nat_blk = page_address(nat_page);
2375 	block_t blk_addr;
2376 	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2377 	int i;
2378 
2379 	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2380 
2381 	i = start_nid % NAT_ENTRY_PER_BLOCK;
2382 
2383 	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2384 		if (unlikely(start_nid >= nm_i->max_nid))
2385 			break;
2386 
2387 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2388 
2389 		if (blk_addr == NEW_ADDR)
2390 			return -EINVAL;
2391 
2392 		if (blk_addr == NULL_ADDR) {
2393 			add_free_nid(sbi, start_nid, true, true);
2394 		} else {
2395 			spin_lock(&NM_I(sbi)->nid_list_lock);
2396 			update_free_nid_bitmap(sbi, start_nid, false, true);
2397 			spin_unlock(&NM_I(sbi)->nid_list_lock);
2398 		}
2399 	}
2400 
2401 	return 0;
2402 }
2403 
2404 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
2405 {
2406 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2407 	struct f2fs_journal *journal = curseg->journal;
2408 	int i;
2409 
2410 	down_read(&curseg->journal_rwsem);
2411 	for (i = 0; i < nats_in_cursum(journal); i++) {
2412 		block_t addr;
2413 		nid_t nid;
2414 
2415 		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2416 		nid = le32_to_cpu(nid_in_journal(journal, i));
2417 		if (addr == NULL_ADDR)
2418 			add_free_nid(sbi, nid, true, false);
2419 		else
2420 			remove_free_nid(sbi, nid);
2421 	}
2422 	up_read(&curseg->journal_rwsem);
2423 }
2424 
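/*
 * Harvest free nids from the cached free_nid_bitmap instead of re-reading
 * NAT pages, then recheck the journal through scan_curseg_cache().
 */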
2425 static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
2426 {
2427 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2428 	unsigned int i, idx;
2429 	nid_t nid;
2430 
2431 	f2fs_down_read(&nm_i->nat_tree_lock);
2432 
2433 	for (i = 0; i < nm_i->nat_blocks; i++) {
2434 		if (!test_bit_le(i, nm_i->nat_block_bitmap))
2435 			continue;
2436 		if (!nm_i->free_nid_count[i])
2437 			continue;
2438 		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2439 			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2440 						NAT_ENTRY_PER_BLOCK, idx);
2441 			if (idx >= NAT_ENTRY_PER_BLOCK)
2442 				break;
2443 
2444 			nid = i * NAT_ENTRY_PER_BLOCK + idx;
2445 			add_free_nid(sbi, nid, true, false);
2446 
2447 			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2448 				goto out;
2449 		}
2450 	}
2451 out:
2452 	scan_curseg_cache(sbi);
2453 
2454 	f2fs_up_read(&nm_i->nat_tree_lock);
2455 }
2456 
2457 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2458 						bool sync, bool mount)
2459 {
2460 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2461 	int i = 0, ret;
2462 	nid_t nid = nm_i->next_scan_nid;
2463 
2464 	if (unlikely(nid >= nm_i->max_nid))
2465 		nid = 0;
2466 
2467 	if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
2468 		nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
2469 
2470 	/* Enough entries */
2471 	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2472 		return 0;
2473 
2474 	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2475 		return 0;
2476 
2477 	if (!mount) {
2478 		/* try to find free nids in free_nid_bitmap */
2479 		scan_free_nid_bits(sbi);
2480 
2481 		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2482 			return 0;
2483 	}
2484 
2485 	/* readahead nat pages to be scanned */
2486 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2487 							META_NAT, true);
2488 
2489 	f2fs_down_read(&nm_i->nat_tree_lock);
2490 
2491 	while (1) {
2492 		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2493 						nm_i->nat_block_bitmap)) {
2494 			struct page *page = get_current_nat_page(sbi, nid);
2495 
2496 			if (IS_ERR(page)) {
2497 				ret = PTR_ERR(page);
2498 			} else {
2499 				ret = scan_nat_page(sbi, page, nid);
2500 				f2fs_put_page(page, 1);
2501 			}
2502 
2503 			if (ret) {
2504 				f2fs_up_read(&nm_i->nat_tree_lock);
2505 				f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
2506 				return ret;
2507 			}
2508 		}
2509 
2510 		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2511 		if (unlikely(nid >= nm_i->max_nid))
2512 			nid = 0;
2513 
2514 		if (++i >= FREE_NID_PAGES)
2515 			break;
2516 	}
2517 
2518 	/* go to the next nat pages to find more free nids */
2519 	nm_i->next_scan_nid = nid;
2520 
2521 	/* find free nids from current sum_pages */
2522 	scan_curseg_cache(sbi);
2523 
2524 	f2fs_up_read(&nm_i->nat_tree_lock);
2525 
2526 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2527 					nm_i->ra_nid_pages, META_NAT, false);
2528 
2529 	return 0;
2530 }
2531 
2532 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2533 {
2534 	int ret;
2535 
2536 	mutex_lock(&NM_I(sbi)->build_lock);
2537 	ret = __f2fs_build_free_nids(sbi, sync, mount);
2538 	mutex_unlock(&NM_I(sbi)->build_lock);
2539 
2540 	return ret;
2541 }
2542 
2543 /*
2544  * If this function returns success, the caller can obtain a new nid
2545  * from the second parameter of this function.
2546  * The returned nid can be used as an ino as well as a nid when creating an inode.
2547  */
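/*
 * A typical caller pattern, as in f2fs_recover_xattr_data() below:
 *
 *	nid_t nid;
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = f2fs_new_node_page(&dn, ...);
 *	if (IS_ERR(page)) {
 *		f2fs_alloc_nid_failed(sbi, nid);
 *		return PTR_ERR(page);
 *	}
 *	f2fs_alloc_nid_done(sbi, nid);
 */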
2548 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2549 {
2550 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2551 	struct free_nid *i = NULL;
2552 retry:
2553 	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
2554 		f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
2555 		return false;
2556 	}
2557 
2558 	spin_lock(&nm_i->nid_list_lock);
2559 
2560 	if (unlikely(nm_i->available_nids == 0)) {
2561 		spin_unlock(&nm_i->nid_list_lock);
2562 		return false;
2563 	}
2564 
2565 	/* We should not use stale free nids created by f2fs_build_free_nids */
2566 	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2567 		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2568 		i = list_first_entry(&nm_i->free_nid_list,
2569 					struct free_nid, list);
2570 		*nid = i->nid;
2571 
2572 		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2573 		nm_i->available_nids--;
2574 
2575 		update_free_nid_bitmap(sbi, *nid, false, false);
2576 
2577 		spin_unlock(&nm_i->nid_list_lock);
2578 		return true;
2579 	}
2580 	spin_unlock(&nm_i->nid_list_lock);
2581 
2582 	/* Let's scan nat pages and their caches to get free nids */
2583 	if (!f2fs_build_free_nids(sbi, true, false))
2584 		goto retry;
2585 	return false;
2586 }
2587 
2588 /*
2589  * f2fs_alloc_nid() should be called prior to this function.
2590  */
2591 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2592 {
2593 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2594 	struct free_nid *i;
2595 
2596 	spin_lock(&nm_i->nid_list_lock);
2597 	i = __lookup_free_nid_list(nm_i, nid);
2598 	f2fs_bug_on(sbi, !i);
2599 	__remove_free_nid(sbi, i, PREALLOC_NID);
2600 	spin_unlock(&nm_i->nid_list_lock);
2601 
2602 	kmem_cache_free(free_nid_slab, i);
2603 }
2604 
2605 /*
2606  * f2fs_alloc_nid() should be called prior to this function.
2607  */
2608 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2609 {
2610 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2611 	struct free_nid *i;
2612 	bool need_free = false;
2613 
2614 	if (!nid)
2615 		return;
2616 
2617 	spin_lock(&nm_i->nid_list_lock);
2618 	i = __lookup_free_nid_list(nm_i, nid);
2619 	f2fs_bug_on(sbi, !i);
2620 
2621 	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2622 		__remove_free_nid(sbi, i, PREALLOC_NID);
2623 		need_free = true;
2624 	} else {
2625 		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2626 	}
2627 
2628 	nm_i->available_nids++;
2629 
2630 	update_free_nid_bitmap(sbi, nid, true, false);
2631 
2632 	spin_unlock(&nm_i->nid_list_lock);
2633 
2634 	if (need_free)
2635 		kmem_cache_free(free_nid_slab, i);
2636 }
2637 
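/*
 * Trim surplus FREE_NID entries in batches of SHRINK_NID_BATCH_SIZE while
 * the count stays above MAX_FREE_NIDS; returns how many were freed, or 0
 * when another thread already holds build_lock.
 */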
2638 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2639 {
2640 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2641 	int nr = nr_shrink;
2642 
2643 	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2644 		return 0;
2645 
2646 	if (!mutex_trylock(&nm_i->build_lock))
2647 		return 0;
2648 
2649 	while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
2650 		struct free_nid *i, *next;
2651 		unsigned int batch = SHRINK_NID_BATCH_SIZE;
2652 
2653 		spin_lock(&nm_i->nid_list_lock);
2654 		list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2655 			if (!nr_shrink || !batch ||
2656 				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2657 				break;
2658 			__remove_free_nid(sbi, i, FREE_NID);
2659 			kmem_cache_free(free_nid_slab, i);
2660 			nr_shrink--;
2661 			batch--;
2662 		}
2663 		spin_unlock(&nm_i->nid_list_lock);
2664 	}
2665 
2666 	mutex_unlock(&nm_i->build_lock);
2667 
2668 	return nr - nr_shrink;
2669 }
2670 
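/*
 * During recovery, make the on-disk inode page agree with the fsync'ed
 * node @page: sync the FI_INLINE_XATTR flag and, when present, copy the
 * inline xattr payload across.
 */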
2671 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
2672 {
2673 	void *src_addr, *dst_addr;
2674 	size_t inline_size;
2675 	struct page *ipage;
2676 	struct f2fs_inode *ri;
2677 
2678 	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
2679 	if (IS_ERR(ipage))
2680 		return PTR_ERR(ipage);
2681 
2682 	ri = F2FS_INODE(page);
2683 	if (ri->i_inline & F2FS_INLINE_XATTR) {
2684 		if (!f2fs_has_inline_xattr(inode)) {
2685 			set_inode_flag(inode, FI_INLINE_XATTR);
2686 			stat_inc_inline_xattr(inode);
2687 		}
2688 	} else {
2689 		if (f2fs_has_inline_xattr(inode)) {
2690 			stat_dec_inline_xattr(inode);
2691 			clear_inode_flag(inode, FI_INLINE_XATTR);
2692 		}
2693 		goto update_inode;
2694 	}
2695 
2696 	dst_addr = inline_xattr_addr(inode, ipage);
2697 	src_addr = inline_xattr_addr(inode, page);
2698 	inline_size = inline_xattr_size(inode);
2699 
2700 	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
2701 	memcpy(dst_addr, src_addr, inline_size);
2702 update_inode:
2703 	f2fs_update_inode(inode, ipage);
2704 	f2fs_put_page(ipage, 1);
2705 	return 0;
2706 }
2707 
2708 int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2709 {
2710 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2711 	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2712 	nid_t new_xnid;
2713 	struct dnode_of_data dn;
2714 	struct node_info ni;
2715 	struct page *xpage;
2716 	int err;
2717 
2718 	if (!prev_xnid)
2719 		goto recover_xnid;
2720 
2721 	/* 1: invalidate the previous xattr nid */
2722 	err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
2723 	if (err)
2724 		return err;
2725 
2726 	f2fs_invalidate_blocks(sbi, ni.blk_addr);
2727 	dec_valid_node_count(sbi, inode, false);
2728 	set_node_addr(sbi, &ni, NULL_ADDR, false);
2729 
2730 recover_xnid:
2731 	/* 2: update xattr nid in inode */
2732 	if (!f2fs_alloc_nid(sbi, &new_xnid))
2733 		return -ENOSPC;
2734 
2735 	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2736 	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
2737 	if (IS_ERR(xpage)) {
2738 		f2fs_alloc_nid_failed(sbi, new_xnid);
2739 		return PTR_ERR(xpage);
2740 	}
2741 
2742 	f2fs_alloc_nid_done(sbi, new_xnid);
2743 	f2fs_update_inode_page(inode);
2744 
2745 	/* 3: update and set xattr node page dirty */
2746 	if (page)
2747 		memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
2748 				VALID_XATTR_BLOCK_SIZE);
2749 
2750 	set_page_dirty(xpage);
2751 	f2fs_put_page(xpage, 1);
2752 
2753 	return 0;
2754 }
2755 
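/*
 * Rebuild the in-memory inode page for an inode found during recovery:
 * copy the static part of @page's raw inode, reset size, links, blocks
 * and xattr nid, and install the node at NEW_ADDR as dirty.
 */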
2756 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2757 {
2758 	struct f2fs_inode *src, *dst;
2759 	nid_t ino = ino_of_node(page);
2760 	struct node_info old_ni, new_ni;
2761 	struct page *ipage;
2762 	int err;
2763 
2764 	err = f2fs_get_node_info(sbi, ino, &old_ni, false);
2765 	if (err)
2766 		return err;
2767 
2768 	if (unlikely(old_ni.blk_addr != NULL_ADDR))
2769 		return -EINVAL;
2770 retry:
2771 	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2772 	if (!ipage) {
2773 		congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2774 		goto retry;
2775 	}
2776 
2777 	/* Should not reuse this ino from the free nid list */
2778 	remove_free_nid(sbi, ino);
2779 
2780 	if (!PageUptodate(ipage))
2781 		SetPageUptodate(ipage);
2782 	fill_node_footer(ipage, ino, ino, 0, true);
2783 	set_cold_node(ipage, false);
2784 
2785 	src = F2FS_INODE(page);
2786 	dst = F2FS_INODE(ipage);
2787 
2788 	memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
2789 	dst->i_size = 0;
2790 	dst->i_blocks = cpu_to_le64(1);
2791 	dst->i_links = cpu_to_le32(1);
2792 	dst->i_xattr_nid = 0;
2793 	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2794 	if (dst->i_inline & F2FS_EXTRA_ATTR) {
2795 		dst->i_extra_isize = src->i_extra_isize;
2796 
2797 		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
2798 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2799 							i_inline_xattr_size))
2800 			dst->i_inline_xattr_size = src->i_inline_xattr_size;
2801 
2802 		if (f2fs_sb_has_project_quota(sbi) &&
2803 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2804 								i_projid))
2805 			dst->i_projid = src->i_projid;
2806 
2807 		if (f2fs_sb_has_inode_crtime(sbi) &&
2808 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2809 							i_crtime_nsec)) {
2810 			dst->i_crtime = src->i_crtime;
2811 			dst->i_crtime_nsec = src->i_crtime_nsec;
2812 		}
2813 	}
2814 
2815 	new_ni = old_ni;
2816 	new_ni.ino = ino;
2817 
2818 	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2819 		WARN_ON(1);
2820 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2821 	inc_valid_inode_count(sbi);
2822 	set_page_dirty(ipage);
2823 	f2fs_put_page(ipage, 1);
2824 	return 0;
2825 }
2826 
2827 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2828 			unsigned int segno, struct f2fs_summary_block *sum)
2829 {
2830 	struct f2fs_node *rn;
2831 	struct f2fs_summary *sum_entry;
2832 	block_t addr;
2833 	int i, idx, last_offset, nrpages;
2834 
2835 	/* scan the node segment */
2836 	last_offset = sbi->blocks_per_seg;
2837 	addr = START_BLOCK(sbi, segno);
2838 	sum_entry = &sum->entries[0];
2839 
2840 	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2841 		nrpages = bio_max_segs(last_offset - i);
2842 
2843 		/* readahead node pages */
2844 		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2845 
2846 		for (idx = addr; idx < addr + nrpages; idx++) {
2847 			struct page *page = f2fs_get_tmp_page(sbi, idx);
2848 
2849 			if (IS_ERR(page))
2850 				return PTR_ERR(page);
2851 
2852 			rn = F2FS_NODE(page);
2853 			sum_entry->nid = rn->footer.nid;
2854 			sum_entry->version = 0;
2855 			sum_entry->ofs_in_node = 0;
2856 			sum_entry++;
2857 			f2fs_put_page(page, 1);
2858 		}
2859 
2860 		invalidate_mapping_pages(META_MAPPING(sbi), addr,
2861 							addr + nrpages);
2862 	}
2863 	return 0;
2864 }
2865 
2866 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2867 {
2868 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2869 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2870 	struct f2fs_journal *journal = curseg->journal;
2871 	int i;
2872 
2873 	down_write(&curseg->journal_rwsem);
2874 	for (i = 0; i < nats_in_cursum(journal); i++) {
2875 		struct nat_entry *ne;
2876 		struct f2fs_nat_entry raw_ne;
2877 		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2878 
2879 		if (f2fs_check_nid_range(sbi, nid))
2880 			continue;
2881 
2882 		raw_ne = nat_in_journal(journal, i);
2883 
2884 		ne = __lookup_nat_cache(nm_i, nid);
2885 		if (!ne) {
2886 			ne = __alloc_nat_entry(sbi, nid, true);
2887 			__init_nat_entry(nm_i, ne, &raw_ne, true);
2888 		}
2889 
2890 		/*
2891 		 * if a free nat in the journal has not been used after the
2892 		 * last checkpoint, we should remove it from the available
2893 		 * nids, since later we will add it again.
2894 		 */
2895 		if (!get_nat_flag(ne, IS_DIRTY) &&
2896 				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
2897 			spin_lock(&nm_i->nid_list_lock);
2898 			nm_i->available_nids--;
2899 			spin_unlock(&nm_i->nid_list_lock);
2900 		}
2901 
2902 		__set_nat_cache_dirty(nm_i, ne);
2903 	}
2904 	update_nats_in_cursum(journal, -i);
2905 	up_write(&curseg->journal_rwsem);
2906 }
2907 
2908 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2909 						struct list_head *head, int max)
2910 {
2911 	struct nat_entry_set *cur;
2912 
2913 	if (nes->entry_cnt >= max)
2914 		goto add_out;
2915 
2916 	list_for_each_entry(cur, head, set_list) {
2917 		if (cur->entry_cnt >= nes->entry_cnt) {
2918 			list_add(&nes->set_list, cur->set_list.prev);
2919 			return;
2920 		}
2921 	}
2922 add_out:
2923 	list_add_tail(&nes->set_list, head);
2924 }
2925 
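/*
 * A NAT block is tracked as "empty" when none of its entries is valid and
 * as "full" when all NAT_ENTRY_PER_BLOCK entries are; anything in between
 * clears both bits.
 */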
2926 static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
2927 							unsigned int valid)
2928 {
2929 	if (valid == 0) {
2930 		__set_bit_le(nat_ofs, nm_i->empty_nat_bits);
2931 		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
2932 		return;
2933 	}
2934 
2935 	__clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
2936 	if (valid == NAT_ENTRY_PER_BLOCK)
2937 		__set_bit_le(nat_ofs, nm_i->full_nat_bits);
2938 	else
2939 		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
2940 }
2941 
2942 static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2943 						struct page *page)
2944 {
2945 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2946 	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
2947 	struct f2fs_nat_block *nat_blk = page_address(page);
2948 	int valid = 0;
2949 	int i = 0;
2950 
2951 	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
2952 		return;
2953 
2954 	if (nat_index == 0) {
2955 		valid = 1;
2956 		i = 1;
2957 	}
2958 	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
2959 		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
2960 			valid++;
2961 	}
2962 
2963 	__update_nat_bits(nm_i, nat_index, valid);
2964 }
2965 
2966 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
2967 {
2968 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2969 	unsigned int nat_ofs;
2970 
2971 	f2fs_down_read(&nm_i->nat_tree_lock);
2972 
2973 	for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
2974 		unsigned int valid = 0, nid_ofs = 0;
2975 
2976 		/* handle nid zero, since it should never be used */
2977 		if (unlikely(nat_ofs == 0)) {
2978 			valid = 1;
2979 			nid_ofs = 1;
2980 		}
2981 
2982 		for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
2983 			if (!test_bit_le(nid_ofs,
2984 					nm_i->free_nid_bitmap[nat_ofs]))
2985 				valid++;
2986 		}
2987 
2988 		__update_nat_bits(nm_i, nat_ofs, valid);
2989 	}
2990 
2991 	f2fs_up_read(&nm_i->nat_tree_lock);
2992 }
2993 
2994 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2995 		struct nat_entry_set *set, struct cp_control *cpc)
2996 {
2997 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2998 	struct f2fs_journal *journal = curseg->journal;
2999 	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
3000 	bool to_journal = true;
3001 	struct f2fs_nat_block *nat_blk;
3002 	struct nat_entry *ne, *cur;
3003 	struct page *page = NULL;
3004 
3005 	/*
3006 	 * there are two steps to flush nat entries:
3007 	 * #1, flush nat entries to journal in current hot data summary block.
3008 	 * #2, flush nat entries to nat page.
3009 	 */
3010 	if ((cpc->reason & CP_UMOUNT) ||
3011 		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
3012 		to_journal = false;
3013 
3014 	if (to_journal) {
3015 		down_write(&curseg->journal_rwsem);
3016 	} else {
3017 		page = get_next_nat_page(sbi, start_nid);
3018 		if (IS_ERR(page))
3019 			return PTR_ERR(page);
3020 
3021 		nat_blk = page_address(page);
3022 		f2fs_bug_on(sbi, !nat_blk);
3023 	}
3024 
3025 	/* flush dirty nats in nat entry set */
3026 	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
3027 		struct f2fs_nat_entry *raw_ne;
3028 		nid_t nid = nat_get_nid(ne);
3029 		int offset;
3030 
3031 		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
3032 
3033 		if (to_journal) {
3034 			offset = f2fs_lookup_journal_in_cursum(journal,
3035 							NAT_JOURNAL, nid, 1);
3036 			f2fs_bug_on(sbi, offset < 0);
3037 			raw_ne = &nat_in_journal(journal, offset);
3038 			nid_in_journal(journal, offset) = cpu_to_le32(nid);
3039 		} else {
3040 			raw_ne = &nat_blk->entries[nid - start_nid];
3041 		}
3042 		raw_nat_from_node_info(raw_ne, &ne->ni);
3043 		nat_reset_flag(ne);
3044 		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
3045 		if (nat_get_blkaddr(ne) == NULL_ADDR) {
3046 			add_free_nid(sbi, nid, false, true);
3047 		} else {
3048 			spin_lock(&NM_I(sbi)->nid_list_lock);
3049 			update_free_nid_bitmap(sbi, nid, false, false);
3050 			spin_unlock(&NM_I(sbi)->nid_list_lock);
3051 		}
3052 	}
3053 
3054 	if (to_journal) {
3055 		up_write(&curseg->journal_rwsem);
3056 	} else {
3057 		update_nat_bits(sbi, start_nid, page);
3058 		f2fs_put_page(page, 1);
3059 	}
3060 
3061 	/* Allow dirty nats by node block allocation in write_begin */
3062 	if (!set->entry_cnt) {
3063 		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
3064 		kmem_cache_free(nat_entry_set_slab, set);
3065 	}
3066 	return 0;
3067 }
3068 
3069 /*
3070  * This function is called during checkpoint to flush dirty nat entries.
3071  */
3072 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3073 {
3074 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3075 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
3076 	struct f2fs_journal *journal = curseg->journal;
3077 	struct nat_entry_set *setvec[SETVEC_SIZE];
3078 	struct nat_entry_set *set, *tmp;
3079 	unsigned int found;
3080 	nid_t set_idx = 0;
3081 	LIST_HEAD(sets);
3082 	int err = 0;
3083 
3084 	/*
3085 	 * during unmount, let's flush nat_bits before checking
3086 	 * nat_cnt[DIRTY_NAT].
3087 	 */
3088 	if (cpc->reason & CP_UMOUNT) {
3089 		f2fs_down_write(&nm_i->nat_tree_lock);
3090 		remove_nats_in_journal(sbi);
3091 		f2fs_up_write(&nm_i->nat_tree_lock);
3092 	}
3093 
3094 	if (!nm_i->nat_cnt[DIRTY_NAT])
3095 		return 0;
3096 
3097 	f2fs_down_write(&nm_i->nat_tree_lock);
3098 
3099 	/*
3100 	 * if there is not enough space in the journal to store dirty nat
3101 	 * entries, remove all entries from journal and merge them
3102 	 * into nat entry set.
3103 	 */
3104 	if (cpc->reason & CP_UMOUNT ||
3105 		!__has_cursum_space(journal,
3106 			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
3107 		remove_nats_in_journal(sbi);
3108 
3109 	while ((found = __gang_lookup_nat_set(nm_i,
3110 					set_idx, SETVEC_SIZE, setvec))) {
3111 		unsigned idx;
3112 
3113 		set_idx = setvec[found - 1]->set + 1;
3114 		for (idx = 0; idx < found; idx++)
3115 			__adjust_nat_entry_set(setvec[idx], &sets,
3116 						MAX_NAT_JENTRIES(journal));
3117 	}
3118 
3119 	/* flush dirty nats in nat entry set */
3120 	list_for_each_entry_safe(set, tmp, &sets, set_list) {
3121 		err = __flush_nat_entry_set(sbi, set, cpc);
3122 		if (err)
3123 			break;
3124 	}
3125 
3126 	f2fs_up_write(&nm_i->nat_tree_lock);
3127 	/* Allow dirty nats by node block allocation in write_begin */
3128 
3129 	return err;
3130 }
3131 
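/*
 * The nat_bits area is stored in the last blocks of the checkpoint
 * segment: an 8-byte checkpoint version followed by full_nat_bits and
 * empty_nat_bits, each holding one bit per NAT block.
 */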
3132 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
3133 {
3134 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3135 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3136 	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
3137 	unsigned int i;
3138 	__u64 cp_ver = cur_cp_version(ckpt);
3139 	block_t nat_bits_addr;
3140 
3141 	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3142 	nm_i->nat_bits = f2fs_kvzalloc(sbi,
3143 			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
3144 	if (!nm_i->nat_bits)
3145 		return -ENOMEM;
3146 
3147 	nm_i->full_nat_bits = nm_i->nat_bits + 8;
3148 	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
3149 
3150 	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
3151 		return 0;
3152 
3153 	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
3154 						nm_i->nat_bits_blocks;
3155 	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
3156 		struct page *page;
3157 
3158 		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
3159 		if (IS_ERR(page))
3160 			return PTR_ERR(page);
3161 
3162 		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
3163 					page_address(page), F2FS_BLKSIZE);
3164 		f2fs_put_page(page, 1);
3165 	}
3166 
3167 	cp_ver |= (cur_cp_crc(ckpt) << 32);
3168 	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
3169 		clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
3170 		f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
3171 			cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
3172 		return 0;
3173 	}
3174 
3175 	f2fs_notice(sbi, "Found nat_bits in checkpoint");
3176 	return 0;
3177 }
3178 
3179 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
3180 {
3181 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3182 	unsigned int i = 0;
3183 	nid_t nid, last_nid;
3184 
3185 	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
3186 		return;
3187 
3188 	for (i = 0; i < nm_i->nat_blocks; i++) {
3189 		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
3190 		if (i >= nm_i->nat_blocks)
3191 			break;
3192 
3193 		__set_bit_le(i, nm_i->nat_block_bitmap);
3194 
3195 		nid = i * NAT_ENTRY_PER_BLOCK;
3196 		last_nid = nid + NAT_ENTRY_PER_BLOCK;
3197 
3198 		spin_lock(&NM_I(sbi)->nid_list_lock);
3199 		for (; nid < last_nid; nid++)
3200 			update_free_nid_bitmap(sbi, nid, true, true);
3201 		spin_unlock(&NM_I(sbi)->nid_list_lock);
3202 	}
3203 
3204 	for (i = 0; i < nm_i->nat_blocks; i++) {
3205 		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
3206 		if (i >= nm_i->nat_blocks)
3207 			break;
3208 
3209 		__set_bit_le(i, nm_i->nat_block_bitmap);
3210 	}
3211 }
3212 
3213 static int init_node_manager(struct f2fs_sb_info *sbi)
3214 {
3215 	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
3216 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3217 	unsigned char *version_bitmap;
3218 	unsigned int nat_segs;
3219 	int err;
3220 
3221 	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
3222 
3223 	/* segment_count_nat includes pair segment so divide by 2. */
3224 	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
3225 	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
3226 	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
3227 
3228 	/* unused nids: 0, node, meta, (and root counted as valid node) */
3229 	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
3230 						F2FS_RESERVED_NODE_NUM;
3231 	nm_i->nid_cnt[FREE_NID] = 0;
3232 	nm_i->nid_cnt[PREALLOC_NID] = 0;
3233 	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
3234 	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
3235 	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
3236 	nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;
3237 
3238 	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
3239 	INIT_LIST_HEAD(&nm_i->free_nid_list);
3240 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
3241 	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
3242 	INIT_LIST_HEAD(&nm_i->nat_entries);
3243 	spin_lock_init(&nm_i->nat_list_lock);
3244 
3245 	mutex_init(&nm_i->build_lock);
3246 	spin_lock_init(&nm_i->nid_list_lock);
3247 	init_f2fs_rwsem(&nm_i->nat_tree_lock);
3248 
3249 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
3250 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
3251 	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
3252 	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
3253 					GFP_KERNEL);
3254 	if (!nm_i->nat_bitmap)
3255 		return -ENOMEM;
3256 
3257 	err = __get_nat_bitmaps(sbi);
3258 	if (err)
3259 		return err;
3260 
3261 #ifdef CONFIG_F2FS_CHECK_FS
3262 	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
3263 					GFP_KERNEL);
3264 	if (!nm_i->nat_bitmap_mir)
3265 		return -ENOMEM;
3266 #endif
3267 
3268 	return 0;
3269 }
3270 
3271 static int init_free_nid_cache(struct f2fs_sb_info *sbi)
3272 {
3273 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3274 	int i;
3275 
3276 	nm_i->free_nid_bitmap =
3277 		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
3278 					      nm_i->nat_blocks),
3279 			      GFP_KERNEL);
3280 	if (!nm_i->free_nid_bitmap)
3281 		return -ENOMEM;
3282 
3283 	for (i = 0; i < nm_i->nat_blocks; i++) {
3284 		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3285 			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
3286 		if (!nm_i->free_nid_bitmap[i])
3287 			return -ENOMEM;
3288 	}
3289 
3290 	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
3291 								GFP_KERNEL);
3292 	if (!nm_i->nat_block_bitmap)
3293 		return -ENOMEM;
3294 
3295 	nm_i->free_nid_count =
3296 		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
3297 					      nm_i->nat_blocks),
3298 			      GFP_KERNEL);
3299 	if (!nm_i->free_nid_count)
3300 		return -ENOMEM;
3301 	return 0;
3302 }
3303 
3304 int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
3305 {
3306 	int err;
3307 
3308 	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
3309 							GFP_KERNEL);
3310 	if (!sbi->nm_info)
3311 		return -ENOMEM;
3312 
3313 	err = init_node_manager(sbi);
3314 	if (err)
3315 		return err;
3316 
3317 	err = init_free_nid_cache(sbi);
3318 	if (err)
3319 		return err;
3320 
3321 	/* load free nid status from nat_bits table */
3322 	load_free_nid_bitmap(sbi);
3323 
3324 	return f2fs_build_free_nids(sbi, true, true);
3325 }
3326 
3327 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
3328 {
3329 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3330 	struct free_nid *i, *next_i;
3331 	struct nat_entry *natvec[NATVEC_SIZE];
3332 	struct nat_entry_set *setvec[SETVEC_SIZE];
3333 	nid_t nid = 0;
3334 	unsigned int found;
3335 
3336 	if (!nm_i)
3337 		return;
3338 
3339 	/* destroy free nid list */
3340 	spin_lock(&nm_i->nid_list_lock);
3341 	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
3342 		__remove_free_nid(sbi, i, FREE_NID);
3343 		spin_unlock(&nm_i->nid_list_lock);
3344 		kmem_cache_free(free_nid_slab, i);
3345 		spin_lock(&nm_i->nid_list_lock);
3346 	}
3347 	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
3348 	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
3349 	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
3350 	spin_unlock(&nm_i->nid_list_lock);
3351 
3352 	/* destroy nat cache */
3353 	f2fs_down_write(&nm_i->nat_tree_lock);
3354 	while ((found = __gang_lookup_nat_cache(nm_i,
3355 					nid, NATVEC_SIZE, natvec))) {
3356 		unsigned idx;
3357 
3358 		nid = nat_get_nid(natvec[found - 1]) + 1;
3359 		for (idx = 0; idx < found; idx++) {
3360 			spin_lock(&nm_i->nat_list_lock);
3361 			list_del(&natvec[idx]->list);
3362 			spin_unlock(&nm_i->nat_list_lock);
3363 
3364 			__del_from_nat_cache(nm_i, natvec[idx]);
3365 		}
3366 	}
3367 	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
3368 
3369 	/* destroy nat set cache */
3370 	nid = 0;
3371 	while ((found = __gang_lookup_nat_set(nm_i,
3372 					nid, SETVEC_SIZE, setvec))) {
3373 		unsigned idx;
3374 
3375 		nid = setvec[found - 1]->set + 1;
3376 		for (idx = 0; idx < found; idx++) {
3377 			/* entry_cnt is not zero when a cp_error has occurred */
3378 			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
3379 			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
3380 			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
3381 		}
3382 	}
3383 	f2fs_up_write(&nm_i->nat_tree_lock);
3384 
3385 	kvfree(nm_i->nat_block_bitmap);
3386 	if (nm_i->free_nid_bitmap) {
3387 		int i;
3388 
3389 		for (i = 0; i < nm_i->nat_blocks; i++)
3390 			kvfree(nm_i->free_nid_bitmap[i]);
3391 		kvfree(nm_i->free_nid_bitmap);
3392 	}
3393 	kvfree(nm_i->free_nid_count);
3394 
3395 	kvfree(nm_i->nat_bitmap);
3396 	kvfree(nm_i->nat_bits);
3397 #ifdef CONFIG_F2FS_CHECK_FS
3398 	kvfree(nm_i->nat_bitmap_mir);
3399 #endif
3400 	sbi->nm_info = NULL;
3401 	kfree(nm_i);
3402 }
3403 
3404 int __init f2fs_create_node_manager_caches(void)
3405 {
3406 	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
3407 			sizeof(struct nat_entry));
3408 	if (!nat_entry_slab)
3409 		goto fail;
3410 
3411 	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
3412 			sizeof(struct free_nid));
3413 	if (!free_nid_slab)
3414 		goto destroy_nat_entry;
3415 
3416 	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
3417 			sizeof(struct nat_entry_set));
3418 	if (!nat_entry_set_slab)
3419 		goto destroy_free_nid;
3420 
3421 	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
3422 			sizeof(struct fsync_node_entry));
3423 	if (!fsync_node_entry_slab)
3424 		goto destroy_nat_entry_set;
3425 	return 0;
3426 
3427 destroy_nat_entry_set:
3428 	kmem_cache_destroy(nat_entry_set_slab);
3429 destroy_free_nid:
3430 	kmem_cache_destroy(free_nid_slab);
3431 destroy_nat_entry:
3432 	kmem_cache_destroy(nat_entry_slab);
3433 fail:
3434 	return -ENOMEM;
3435 }
3436 
3437 void f2fs_destroy_node_manager_caches(void)
3438 {
3439 	kmem_cache_destroy(fsync_node_entry_slab);
3440 	kmem_cache_destroy(nat_entry_set_slab);
3441 	kmem_cache_destroy(free_nid_slab);
3442 	kmem_cache_destroy(nat_entry_slab);
3443 }
3444