// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within the valid node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
		return -EFSCORRUPTED;
	}
	return 0;
}

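/*
 * A worked example of the thresholds below (a sketch derived from this
 * function only, not from any external spec): with ram_thresh = 10, the
 * FREE_NIDS and NAT_ENTRIES caches may each consume up to
 * (lowmem * 10 / 100) >> 2, i.e. 2.5% of low memory, while DIRTY_DENTS
 * and INO_ENTRIES get the >> 1 variant, i.e. 5%.  All sizes are compared
 * in units of pages (mem_size is shifted down by PAGE_SHIFT).
 */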
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	if (!nm_i)
		return true;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 25%, 25% memory for each component respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
				sizeof(struct nat_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) {
		enum extent_type etype = type == READ_EXTENT_CACHE ?
						EX_READ : EX_BLOCK_AGE;
		struct extent_tree_info *eti = &sbi->extent_tree[etype];

		mem_size = (atomic_read(&eti->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&eti->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == INMEM_PAGES) {
		/* it allows 20% of total_ram for in-memory pages */
		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
		res = mem_size < (val.totalram / 5);
	} else if (type == DISCARD_CACHE) {
		mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
				sizeof(struct discard_cmd)) >> PAGE_SHIFT;
		res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
	} else if (type == COMPRESS_PAGE) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		unsigned long free_ram = val.freeram;

		/*
		 * If free memory is lower than the watermark or the cached
		 * page count exceeds the threshold, deny caching compressed
		 * pages.
		 */
		res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
			(COMPRESS_MAPPING(sbi)->nrpages <
			 free_ram * sbi->compress_percent / 100);
#else
		res = false;
#endif
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}

static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_page_cache_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	if (no_fail)
		new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	else
		new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt[TOTAL_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* for a recently accessed nat entry, move it to the tail of the LRU list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt[TOTAL_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	__free_nat_entry(e);
}

static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}

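/*
 * Note: dirty nat entries are grouped into nat_entry_set objects, one per
 * on-disk NAT block (keyed by NAT_BLOCK_OFFSET above), so the checkpoint
 * flush path can write back a whole NAT block worth of entries at once.
 */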
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt under the following conditions:
	 * 1. update NEW_ADDR to a valid block address;
	 * 2. update an old block address to a new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->nat_cnt[DIRTY_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->nat_cnt[DIRTY_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}

void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	/* Let's mitigate lock contention of nat_tree_lock during checkpoint */
	if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
		return;

	new = __alloc_nat_entry(nid, false);
	if (!new)
		return;

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	f2fs_up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}

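/*
 * Transitions the sanity checks below are guarding (a summary of the code,
 * not an external spec): NULL_ADDR -> NEW_ADDR (allocated, not yet
 * written), NEW_ADDR -> valid (written back), valid -> valid (rewritten
 * elsewhere), and anything -> NULL_ADDR (truncated; the version byte is
 * bumped when a written node is removed).  NULL -> NULL, NEW -> NEW and
 * valid -> NEW are all treated as bugs.
 */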
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(ni->nid, true);

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So, reinitialize it with the new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);

		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	f2fs_up_write(&nm_i->nat_tree_lock);
}

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	f2fs_up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

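/*
 * Resolve a node_info in three steps, cheapest first: 1) the in-memory nat
 * cache, 2) the NAT journal kept in the hot data curseg summary, 3) the
 * on-disk NAT block itself.  Results from steps 2) and 3) are inserted
 * back into the cache at the end.
 */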
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
				struct node_info *ni, bool checkpoint_context)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	block_t blkaddr;
	int i;

	ni->nid = nid;
retry:
	/* Check nat cache */
	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		f2fs_up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	/*
	 * Check the current segment summary by trying to grab journal_rwsem
	 * first. This rwsem is on the checkpoint's critical path together
	 * with the nat_tree_lock above, so if we fail to grab it here, retry
	 * rather than stalling the checkpoint.
	 */
	if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
		down_read(&curseg->journal_rwsem);
	} else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
				!down_read_trylock(&curseg->journal_rwsem)) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto retry;
	}

	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	f2fs_up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}

/*
 * Readahead MAX_RA_NODE node pages.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		fallthrough;
	case 2:
		base += 2 * direct_blks;
		fallthrough;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}

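/*
 * Illustrative layout assumed by get_node_path() (a sketch derived from
 * the code below; the actual constants come from f2fs.h and may differ
 * with extra inode features): an inode addresses direct_index blocks
 * itself, then two direct nodes of direct_blks each, two indirect nodes
 * of direct_blks * NIDS_PER_BLOCK each, and one double-indirect node.
 * E.g. block == direct_index lands at offset 0 of NODE_DIR1_BLOCK.
 */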
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}

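/*
 * Typical caller pattern for the function below (a sketch only; see the
 * real callers in data.c for details):
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	f2fs_lock_op(sbi);
 *	err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
 *	if (!err) {
 *		// use dn.data_blkaddr / dn.ofs_in_node here
 *		f2fs_put_dnode(&dn);
 *	}
 *	f2fs_unlock_op(sbi);
 */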
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = f2fs_data_blkaddr(dn);

	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
					f2fs_sb_has_readonly(sbi)) {
		unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
		block_t blkaddr;

		if (!c_len)
			goto out;

		blkaddr = f2fs_data_blkaddr(dn);
		if (blkaddr == COMPRESS_ADDR)
			blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + 1);

		f2fs_update_read_extent_tree_range_compressed(dn->inode,
					index, blkaddr,
					F2FS_I(dn->inode)->i_cluster_size,
					c_len);
	}
out:
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}

static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
	if (err)
		return err;

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks(dn);
	err = truncate_node(dn);
	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}

	return 1;
}

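/*
 * Recursively free a (double-)indirect subtree.  The return value feeds
 * the caller's nofs accounting: a fully freed child subtree reports
 * NIDS_PER_BLOCK + 1, which is also what a hole (nid == 0) reports, so the
 * node-offset bookkeeping stays aligned either way.
 */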
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* the reference count will be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

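/*
 * Truncation strategy (summarizing the code below): first trim the
 * partially covered indirect chain at 'from' via truncate_partial_nodes(),
 * then drop every later subtree whole, walking offset[0] from
 * NODE_DIR1_BLOCK up to NODE_DIND_BLOCK.
 */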
/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0) {
		trace_f2fs_truncate_inode_blocks_exit(inode, level);
		return level;
	}

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}

struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
		err = -EFSCORRUPTED;
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		goto fail;
	}
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should act on the return value as follows:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (PageUptodate(page)) {
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			ClearPageUptodate(page);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, page->index, &ni, false);
	if (err)
		return err;

	/* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

	err = f2fs_submit_page_bio(&fio);

	if (!err)
		f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE);

	return err;
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	if (unlikely(nid != nid_of_node(page))) {
		f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			  nid, nid_of_node(page), ino_of_node(page),
			  ofs_of_node(page), cpver_of_node(page),
			  next_blkaddr_of_node(page));
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		err = -EINVAL;
out_err:
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	return page;
}

struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct pagevec pvec;
	struct page *last_page = NULL;
	int nr_pages;

	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}

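/*
 * Write-back of one node page: look up the old block address via
 * f2fs_get_node_info(), allocate and write the new block with
 * f2fs_do_write_node_page(), then record the new address in the nat cache.
 * node_write is taken as a reader so a checkpoint (the writer) can fence
 * off all node writes.
 */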
static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(page, NODE);

	if (unlikely(f2fs_cp_error(sbi))) {
		if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
			ClearPageUptodate(page);
			dec_page_count(sbi, F2FS_DIRTY_NODES);
			unlock_page(page);
			return 0;
		}
		goto redirty_out;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
		goto redirty_out;

	if (wbc->for_reclaim) {
		if (!f2fs_down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		f2fs_down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		f2fs_up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		f2fs_up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	set_page_writeback(page);
	ClearPageError(page);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	f2fs_up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		submitted = NULL;
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
}

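/*
 * For an atomic fsync, only the last dnode carries the fsync mark; if that
 * page was written out by someone else meanwhile (!marked), it is
 * re-dirtied and the scan is retried so recovery can still find a marked
 * terminator.
 */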
f2fs_fsync_node_pages(struct f2fs_sb_info * sbi,struct inode * inode,struct writeback_control * wbc,bool atomic,unsigned int * seq_id)1731 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
1732 			struct writeback_control *wbc, bool atomic,
1733 			unsigned int *seq_id)
1734 {
1735 	pgoff_t index;
1736 	struct pagevec pvec;
1737 	int ret = 0;
1738 	struct page *last_page = NULL;
1739 	bool marked = false;
1740 	nid_t ino = inode->i_ino;
1741 	int nr_pages;
1742 	int nwritten = 0;
1743 
1744 	if (atomic) {
1745 		last_page = last_fsync_dnode(sbi, ino);
1746 		if (IS_ERR_OR_NULL(last_page))
1747 			return PTR_ERR_OR_ZERO(last_page);
1748 	}
1749 retry:
1750 	pagevec_init(&pvec);
1751 	index = 0;
1752 
1753 	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1754 				PAGECACHE_TAG_DIRTY))) {
1755 		int i;
1756 
1757 		for (i = 0; i < nr_pages; i++) {
1758 			struct page *page = pvec.pages[i];
1759 			bool submitted = false;
1760 
1761 			if (unlikely(f2fs_cp_error(sbi))) {
1762 				f2fs_put_page(last_page, 0);
1763 				pagevec_release(&pvec);
1764 				ret = -EIO;
1765 				goto out;
1766 			}
1767 
1768 			if (!IS_DNODE(page) || !is_cold_node(page))
1769 				continue;
1770 			if (ino_of_node(page) != ino)
1771 				continue;
1772 
1773 			lock_page(page);
1774 
1775 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1776 continue_unlock:
1777 				unlock_page(page);
1778 				continue;
1779 			}
1780 			if (ino_of_node(page) != ino)
1781 				goto continue_unlock;
1782 
1783 			if (!PageDirty(page) && page != last_page) {
1784 				/* someone wrote it for us */
1785 				goto continue_unlock;
1786 			}
1787 
1788 			f2fs_wait_on_page_writeback(page, NODE, true, true);
1789 
1790 			set_fsync_mark(page, 0);
1791 			set_dentry_mark(page, 0);
1792 
1793 			if (!atomic || page == last_page) {
1794 				set_fsync_mark(page, 1);
1795 				if (IS_INODE(page)) {
1796 					if (is_inode_flag_set(inode,
1797 								FI_DIRTY_INODE))
1798 						f2fs_update_inode(inode, page);
1799 					set_dentry_mark(page,
1800 						f2fs_need_dentry_mark(sbi, ino));
1801 				}
1802 				/* may be written by other thread */
1803 				if (!PageDirty(page))
1804 					set_page_dirty(page);
1805 			}
1806 
1807 			if (!clear_page_dirty_for_io(page))
1808 				goto continue_unlock;
1809 
1810 			ret = __write_node_page(page, atomic &&
1811 						page == last_page,
1812 						&submitted, wbc, true,
1813 						FS_NODE_IO, seq_id);
1814 			if (ret) {
1815 				unlock_page(page);
1816 				f2fs_put_page(last_page, 0);
1817 				break;
1818 			} else if (submitted) {
1819 				nwritten++;
1820 			}
1821 
1822 			if (page == last_page) {
1823 				f2fs_put_page(page, 0);
1824 				marked = true;
1825 				break;
1826 			}
1827 		}
1828 		pagevec_release(&pvec);
1829 		cond_resched();
1830 
1831 		if (ret || marked)
1832 			break;
1833 	}
1834 	if (!ret && atomic && !marked) {
1835 		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
1836 			   ino, last_page->index);
1837 		lock_page(last_page);
1838 		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
1839 		set_page_dirty(last_page);
1840 		unlock_page(last_page);
1841 		goto retry;
1842 	}
1843 out:
1844 	if (nwritten)
1845 		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
1846 	return ret ? -EIO : 0;
1847 }
1848 
f2fs_match_ino(struct inode * inode,unsigned long ino,void * data)1849 static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
1850 {
1851 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1852 	bool clean;
1853 
1854 	if (inode->i_ino != ino)
1855 		return 0;
1856 
1857 	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
1858 		return 0;
1859 
1860 	spin_lock(&sbi->inode_lock[DIRTY_META]);
1861 	clean = list_empty(&F2FS_I(inode)->gdirty_list);
1862 	spin_unlock(&sbi->inode_lock[DIRTY_META]);
1863 
1864 	if (clean)
1865 		return 0;
1866 
1867 	inode = igrab(inode);
1868 	if (!inode)
1869 		return 0;
1870 	return 1;
1871 }
1872 
flush_dirty_inode(struct page * page)1873 static bool flush_dirty_inode(struct page *page)
1874 {
1875 	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1876 	struct inode *inode;
1877 	nid_t ino = ino_of_node(page);
1878 
1879 	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
1880 	if (!inode)
1881 		return false;
1882 
1883 	f2fs_update_inode(inode, page);
1884 	unlock_page(page);
1885 
1886 	iput(inode);
1887 	return true;
1888 }
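
/*
 * A minimal sketch (illustrative only) of the find_inode_nowait() match
 * contract that f2fs_match_ino() above relies on: the callback runs with
 * the inode hash lock held and must not sleep; returning 1 hands the inode
 * to the caller, so the callback has to pin it via igrab() itself, since
 * find_inode_nowait() takes no reference on the caller's behalf.
 */
static int match_ino_sketch(struct inode *inode, unsigned long ino, void *data)
{
	if (inode->i_ino != ino)
		return 0;		/* keep walking the hash chain */
	return igrab(inode) ? 1 : 0;	/* pinned, or inode is being freed */
}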
1889 
1890 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
1891 {
1892 	pgoff_t index = 0;
1893 	struct pagevec pvec;
1894 	int nr_pages;
1895 
1896 	pagevec_init(&pvec);
1897 
1898 	while ((nr_pages = pagevec_lookup_tag(&pvec,
1899 			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
1900 		int i;
1901 
1902 		for (i = 0; i < nr_pages; i++) {
1903 			struct page *page = pvec.pages[i];
1904 
1905 			if (!IS_DNODE(page))
1906 				continue;
1907 
1908 			lock_page(page);
1909 
1910 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1911 continue_unlock:
1912 				unlock_page(page);
1913 				continue;
1914 			}
1915 
1916 			if (!PageDirty(page)) {
1917 				/* someone wrote it for us */
1918 				goto continue_unlock;
1919 			}
1920 
1921 			/* flush inline_data, if it's async context. */
1922 			if (page_private_inline(page)) {
1923 				clear_page_private_inline(page);
1924 				unlock_page(page);
1925 				flush_inline_data(sbi, ino_of_node(page));
1926 				continue;
1927 			}
1928 			unlock_page(page);
1929 		}
1930 		pagevec_release(&pvec);
1931 		cond_resched();
1932 	}
1933 }
1934 
1935 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
1936 				struct writeback_control *wbc,
1937 				bool do_balance, enum iostat_type io_type)
1938 {
1939 	pgoff_t index;
1940 	struct pagevec pvec;
1941 	int step = 0;
1942 	int nwritten = 0;
1943 	int ret = 0;
1944 	int nr_pages, done = 0;
1945 
1946 	pagevec_init(&pvec);
1947 
1948 next_step:
1949 	index = 0;
1950 
1951 	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
1952 			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
1953 		int i;
1954 
1955 		for (i = 0; i < nr_pages; i++) {
1956 			struct page *page = pvec.pages[i];
1957 			bool submitted = false;
1958 			bool may_dirty = true;
1959 
1960 			/* give priority to WB_SYNC threads */
1961 			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
1962 					wbc->sync_mode == WB_SYNC_NONE) {
1963 				done = 1;
1964 				break;
1965 			}
1966 
1967 			/*
1968 			 * flushing sequence with step:
1969 			 * 0. indirect nodes
1970 			 * 1. dentry dnodes
1971 			 * 2. file dnodes
1972 			 */
1973 			if (step == 0 && IS_DNODE(page))
1974 				continue;
1975 			if (step == 1 && (!IS_DNODE(page) ||
1976 						is_cold_node(page)))
1977 				continue;
1978 			if (step == 2 && (!IS_DNODE(page) ||
1979 						!is_cold_node(page)))
1980 				continue;
1981 lock_node:
1982 			if (wbc->sync_mode == WB_SYNC_ALL)
1983 				lock_page(page);
1984 			else if (!trylock_page(page))
1985 				continue;
1986 
1987 			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1988 continue_unlock:
1989 				unlock_page(page);
1990 				continue;
1991 			}
1992 
1993 			if (!PageDirty(page)) {
1994 				/* someone wrote it for us */
1995 				goto continue_unlock;
1996 			}
1997 
1998 			/* flush inline_data/inode, if it's async context. */
1999 			if (!do_balance)
2000 				goto write_node;
2001 
2002 			/* flush inline_data */
2003 			if (page_private_inline(page)) {
2004 				clear_page_private_inline(page);
2005 				unlock_page(page);
2006 				flush_inline_data(sbi, ino_of_node(page));
2007 				goto lock_node;
2008 			}
2009 
2010 			/* flush dirty inode */
2011 			if (IS_INODE(page) && may_dirty) {
2012 				may_dirty = false;
2013 				if (flush_dirty_inode(page))
2014 					goto lock_node;
2015 			}
2016 write_node:
2017 			f2fs_wait_on_page_writeback(page, NODE, true, true);
2018 
2019 			if (!clear_page_dirty_for_io(page))
2020 				goto continue_unlock;
2021 
2022 			set_fsync_mark(page, 0);
2023 			set_dentry_mark(page, 0);
2024 
2025 			ret = __write_node_page(page, false, &submitted,
2026 						wbc, do_balance, io_type, NULL);
2027 			if (ret)
2028 				unlock_page(page);
2029 			else if (submitted)
2030 				nwritten++;
2031 
2032 			if (--wbc->nr_to_write == 0)
2033 				break;
2034 		}
2035 		pagevec_release(&pvec);
2036 		cond_resched();
2037 
2038 		if (wbc->nr_to_write == 0) {
2039 			step = 2;
2040 			break;
2041 		}
2042 	}
2043 
2044 	if (step < 2) {
2045 		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2046 				wbc->sync_mode == WB_SYNC_NONE && step == 1)
2047 			goto out;
2048 		step++;
2049 		goto next_step;
2050 	}
2051 out:
2052 	if (nwritten)
2053 		f2fs_submit_merged_write(sbi, NODE);
2054 
2055 	if (unlikely(f2fs_cp_error(sbi)))
2056 		return -EIO;
2057 	return ret;
2058 }
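
/*
 * A minimal sketch (illustrative helper, not in f2fs) condensing the step
 * filter applied above into a positive predicate: step 0 passes indirect
 * node pages, step 1 hot (dentry) dnodes, and step 2 cold (regular file)
 * dnodes.
 */
static bool page_matches_step_sketch(struct page *page, int step)
{
	switch (step) {
	case 0:
		return !IS_DNODE(page);
	case 1:
		return IS_DNODE(page) && !is_cold_node(page);
	case 2:
		return IS_DNODE(page) && is_cold_node(page);
	}
	return false;
}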
2059 
2060 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
2061 						unsigned int seq_id)
2062 {
2063 	struct fsync_node_entry *fn;
2064 	struct page *page;
2065 	struct list_head *head = &sbi->fsync_node_list;
2066 	unsigned long flags;
2067 	unsigned int cur_seq_id = 0;
2068 	int ret2, ret = 0;
2069 
2070 	while (seq_id && cur_seq_id < seq_id) {
2071 		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
2072 		if (list_empty(head)) {
2073 			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2074 			break;
2075 		}
2076 		fn = list_first_entry(head, struct fsync_node_entry, list);
2077 		if (fn->seq_id > seq_id) {
2078 			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2079 			break;
2080 		}
2081 		cur_seq_id = fn->seq_id;
2082 		page = fn->page;
2083 		get_page(page);
2084 		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2085 
2086 		f2fs_wait_on_page_writeback(page, NODE, true, false);
2087 		if (TestClearPageError(page))
2088 			ret = -EIO;
2089 
2090 		put_page(page);
2091 
2092 		if (ret)
2093 			break;
2094 	}
2095 
2096 	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
2097 	if (!ret)
2098 		ret = ret2;
2099 
2100 	return ret;
2101 }
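
/*
 * A minimal sketch (illustrative only) of the snapshot pattern used above:
 * hold the spinlock just long enough to peek at the first entry and pin its
 * page, then drop the lock so the potentially long writeback wait happens
 * with only the page reference keeping the page alive.
 */
static struct page *snapshot_first_fsync_page_sketch(struct f2fs_sb_info *sbi)
{
	struct fsync_node_entry *fn;
	struct page *page = NULL;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	if (!list_empty(&sbi->fsync_node_list)) {
		fn = list_first_entry(&sbi->fsync_node_list,
					struct fsync_node_entry, list);
		page = fn->page;
		get_page(page);		/* pin before dropping the lock */
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	return page;			/* caller must put_page() */
}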
2102 
2103 static int f2fs_write_node_pages(struct address_space *mapping,
2104 			    struct writeback_control *wbc)
2105 {
2106 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2107 	struct blk_plug plug;
2108 	long diff;
2109 
2110 	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2111 		goto skip_write;
2112 
2113 	/* balancing f2fs's metadata in background */
2114 	f2fs_balance_fs_bg(sbi, true);
2115 
2116 	/* collect a number of dirty node pages and write them together */
2117 	if (wbc->sync_mode != WB_SYNC_ALL &&
2118 			get_pages(sbi, F2FS_DIRTY_NODES) <
2119 					nr_pages_to_skip(sbi, NODE))
2120 		goto skip_write;
2121 
2122 	if (wbc->sync_mode == WB_SYNC_ALL)
2123 		atomic_inc(&sbi->wb_sync_req[NODE]);
2124 	else if (atomic_read(&sbi->wb_sync_req[NODE])) {
2125 		/* to avoid potential deadlock */
2126 		if (current->plug)
2127 			blk_finish_plug(current->plug);
2128 		goto skip_write;
2129 	}
2130 
2131 	trace_f2fs_writepages(mapping->host, wbc, NODE);
2132 
2133 	diff = nr_pages_to_write(sbi, NODE, wbc);
2134 	blk_start_plug(&plug);
2135 	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
2136 	blk_finish_plug(&plug);
2137 	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
2138 
2139 	if (wbc->sync_mode == WB_SYNC_ALL)
2140 		atomic_dec(&sbi->wb_sync_req[NODE]);
2141 	return 0;
2142 
2143 skip_write:
2144 	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
2145 	trace_f2fs_writepages(mapping->host, wbc, NODE);
2146 	return 0;
2147 }
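
/*
 * A minimal sketch (illustrative; the counter is hypothetical) of the
 * writeback-priority handshake above: sync flushers bump a counter for the
 * duration of their flush, and async flushers that observe it non-zero back
 * off, so WB_SYNC_ALL threads never compete with background writeback.
 */
static atomic_t wb_sync_req_sketch = ATOMIC_INIT(0);

static int writepages_sketch(struct writeback_control *wbc)
{
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&wb_sync_req_sketch);
	else if (atomic_read(&wb_sync_req_sketch))
		return 0;	/* a sync flush is in flight: skip */

	/* ... issue the writeback here ... */

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&wb_sync_req_sketch);
	return 0;
}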
2148 
2149 static int f2fs_set_node_page_dirty(struct page *page)
2150 {
2151 	trace_f2fs_set_page_dirty(page, NODE);
2152 
2153 	if (!PageUptodate(page))
2154 		SetPageUptodate(page);
2155 #ifdef CONFIG_F2FS_CHECK_FS
2156 	if (IS_INODE(page))
2157 		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
2158 #endif
2159 	if (!PageDirty(page)) {
2160 		__set_page_dirty_nobuffers(page);
2161 		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
2162 		set_page_private_reference(page);
2163 		return 1;
2164 	}
2165 	return 0;
2166 }
2167 
2168 /*
2169  * Structure of the f2fs node operations
2170  */
2171 const struct address_space_operations f2fs_node_aops = {
2172 	.writepage	= f2fs_write_node_page,
2173 	.writepages	= f2fs_write_node_pages,
2174 	.set_page_dirty	= f2fs_set_node_page_dirty,
2175 	.invalidatepage	= f2fs_invalidate_page,
2176 	.releasepage	= f2fs_release_page,
2177 #ifdef CONFIG_MIGRATION
2178 	.migratepage	= f2fs_migrate_page,
2179 #endif
2180 };
2181 
2182 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
2183 						nid_t n)
2184 {
2185 	return radix_tree_lookup(&nm_i->free_nid_root, n);
2186 }
2187 
2188 static int __insert_free_nid(struct f2fs_sb_info *sbi,
2189 				struct free_nid *i)
2190 {
2191 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2192 	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
2193 
2194 	if (err)
2195 		return err;
2196 
2197 	nm_i->nid_cnt[FREE_NID]++;
2198 	list_add_tail(&i->list, &nm_i->free_nid_list);
2199 	return 0;
2200 }
2201 
2202 static void __remove_free_nid(struct f2fs_sb_info *sbi,
2203 			struct free_nid *i, enum nid_state state)
2204 {
2205 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2206 
2207 	f2fs_bug_on(sbi, state != i->state);
2208 	nm_i->nid_cnt[state]--;
2209 	if (state == FREE_NID)
2210 		list_del(&i->list);
2211 	radix_tree_delete(&nm_i->free_nid_root, i->nid);
2212 }
2213 
2214 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
2215 			enum nid_state org_state, enum nid_state dst_state)
2216 {
2217 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2218 
2219 	f2fs_bug_on(sbi, org_state != i->state);
2220 	i->state = dst_state;
2221 	nm_i->nid_cnt[org_state]--;
2222 	nm_i->nid_cnt[dst_state]++;
2223 
2224 	switch (dst_state) {
2225 	case PREALLOC_NID:
2226 		list_del(&i->list);
2227 		break;
2228 	case FREE_NID:
2229 		list_add_tail(&i->list, &nm_i->free_nid_list);
2230 		break;
2231 	default:
2232 		BUG_ON(1);
2233 	}
2234 }
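
/*
 * A minimal sketch (illustrative; locking and error handling elided) of the
 * free_nid lifecycle the three helpers above implement: FREE_NID entries
 * live on both free_nid_root and free_nid_list, while PREALLOC_NID entries
 * remain reachable only through the radix tree.
 */
static void nid_lifecycle_sketch(struct f2fs_sb_info *sbi, struct free_nid *i)
{
	__insert_free_nid(sbi, i);			 /* FREE_NID, listed */
	__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID); /* handed to a caller */
	__remove_free_nid(sbi, i, PREALLOC_NID);	 /* nid consumed */
	kmem_cache_free(free_nid_slab, i);
}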
2235 
2236 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
2237 							bool set, bool build)
2238 {
2239 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2240 	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
2241 	unsigned int nid_ofs = nid - START_NID(nid);
2242 
2243 	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2244 		return;
2245 
2246 	if (set) {
2247 		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2248 			return;
2249 		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2250 		nm_i->free_nid_count[nat_ofs]++;
2251 	} else {
2252 		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2253 			return;
2254 		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2255 		if (!build)
2256 			nm_i->free_nid_count[nat_ofs]--;
2257 	}
2258 }
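
/*
 * A minimal sketch (illustrative only) of the nid decomposition used by
 * update_free_nid_bitmap() above: each NAT block covers NAT_ENTRY_PER_BLOCK
 * nids, so a nid splits into the index of its NAT block plus an offset into
 * that block's free-nid bitmap.
 */
static void nid_to_bitmap_pos_sketch(nid_t nid, unsigned int *nat_ofs,
					unsigned int *nid_ofs)
{
	*nat_ofs = NAT_BLOCK_OFFSET(nid);	/* nid / NAT_ENTRY_PER_BLOCK */
	*nid_ofs = nid - START_NID(nid);	/* nid % NAT_ENTRY_PER_BLOCK */
}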
2259 
2260 /* return whether the nid is recognized as free */
2261 static bool add_free_nid(struct f2fs_sb_info *sbi,
2262 				nid_t nid, bool build, bool update)
2263 {
2264 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2265 	struct free_nid *i, *e;
2266 	struct nat_entry *ne;
2267 	int err = -EINVAL;
2268 	bool ret = false;
2269 
2270 	/* 0 nid should not be used */
2271 	if (unlikely(nid == 0))
2272 		return false;
2273 
2274 	if (unlikely(f2fs_check_nid_range(sbi, nid)))
2275 		return false;
2276 
2277 	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
2278 	i->nid = nid;
2279 	i->state = FREE_NID;
2280 
2281 	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2282 
2283 	spin_lock(&nm_i->nid_list_lock);
2284 
2285 	if (build) {
2286 		/*
2287 		 *   Thread A             Thread B
2288 		 *  - f2fs_create
2289 		 *   - f2fs_new_inode
2290 		 *    - f2fs_alloc_nid
2291 		 *     - __insert_nid_to_list(PREALLOC_NID)
2292 		 *                     - f2fs_balance_fs_bg
2293 		 *                      - f2fs_build_free_nids
2294 		 *                       - __f2fs_build_free_nids
2295 		 *                        - scan_nat_page
2296 		 *                         - add_free_nid
2297 		 *                          - __lookup_nat_cache
2298 		 *  - f2fs_add_link
2299 		 *   - f2fs_init_inode_metadata
2300 		 *    - f2fs_new_inode_page
2301 		 *     - f2fs_new_node_page
2302 		 *      - set_node_addr
2303 		 *  - f2fs_alloc_nid_done
2304 		 *   - __remove_nid_from_list(PREALLOC_NID)
2305 		 *                         - __insert_nid_to_list(FREE_NID)
2306 		 */
2307 		ne = __lookup_nat_cache(nm_i, nid);
2308 		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2309 				nat_get_blkaddr(ne) != NULL_ADDR))
2310 			goto err_out;
2311 
2312 		e = __lookup_free_nid_list(nm_i, nid);
2313 		if (e) {
2314 			if (e->state == FREE_NID)
2315 				ret = true;
2316 			goto err_out;
2317 		}
2318 	}
2319 	ret = true;
2320 	err = __insert_free_nid(sbi, i);
2321 err_out:
2322 	if (update) {
2323 		update_free_nid_bitmap(sbi, nid, ret, build);
2324 		if (!build)
2325 			nm_i->available_nids++;
2326 	}
2327 	spin_unlock(&nm_i->nid_list_lock);
2328 	radix_tree_preload_end();
2329 
2330 	if (err)
2331 		kmem_cache_free(free_nid_slab, i);
2332 	return ret;
2333 }
2334 
2335 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2336 {
2337 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2338 	struct free_nid *i;
2339 	bool need_free = false;
2340 
2341 	spin_lock(&nm_i->nid_list_lock);
2342 	i = __lookup_free_nid_list(nm_i, nid);
2343 	if (i && i->state == FREE_NID) {
2344 		__remove_free_nid(sbi, i, FREE_NID);
2345 		need_free = true;
2346 	}
2347 	spin_unlock(&nm_i->nid_list_lock);
2348 
2349 	if (need_free)
2350 		kmem_cache_free(free_nid_slab, i);
2351 }
2352 
2353 static int scan_nat_page(struct f2fs_sb_info *sbi,
2354 			struct page *nat_page, nid_t start_nid)
2355 {
2356 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2357 	struct f2fs_nat_block *nat_blk = page_address(nat_page);
2358 	block_t blk_addr;
2359 	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2360 	int i;
2361 
2362 	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2363 
2364 	i = start_nid % NAT_ENTRY_PER_BLOCK;
2365 
2366 	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2367 		if (unlikely(start_nid >= nm_i->max_nid))
2368 			break;
2369 
2370 		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2371 
2372 		if (blk_addr == NEW_ADDR)
2373 			return -EINVAL;
2374 
2375 		if (blk_addr == NULL_ADDR) {
2376 			add_free_nid(sbi, start_nid, true, true);
2377 		} else {
2378 			spin_lock(&NM_I(sbi)->nid_list_lock);
2379 			update_free_nid_bitmap(sbi, start_nid, false, true);
2380 			spin_unlock(&NM_I(sbi)->nid_list_lock);
2381 		}
2382 	}
2383 
2384 	return 0;
2385 }
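
/*
 * A minimal sketch (illustrative only) of the per-entry decision made by
 * scan_nat_page() above: NULL_ADDR marks a free nid, NEW_ADDR can never
 * appear in an on-disk NAT block (corruption), and any other address is a
 * node in use.
 */
static int classify_nat_blkaddr_sketch(block_t blk_addr)
{
	if (blk_addr == NEW_ADDR)
		return -EINVAL;			/* never valid on disk */
	return blk_addr == NULL_ADDR;		/* 1: free nid, 0: in use */
}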
2386 
2387 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
2388 {
2389 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2390 	struct f2fs_journal *journal = curseg->journal;
2391 	int i;
2392 
2393 	down_read(&curseg->journal_rwsem);
2394 	for (i = 0; i < nats_in_cursum(journal); i++) {
2395 		block_t addr;
2396 		nid_t nid;
2397 
2398 		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2399 		nid = le32_to_cpu(nid_in_journal(journal, i));
2400 		if (addr == NULL_ADDR)
2401 			add_free_nid(sbi, nid, true, false);
2402 		else
2403 			remove_free_nid(sbi, nid);
2404 	}
2405 	up_read(&curseg->journal_rwsem);
2406 }
2407 
2408 static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
2409 {
2410 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2411 	unsigned int i, idx;
2412 	nid_t nid;
2413 
2414 	f2fs_down_read(&nm_i->nat_tree_lock);
2415 
2416 	for (i = 0; i < nm_i->nat_blocks; i++) {
2417 		if (!test_bit_le(i, nm_i->nat_block_bitmap))
2418 			continue;
2419 		if (!nm_i->free_nid_count[i])
2420 			continue;
2421 		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2422 			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2423 						NAT_ENTRY_PER_BLOCK, idx);
2424 			if (idx >= NAT_ENTRY_PER_BLOCK)
2425 				break;
2426 
2427 			nid = i * NAT_ENTRY_PER_BLOCK + idx;
2428 			add_free_nid(sbi, nid, true, false);
2429 
2430 			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2431 				goto out;
2432 		}
2433 	}
2434 out:
2435 	scan_curseg_cache(sbi);
2436 
2437 	f2fs_up_read(&nm_i->nat_tree_lock);
2438 }
2439 
2440 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2441 						bool sync, bool mount)
2442 {
2443 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2444 	int i = 0, ret;
2445 	nid_t nid = nm_i->next_scan_nid;
2446 
2447 	if (unlikely(nid >= nm_i->max_nid))
2448 		nid = 0;
2449 
2450 	if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
2451 		nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
2452 
2453 	/* Enough entries */
2454 	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2455 		return 0;
2456 
2457 	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2458 		return 0;
2459 
2460 	if (!mount) {
2461 		/* try to find free nids in free_nid_bitmap */
2462 		scan_free_nid_bits(sbi);
2463 
2464 		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2465 			return 0;
2466 	}
2467 
2468 	/* readahead nat pages to be scanned */
2469 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2470 							META_NAT, true);
2471 
2472 	f2fs_down_read(&nm_i->nat_tree_lock);
2473 
2474 	while (1) {
2475 		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2476 						nm_i->nat_block_bitmap)) {
2477 			struct page *page = get_current_nat_page(sbi, nid);
2478 
2479 			if (IS_ERR(page)) {
2480 				ret = PTR_ERR(page);
2481 			} else {
2482 				ret = scan_nat_page(sbi, page, nid);
2483 				f2fs_put_page(page, 1);
2484 			}
2485 
2486 			if (ret) {
2487 				f2fs_up_read(&nm_i->nat_tree_lock);
2488 				f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
2489 				return ret;
2490 			}
2491 		}
2492 
2493 		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2494 		if (unlikely(nid >= nm_i->max_nid))
2495 			nid = 0;
2496 
2497 		if (++i >= FREE_NID_PAGES)
2498 			break;
2499 	}
2500 
2501 	/* resume the next scan from here to find free nids abundantly */
2502 	nm_i->next_scan_nid = nid;
2503 
2504 	/* find free nids from current sum_pages */
2505 	scan_curseg_cache(sbi);
2506 
2507 	f2fs_up_read(&nm_i->nat_tree_lock);
2508 
2509 	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2510 					nm_i->ra_nid_pages, META_NAT, false);
2511 
2512 	return 0;
2513 }
2514 
2515 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2516 {
2517 	int ret;
2518 
2519 	mutex_lock(&NM_I(sbi)->build_lock);
2520 	ret = __f2fs_build_free_nids(sbi, sync, mount);
2521 	mutex_unlock(&NM_I(sbi)->build_lock);
2522 
2523 	return ret;
2524 }
2525 
2526 /*
2527  * If this function returns success, the caller can obtain a new nid
2528  * through the second parameter. The returned nid can be used as an
2529  * ino as well as a nid when an inode is created.
2530  */
2531 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2532 {
2533 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2534 	struct free_nid *i = NULL;
2535 retry:
2536 	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
2537 		f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
2538 		return false;
2539 	}
2540 
2541 	spin_lock(&nm_i->nid_list_lock);
2542 
2543 	if (unlikely(nm_i->available_nids == 0)) {
2544 		spin_unlock(&nm_i->nid_list_lock);
2545 		return false;
2546 	}
2547 
2548 	/* We should not use stale free nids created by f2fs_build_free_nids */
2549 	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2550 		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2551 		i = list_first_entry(&nm_i->free_nid_list,
2552 					struct free_nid, list);
2553 		*nid = i->nid;
2554 
2555 		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2556 		nm_i->available_nids--;
2557 
2558 		update_free_nid_bitmap(sbi, *nid, false, false);
2559 
2560 		spin_unlock(&nm_i->nid_list_lock);
2561 		return true;
2562 	}
2563 	spin_unlock(&nm_i->nid_list_lock);
2564 
2565 	/* Let's scan the NAT pages and their caches to get free nids */
2566 	if (!f2fs_build_free_nids(sbi, true, false))
2567 		goto retry;
2568 	return false;
2569 }
2570 
2571 /*
2572  * f2fs_alloc_nid() should be called prior to this function.
2573  */
2574 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2575 {
2576 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2577 	struct free_nid *i;
2578 
2579 	spin_lock(&nm_i->nid_list_lock);
2580 	i = __lookup_free_nid_list(nm_i, nid);
2581 	f2fs_bug_on(sbi, !i);
2582 	__remove_free_nid(sbi, i, PREALLOC_NID);
2583 	spin_unlock(&nm_i->nid_list_lock);
2584 
2585 	kmem_cache_free(free_nid_slab, i);
2586 }
2587 
2588 /*
2589  * f2fs_alloc_nid() should be called prior to this function.
2590  */
2591 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2592 {
2593 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2594 	struct free_nid *i;
2595 	bool need_free = false;
2596 
2597 	if (!nid)
2598 		return;
2599 
2600 	spin_lock(&nm_i->nid_list_lock);
2601 	i = __lookup_free_nid_list(nm_i, nid);
2602 	f2fs_bug_on(sbi, !i);
2603 
2604 	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2605 		__remove_free_nid(sbi, i, PREALLOC_NID);
2606 		need_free = true;
2607 	} else {
2608 		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2609 	}
2610 
2611 	nm_i->available_nids++;
2612 
2613 	update_free_nid_bitmap(sbi, nid, true, false);
2614 
2615 	spin_unlock(&nm_i->nid_list_lock);
2616 
2617 	if (need_free)
2618 		kmem_cache_free(free_nid_slab, i);
2619 }
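
/*
 * A minimal sketch (illustrative; consume_nid_somehow() is hypothetical) of
 * the caller contract for the three functions above: every successful
 * f2fs_alloc_nid() must be paired with either f2fs_alloc_nid_done() when
 * the nid was consumed, or f2fs_alloc_nid_failed() to return it to the pool.
 */
static int alloc_nid_usage_sketch(struct f2fs_sb_info *sbi)
{
	nid_t nid;
	int err;

	if (!f2fs_alloc_nid(sbi, &nid))
		return -ENOSPC;

	err = consume_nid_somehow(sbi, nid);	/* hypothetical consumer */
	if (err) {
		f2fs_alloc_nid_failed(sbi, nid);
		return err;
	}
	f2fs_alloc_nid_done(sbi, nid);
	return 0;
}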
2620 
2621 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2622 {
2623 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2624 	int nr = nr_shrink;
2625 
2626 	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2627 		return 0;
2628 
2629 	if (!mutex_trylock(&nm_i->build_lock))
2630 		return 0;
2631 
2632 	while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
2633 		struct free_nid *i, *next;
2634 		unsigned int batch = SHRINK_NID_BATCH_SIZE;
2635 
2636 		spin_lock(&nm_i->nid_list_lock);
2637 		list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2638 			if (!nr_shrink || !batch ||
2639 				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2640 				break;
2641 			__remove_free_nid(sbi, i, FREE_NID);
2642 			kmem_cache_free(free_nid_slab, i);
2643 			nr_shrink--;
2644 			batch--;
2645 		}
2646 		spin_unlock(&nm_i->nid_list_lock);
2647 	}
2648 
2649 	mutex_unlock(&nm_i->build_lock);
2650 
2651 	return nr - nr_shrink;
2652 }
2653 
2654 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
2655 {
2656 	void *src_addr, *dst_addr;
2657 	size_t inline_size;
2658 	struct page *ipage;
2659 	struct f2fs_inode *ri;
2660 
2661 	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
2662 	if (IS_ERR(ipage))
2663 		return PTR_ERR(ipage);
2664 
2665 	ri = F2FS_INODE(page);
2666 	if (ri->i_inline & F2FS_INLINE_XATTR) {
2667 		if (!f2fs_has_inline_xattr(inode)) {
2668 			set_inode_flag(inode, FI_INLINE_XATTR);
2669 			stat_inc_inline_xattr(inode);
2670 		}
2671 	} else {
2672 		if (f2fs_has_inline_xattr(inode)) {
2673 			stat_dec_inline_xattr(inode);
2674 			clear_inode_flag(inode, FI_INLINE_XATTR);
2675 		}
2676 		goto update_inode;
2677 	}
2678 
2679 	dst_addr = inline_xattr_addr(inode, ipage);
2680 	src_addr = inline_xattr_addr(inode, page);
2681 	inline_size = inline_xattr_size(inode);
2682 
2683 	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
2684 	memcpy(dst_addr, src_addr, inline_size);
2685 update_inode:
2686 	f2fs_update_inode(inode, ipage);
2687 	f2fs_put_page(ipage, 1);
2688 	return 0;
2689 }
2690 
2691 int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2692 {
2693 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2694 	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2695 	nid_t new_xnid;
2696 	struct dnode_of_data dn;
2697 	struct node_info ni;
2698 	struct page *xpage;
2699 	int err;
2700 
2701 	if (!prev_xnid)
2702 		goto recover_xnid;
2703 
2704 	/* 1: invalidate the previous xattr nid */
2705 	err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
2706 	if (err)
2707 		return err;
2708 
2709 	f2fs_invalidate_blocks(sbi, ni.blk_addr);
2710 	dec_valid_node_count(sbi, inode, false);
2711 	set_node_addr(sbi, &ni, NULL_ADDR, false);
2712 
2713 recover_xnid:
2714 	/* 2: update xattr nid in inode */
2715 	if (!f2fs_alloc_nid(sbi, &new_xnid))
2716 		return -ENOSPC;
2717 
2718 	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2719 	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
2720 	if (IS_ERR(xpage)) {
2721 		f2fs_alloc_nid_failed(sbi, new_xnid);
2722 		return PTR_ERR(xpage);
2723 	}
2724 
2725 	f2fs_alloc_nid_done(sbi, new_xnid);
2726 	f2fs_update_inode_page(inode);
2727 
2728 	/* 3: update and set xattr node page dirty */
2729 	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
2730 
2731 	set_page_dirty(xpage);
2732 	f2fs_put_page(xpage, 1);
2733 
2734 	return 0;
2735 }
2736 
2737 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2738 {
2739 	struct f2fs_inode *src, *dst;
2740 	nid_t ino = ino_of_node(page);
2741 	struct node_info old_ni, new_ni;
2742 	struct page *ipage;
2743 	int err;
2744 
2745 	err = f2fs_get_node_info(sbi, ino, &old_ni, false);
2746 	if (err)
2747 		return err;
2748 
2749 	if (unlikely(old_ni.blk_addr != NULL_ADDR))
2750 		return -EINVAL;
2751 retry:
2752 	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2753 	if (!ipage) {
2754 		congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2755 		goto retry;
2756 	}
2757 
2758 	/* Should not use this inode from free nid list */
2759 	remove_free_nid(sbi, ino);
2760 
2761 	if (!PageUptodate(ipage))
2762 		SetPageUptodate(ipage);
2763 	fill_node_footer(ipage, ino, ino, 0, true);
2764 	set_cold_node(ipage, false);
2765 
2766 	src = F2FS_INODE(page);
2767 	dst = F2FS_INODE(ipage);
2768 
2769 	memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
2770 	dst->i_size = 0;
2771 	dst->i_blocks = cpu_to_le64(1);
2772 	dst->i_links = cpu_to_le32(1);
2773 	dst->i_xattr_nid = 0;
2774 	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2775 	if (dst->i_inline & F2FS_EXTRA_ATTR) {
2776 		dst->i_extra_isize = src->i_extra_isize;
2777 
2778 		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
2779 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2780 							i_inline_xattr_size))
2781 			dst->i_inline_xattr_size = src->i_inline_xattr_size;
2782 
2783 		if (f2fs_sb_has_project_quota(sbi) &&
2784 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2785 								i_projid))
2786 			dst->i_projid = src->i_projid;
2787 
2788 		if (f2fs_sb_has_inode_crtime(sbi) &&
2789 			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2790 							i_crtime_nsec)) {
2791 			dst->i_crtime = src->i_crtime;
2792 			dst->i_crtime_nsec = src->i_crtime_nsec;
2793 		}
2794 	}
2795 
2796 	new_ni = old_ni;
2797 	new_ni.ino = ino;
2798 
2799 	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2800 		WARN_ON(1);
2801 	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2802 	inc_valid_inode_count(sbi);
2803 	set_page_dirty(ipage);
2804 	f2fs_put_page(ipage, 1);
2805 	return 0;
2806 }
2807 
2808 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2809 			unsigned int segno, struct f2fs_summary_block *sum)
2810 {
2811 	struct f2fs_node *rn;
2812 	struct f2fs_summary *sum_entry;
2813 	block_t addr;
2814 	int i, idx, last_offset, nrpages;
2815 
2816 	/* scan the node segment */
2817 	last_offset = sbi->blocks_per_seg;
2818 	addr = START_BLOCK(sbi, segno);
2819 	sum_entry = &sum->entries[0];
2820 
2821 	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2822 		nrpages = min(last_offset - i, BIO_MAX_PAGES);
2823 
2824 		/* readahead node pages */
2825 		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2826 
2827 		for (idx = addr; idx < addr + nrpages; idx++) {
2828 			struct page *page = f2fs_get_tmp_page(sbi, idx);
2829 
2830 			if (IS_ERR(page))
2831 				return PTR_ERR(page);
2832 
2833 			rn = F2FS_NODE(page);
2834 			sum_entry->nid = rn->footer.nid;
2835 			sum_entry->version = 0;
2836 			sum_entry->ofs_in_node = 0;
2837 			sum_entry++;
2838 			f2fs_put_page(page, 1);
2839 		}
2840 
2841 		invalidate_mapping_pages(META_MAPPING(sbi), addr,
2842 							addr + nrpages);
2843 	}
2844 	return 0;
2845 }
2846 
2847 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2848 {
2849 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2850 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2851 	struct f2fs_journal *journal = curseg->journal;
2852 	int i;
2853 
2854 	down_write(&curseg->journal_rwsem);
2855 	for (i = 0; i < nats_in_cursum(journal); i++) {
2856 		struct nat_entry *ne;
2857 		struct f2fs_nat_entry raw_ne;
2858 		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2859 
2860 		if (f2fs_check_nid_range(sbi, nid))
2861 			continue;
2862 
2863 		raw_ne = nat_in_journal(journal, i);
2864 
2865 		ne = __lookup_nat_cache(nm_i, nid);
2866 		if (!ne) {
2867 			ne = __alloc_nat_entry(nid, true);
2868 			__init_nat_entry(nm_i, ne, &raw_ne, true);
2869 		}
2870 
2871 		/*
2872 		 * if a free nat in the journal has not been used since the
2873 		 * last checkpoint, remove it from the available nids, since
2874 		 * we will add it back later.
2875 		 */
2876 		if (!get_nat_flag(ne, IS_DIRTY) &&
2877 				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
2878 			spin_lock(&nm_i->nid_list_lock);
2879 			nm_i->available_nids--;
2880 			spin_unlock(&nm_i->nid_list_lock);
2881 		}
2882 
2883 		__set_nat_cache_dirty(nm_i, ne);
2884 	}
2885 	update_nats_in_cursum(journal, -i);
2886 	up_write(&curseg->journal_rwsem);
2887 }
2888 
2889 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2890 						struct list_head *head, int max)
2891 {
2892 	struct nat_entry_set *cur;
2893 
2894 	if (nes->entry_cnt >= max)
2895 		goto add_out;
2896 
2897 	list_for_each_entry(cur, head, set_list) {
2898 		if (cur->entry_cnt >= nes->entry_cnt) {
2899 			list_add(&nes->set_list, cur->set_list.prev);
2900 			return;
2901 		}
2902 	}
2903 add_out:
2904 	list_add_tail(&nes->set_list, head);
2905 }
2906 
2907 static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2908 						struct page *page)
2909 {
2910 	struct f2fs_nm_info *nm_i = NM_I(sbi);
2911 	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
2912 	struct f2fs_nat_block *nat_blk = page_address(page);
2913 	int valid = 0;
2914 	int i = 0;
2915 
2916 	if (!enabled_nat_bits(sbi, NULL))
2917 		return;
2918 
2919 	if (nat_index == 0) {
2920 		valid = 1;
2921 		i = 1;
2922 	}
2923 	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
2924 		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
2925 			valid++;
2926 	}
2927 	if (valid == 0) {
2928 		__set_bit_le(nat_index, nm_i->empty_nat_bits);
2929 		__clear_bit_le(nat_index, nm_i->full_nat_bits);
2930 		return;
2931 	}
2932 
2933 	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
2934 	if (valid == NAT_ENTRY_PER_BLOCK)
2935 		__set_bit_le(nat_index, nm_i->full_nat_bits);
2936 	else
2937 		__clear_bit_le(nat_index, nm_i->full_nat_bits);
2938 }
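
/*
 * A minimal sketch (illustrative enum, not in f2fs) of the three-way
 * classification __update_nat_bits() encodes into the empty/full bitmaps.
 * Block 0 pre-counts its first entry as valid because nid 0 is reserved.
 */
enum nat_block_class_sketch { NAT_EMPTY, NAT_PARTIAL, NAT_FULL };

static enum nat_block_class_sketch classify_nat_block_sketch(int valid)
{
	if (valid == 0)
		return NAT_EMPTY;	/* set empty bit, clear full bit */
	if (valid == NAT_ENTRY_PER_BLOCK)
		return NAT_FULL;	/* clear empty bit, set full bit */
	return NAT_PARTIAL;		/* clear both bits */
}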
2939 
2940 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2941 		struct nat_entry_set *set, struct cp_control *cpc)
2942 {
2943 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2944 	struct f2fs_journal *journal = curseg->journal;
2945 	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
2946 	bool to_journal = true;
2947 	struct f2fs_nat_block *nat_blk;
2948 	struct nat_entry *ne, *cur;
2949 	struct page *page = NULL;
2950 
2951 	/*
2952 	 * there are two steps to flush nat entries:
2953 	 * #1, flush nat entries to journal in current hot data summary block.
2954 	 * #2, flush nat entries to nat page.
2955 	 */
2956 	if (enabled_nat_bits(sbi, cpc) ||
2957 		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
2958 		to_journal = false;
2959 
2960 	if (to_journal) {
2961 		down_write(&curseg->journal_rwsem);
2962 	} else {
2963 		page = get_next_nat_page(sbi, start_nid);
2964 		if (IS_ERR(page))
2965 			return PTR_ERR(page);
2966 
2967 		nat_blk = page_address(page);
2968 		f2fs_bug_on(sbi, !nat_blk);
2969 	}
2970 
2971 	/* flush dirty nats in nat entry set */
2972 	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
2973 		struct f2fs_nat_entry *raw_ne;
2974 		nid_t nid = nat_get_nid(ne);
2975 		int offset;
2976 
2977 		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
2978 
2979 		if (to_journal) {
2980 			offset = f2fs_lookup_journal_in_cursum(journal,
2981 							NAT_JOURNAL, nid, 1);
2982 			f2fs_bug_on(sbi, offset < 0);
2983 			raw_ne = &nat_in_journal(journal, offset);
2984 			nid_in_journal(journal, offset) = cpu_to_le32(nid);
2985 		} else {
2986 			raw_ne = &nat_blk->entries[nid - start_nid];
2987 		}
2988 		raw_nat_from_node_info(raw_ne, &ne->ni);
2989 		nat_reset_flag(ne);
2990 		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
2991 		if (nat_get_blkaddr(ne) == NULL_ADDR) {
2992 			add_free_nid(sbi, nid, false, true);
2993 		} else {
2994 			spin_lock(&NM_I(sbi)->nid_list_lock);
2995 			update_free_nid_bitmap(sbi, nid, false, false);
2996 			spin_unlock(&NM_I(sbi)->nid_list_lock);
2997 		}
2998 	}
2999 
3000 	if (to_journal) {
3001 		up_write(&curseg->journal_rwsem);
3002 	} else {
3003 		__update_nat_bits(sbi, start_nid, page);
3004 		f2fs_put_page(page, 1);
3005 	}
3006 
3007 	/* Allow dirty nats by node block allocation in write_begin */
3008 	if (!set->entry_cnt) {
3009 		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
3010 		kmem_cache_free(nat_entry_set_slab, set);
3011 	}
3012 	return 0;
3013 }
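
/*
 * A minimal sketch (illustrative helper, not in f2fs) of the destination
 * choice made at the top of __flush_nat_entry_set(): a set goes to the
 * hot-data summary journal only when nat_bits are disabled and the journal
 * still has room; otherwise its entries are written back into the on-disk
 * NAT block.
 */
static bool flush_to_journal_sketch(struct f2fs_sb_info *sbi,
					struct cp_control *cpc,
					struct f2fs_journal *journal,
					unsigned int entry_cnt)
{
	return !enabled_nat_bits(sbi, cpc) &&
		__has_cursum_space(journal, entry_cnt, NAT_JOURNAL);
}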
3014 
3015 /*
3016  * This function is called during the checkpointing process.
3017  */
3018 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3019 {
3020 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3021 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
3022 	struct f2fs_journal *journal = curseg->journal;
3023 	struct nat_entry_set *setvec[SETVEC_SIZE];
3024 	struct nat_entry_set *set, *tmp;
3025 	unsigned int found;
3026 	nid_t set_idx = 0;
3027 	LIST_HEAD(sets);
3028 	int err = 0;
3029 
3030 	/*
3031 	 * during unmount, let's flush nat_bits before checking
3032 	 * nat_cnt[DIRTY_NAT].
3033 	 */
3034 	if (enabled_nat_bits(sbi, cpc)) {
3035 		f2fs_down_write(&nm_i->nat_tree_lock);
3036 		remove_nats_in_journal(sbi);
3037 		f2fs_up_write(&nm_i->nat_tree_lock);
3038 	}
3039 
3040 	if (!nm_i->nat_cnt[DIRTY_NAT])
3041 		return 0;
3042 
3043 	f2fs_down_write(&nm_i->nat_tree_lock);
3044 
3045 	/*
3046 	 * if there is not enough space in the journal to store the dirty
3047 	 * nat entries, remove all entries from the journal and merge them
3048 	 * into the nat entry sets.
3049 	 */
3050 	if (enabled_nat_bits(sbi, cpc) ||
3051 		!__has_cursum_space(journal,
3052 			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
3053 		remove_nats_in_journal(sbi);
3054 
3055 	while ((found = __gang_lookup_nat_set(nm_i,
3056 					set_idx, SETVEC_SIZE, setvec))) {
3057 		unsigned idx;
3058 
3059 		set_idx = setvec[found - 1]->set + 1;
3060 		for (idx = 0; idx < found; idx++)
3061 			__adjust_nat_entry_set(setvec[idx], &sets,
3062 						MAX_NAT_JENTRIES(journal));
3063 	}
3064 
3065 	/* flush dirty nats in nat entry set */
3066 	list_for_each_entry_safe(set, tmp, &sets, set_list) {
3067 		err = __flush_nat_entry_set(sbi, set, cpc);
3068 		if (err)
3069 			break;
3070 	}
3071 
3072 	f2fs_up_write(&nm_i->nat_tree_lock);
3073 	/* Allow dirty nats by node block allocation in write_begin */
3074 
3075 	return err;
3076 }
3077 
3078 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
3079 {
3080 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3081 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3082 	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
3083 	unsigned int i;
3084 	__u64 cp_ver = cur_cp_version(ckpt);
3085 	block_t nat_bits_addr;
3086 
3087 	if (!enabled_nat_bits(sbi, NULL))
3088 		return 0;
3089 
3090 	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3091 	nm_i->nat_bits = f2fs_kvzalloc(sbi,
3092 			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
3093 	if (!nm_i->nat_bits)
3094 		return -ENOMEM;
3095 
3096 	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
3097 						nm_i->nat_bits_blocks;
3098 	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
3099 		struct page *page;
3100 
3101 		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
3102 		if (IS_ERR(page))
3103 			return PTR_ERR(page);
3104 
3105 		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
3106 					page_address(page), F2FS_BLKSIZE);
3107 		f2fs_put_page(page, 1);
3108 	}
3109 
3110 	cp_ver |= (cur_cp_crc(ckpt) << 32);
3111 	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
3112 		disable_nat_bits(sbi, true);
3113 		return 0;
3114 	}
3115 
3116 	nm_i->full_nat_bits = nm_i->nat_bits + 8;
3117 	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
3118 
3119 	f2fs_notice(sbi, "Found nat_bits in checkpoint");
3120 	return 0;
3121 }
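
/*
 * A minimal sketch (illustrative pseudo-layout) of the nat_bits blob that
 * __get_nat_bitmaps() reads from the tail of the checkpoint segment:
 *
 *	+-------------------+----------------------+----------------------+
 *	| __le64 cp_ver|crc | full_nat_bits        | empty_nat_bits       |
 *	| 8 bytes           | nat_blocks / 8 bytes | nat_blocks / 8 bytes |
 *	+-------------------+----------------------+----------------------+
 *
 * which is why full_nat_bits starts at nat_bits + 8 and empty_nat_bits sits
 * nat_bits_bytes after it:
 */
static void locate_nat_bits_sketch(struct f2fs_nm_info *nm_i,
					unsigned int nat_bits_bytes)
{
	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
}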
3122 
3123 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
3124 {
3125 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3126 	unsigned int i = 0;
3127 	nid_t nid, last_nid;
3128 
3129 	if (!enabled_nat_bits(sbi, NULL))
3130 		return;
3131 
3132 	for (i = 0; i < nm_i->nat_blocks; i++) {
3133 		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
3134 		if (i >= nm_i->nat_blocks)
3135 			break;
3136 
3137 		__set_bit_le(i, nm_i->nat_block_bitmap);
3138 
3139 		nid = i * NAT_ENTRY_PER_BLOCK;
3140 		last_nid = nid + NAT_ENTRY_PER_BLOCK;
3141 
3142 		spin_lock(&NM_I(sbi)->nid_list_lock);
3143 		for (; nid < last_nid; nid++)
3144 			update_free_nid_bitmap(sbi, nid, true, true);
3145 		spin_unlock(&NM_I(sbi)->nid_list_lock);
3146 	}
3147 
3148 	for (i = 0; i < nm_i->nat_blocks; i++) {
3149 		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
3150 		if (i >= nm_i->nat_blocks)
3151 			break;
3152 
3153 		__set_bit_le(i, nm_i->nat_block_bitmap);
3154 	}
3155 }
3156 
3157 static int init_node_manager(struct f2fs_sb_info *sbi)
3158 {
3159 	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
3160 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3161 	unsigned char *version_bitmap;
3162 	unsigned int nat_segs;
3163 	int err;
3164 
3165 	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
3166 
3167 	/* segment_count_nat includes the pair segment, so divide by 2. */
3168 	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
3169 	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
3170 	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
3171 
3172 	/* unused nids: 0, node, meta (and root, which is counted as a valid node) */
3173 	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
3174 						F2FS_RESERVED_NODE_NUM;
3175 	nm_i->nid_cnt[FREE_NID] = 0;
3176 	nm_i->nid_cnt[PREALLOC_NID] = 0;
3177 	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
3178 	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
3179 	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
3180 
3181 	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
3182 	INIT_LIST_HEAD(&nm_i->free_nid_list);
3183 	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
3184 	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
3185 	INIT_LIST_HEAD(&nm_i->nat_entries);
3186 	spin_lock_init(&nm_i->nat_list_lock);
3187 
3188 	mutex_init(&nm_i->build_lock);
3189 	spin_lock_init(&nm_i->nid_list_lock);
3190 	init_f2fs_rwsem(&nm_i->nat_tree_lock);
3191 
3192 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
3193 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
3194 	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
3195 	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
3196 					GFP_KERNEL);
3197 	if (!nm_i->nat_bitmap)
3198 		return -ENOMEM;
3199 
3200 	err = __get_nat_bitmaps(sbi);
3201 	if (err)
3202 		return err;
3203 
3204 #ifdef CONFIG_F2FS_CHECK_FS
3205 	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
3206 					GFP_KERNEL);
3207 	if (!nm_i->nat_bitmap_mir)
3208 		return -ENOMEM;
3209 #endif
3210 
3211 	return 0;
3212 }
3213 
3214 static int init_free_nid_cache(struct f2fs_sb_info *sbi)
3215 {
3216 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3217 	int i;
3218 
3219 	nm_i->free_nid_bitmap =
3220 		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
3221 					      nm_i->nat_blocks),
3222 			      GFP_KERNEL);
3223 	if (!nm_i->free_nid_bitmap)
3224 		return -ENOMEM;
3225 
3226 	for (i = 0; i < nm_i->nat_blocks; i++) {
3227 		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3228 			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
3229 		if (!nm_i->free_nid_bitmap[i])
3230 			return -ENOMEM;
3231 	}
3232 
3233 	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
3234 								GFP_KERNEL);
3235 	if (!nm_i->nat_block_bitmap)
3236 		return -ENOMEM;
3237 
3238 	nm_i->free_nid_count =
3239 		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
3240 					      nm_i->nat_blocks),
3241 			      GFP_KERNEL);
3242 	if (!nm_i->free_nid_count)
3243 		return -ENOMEM;
3244 	return 0;
3245 }
3246 
3247 int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
3248 {
3249 	int err;
3250 
3251 	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
3252 							GFP_KERNEL);
3253 	if (!sbi->nm_info)
3254 		return -ENOMEM;
3255 
3256 	err = init_node_manager(sbi);
3257 	if (err)
3258 		return err;
3259 
3260 	err = init_free_nid_cache(sbi);
3261 	if (err)
3262 		return err;
3263 
3264 	/* load free nid status from nat_bits table */
3265 	load_free_nid_bitmap(sbi);
3266 
3267 	return f2fs_build_free_nids(sbi, true, true);
3268 }
3269 
3270 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
3271 {
3272 	struct f2fs_nm_info *nm_i = NM_I(sbi);
3273 	struct free_nid *i, *next_i;
3274 	struct nat_entry *natvec[NATVEC_SIZE];
3275 	struct nat_entry_set *setvec[SETVEC_SIZE];
3276 	nid_t nid = 0;
3277 	unsigned int found;
3278 
3279 	if (!nm_i)
3280 		return;
3281 
3282 	/* destroy free nid list */
3283 	spin_lock(&nm_i->nid_list_lock);
3284 	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
3285 		__remove_free_nid(sbi, i, FREE_NID);
3286 		spin_unlock(&nm_i->nid_list_lock);
3287 		kmem_cache_free(free_nid_slab, i);
3288 		spin_lock(&nm_i->nid_list_lock);
3289 	}
3290 	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
3291 	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
3292 	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
3293 	spin_unlock(&nm_i->nid_list_lock);
3294 
3295 	/* destroy nat cache */
3296 	f2fs_down_write(&nm_i->nat_tree_lock);
3297 	while ((found = __gang_lookup_nat_cache(nm_i,
3298 					nid, NATVEC_SIZE, natvec))) {
3299 		unsigned idx;
3300 
3301 		nid = nat_get_nid(natvec[found - 1]) + 1;
3302 		for (idx = 0; idx < found; idx++) {
3303 			spin_lock(&nm_i->nat_list_lock);
3304 			list_del(&natvec[idx]->list);
3305 			spin_unlock(&nm_i->nat_list_lock);
3306 
3307 			__del_from_nat_cache(nm_i, natvec[idx]);
3308 		}
3309 	}
3310 	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
3311 
3312 	/* destroy nat set cache */
3313 	nid = 0;
3314 	while ((found = __gang_lookup_nat_set(nm_i,
3315 					nid, SETVEC_SIZE, setvec))) {
3316 		unsigned idx;
3317 
3318 		nid = setvec[found - 1]->set + 1;
3319 		for (idx = 0; idx < found; idx++) {
3320 			/* entry_cnt is nonzero when a cp_error has occurred */
3321 			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
3322 			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
3323 			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
3324 		}
3325 	}
3326 	f2fs_up_write(&nm_i->nat_tree_lock);
3327 
3328 	kvfree(nm_i->nat_block_bitmap);
3329 	if (nm_i->free_nid_bitmap) {
3330 		int i;
3331 
3332 		for (i = 0; i < nm_i->nat_blocks; i++)
3333 			kvfree(nm_i->free_nid_bitmap[i]);
3334 		kvfree(nm_i->free_nid_bitmap);
3335 	}
3336 	kvfree(nm_i->free_nid_count);
3337 
3338 	kvfree(nm_i->nat_bitmap);
3339 	kvfree(nm_i->nat_bits);
3340 #ifdef CONFIG_F2FS_CHECK_FS
3341 	kvfree(nm_i->nat_bitmap_mir);
3342 #endif
3343 	sbi->nm_info = NULL;
3344 	kfree(nm_i);
3345 }
3346 
3347 int __init f2fs_create_node_manager_caches(void)
3348 {
3349 	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
3350 			sizeof(struct nat_entry));
3351 	if (!nat_entry_slab)
3352 		goto fail;
3353 
3354 	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
3355 			sizeof(struct free_nid));
3356 	if (!free_nid_slab)
3357 		goto destroy_nat_entry;
3358 
3359 	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
3360 			sizeof(struct nat_entry_set));
3361 	if (!nat_entry_set_slab)
3362 		goto destroy_free_nid;
3363 
3364 	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
3365 			sizeof(struct fsync_node_entry));
3366 	if (!fsync_node_entry_slab)
3367 		goto destroy_nat_entry_set;
3368 	return 0;
3369 
3370 destroy_nat_entry_set:
3371 	kmem_cache_destroy(nat_entry_set_slab);
3372 destroy_free_nid:
3373 	kmem_cache_destroy(free_nid_slab);
3374 destroy_nat_entry:
3375 	kmem_cache_destroy(nat_entry_slab);
3376 fail:
3377 	return -ENOMEM;
3378 }
3379 
3380 void f2fs_destroy_node_manager_caches(void)
3381 {
3382 	kmem_cache_destroy(fsync_node_entry_slab);
3383 	kmem_cache_destroy(nat_entry_set_slab);
3384 	kmem_cache_destroy(free_nid_slab);
3385 	kmem_cache_destroy(nat_entry_slab);
3386 }
3387