// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

#ifdef CONFIG_F2FS_FS_COMPRESSION
extern const struct address_space_operations f2fs_compress_aops;
#endif

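/*
 * Mark an inode dirty for VFS writeback, skipping inodes that are
 * still being initialized (FI_NEW_INODE) and inodes that
 * f2fs_inode_dirtied() reports as already tracked on f2fs's own
 * dirty list.
 */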
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

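/*
 * Translate the on-disk f2fs inode flags (F2FS_*_FL) plus the
 * encryption/verity state into the generic VFS S_* inode flags.
 */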
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	if (file_is_verity(inode))
		new_fl |= S_VERITY;
	if (flags & F2FS_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}

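/*
 * For device special files, decode the device number stored in the
 * first data block address slots of the raw inode: a non-zero slot 0
 * holds an old-format dev_t, otherwise slot 1 holds the new-format
 * encoding.
 */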
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

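/*
 * Check whether the first data block of an inode has been written:
 * returns 0 if it points at a valid on-disk block, 1 if it holds no
 * valid data address yet, and -EFSCORRUPTED if the address is bogus.
 */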
static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
		return -EFSCORRUPTED;
	return 0;
}

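/*
 * Counterpart of __get_inode_rdev(): encode i_rdev into the raw
 * inode, using slot 0 for device numbers that fit the old format and
 * slot 1 otherwise.
 */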
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

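/*
 * The inline data area contains non-zero words but the inode's
 * DATA_EXIST flag is clear: recover the flag in both the in-memory
 * and on-disk inode and dirty the node page so the fix is persisted.
 */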
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
}

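/*
 * An inode checksum is only meaningful when the superblock enables
 * the feature, the page is an inode with the extra-attribute area,
 * and that area is large enough to contain i_inode_checksum.
 */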
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}

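/*
 * Compute the inode checksum: seed with the inode number and
 * generation, then checksum the raw inode with the on-disk checksum
 * field itself substituted by zero.
 */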
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

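/*
 * Verify the on-disk inode checksum. With CONFIG_F2FS_CHECK_FS the
 * check also runs on dirty/writeback pages; otherwise those are
 * skipped, since their in-memory contents may legitimately differ
 * from what the stored checksum was computed over.
 */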
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			  page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

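/*
 * Cross-check the on-disk inode against superblock features and
 * internal invariants (block count, footer ino/nid, extra attribute
 * and inline xattr sizes, cached extent range, inline data/dentry
 * flags, casefold and compression metadata). On any mismatch, flag
 * the filesystem for fsck and reject the inode.
 */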
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = F2FS_INODE(node_page);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
			  __func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
			  __func__, inode->i_ino,
			  ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_sb_has_flexible_inline_xattr(sbi)
			&& !f2fs_has_extra_attr(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_extra_isize,
			  F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
		f2fs_sb_has_flexible_inline_xattr(sbi) &&
		f2fs_has_inline_xattr(inode) &&
		(!fi->i_inline_xattr_size ||
		fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_inline_xattr_size,
			  MAX_INLINE_XATTR_SIZE);
		return false;
	}

	if (fi->extent_tree[EX_READ]) {
		struct extent_info *ei = &fi->extent_tree[EX_READ]->largest;

		if (ei->len &&
			(!f2fs_is_valid_blkaddr(sbi, ei->blk,
						DATA_GENERIC_ENHANCE) ||
			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
						DATA_GENERIC_ENHANCE))) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
				  __func__, inode->i_ino,
				  ei->blk, ei->fofs, ei->len);
			return false;
		}
	}

	if (f2fs_sanity_check_inline_data(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
			fi->i_flags & F2FS_COMPR_FL &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
						i_log_cluster_size)) {
		if (ri->i_compress_algorithm >= COMPRESS_MAX) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				"compress algorithm: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_compress_algorithm);
			return false;
		}
		if (le64_to_cpu(ri->i_compr_blocks) >
				SECTOR_TO_BLOCK(inode->i_blocks)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
				"i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
				  __func__, inode->i_ino,
				  le64_to_cpu(ri->i_compr_blocks),
				  SECTOR_TO_BLOCK(inode->i_blocks));
			return false;
		}
		if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
			ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				"log cluster size: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_log_cluster_size);
			return false;
		}
	}

	return true;
}

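/*
 * Read the raw inode from its node page and populate the VFS inode
 * and f2fs-private fields, recovering the inline-data status and the
 * cold bit along the way. Returns 0 on success or a negative errno.
 */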
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	if (S_ISREG(inode->i_mode))
		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
	bitmap_zero(fi->flags, FI_MAX);
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previous inline data or directory always reserved 200 bytes
		 * in inode layout, even if inline_xattr is disabled. In order
		 * to keep inline_dentry's structure for backward compatibility,
		 * we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}

	/* check whether inline data exists */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover cold bit for non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
					(fi->i_flags & F2FS_COMPR_FL)) {
		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
					i_log_cluster_size)) {
			atomic_set(&fi->i_compr_blocks,
					le64_to_cpu(ri->i_compr_blocks));
			fi->i_compress_algorithm = ri->i_compress_algorithm;
			fi->i_log_cluster_size = ri->i_log_cluster_size;
			fi->i_compress_flag = le16_to_cpu(ri->i_compress_flag);
			fi->i_cluster_size = 1 << fi->i_log_cluster_size;
			set_inode_flag(inode, FI_COMPRESSED_FILE);
		}
	}

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

	/* Need all the flag bits */
	f2fs_init_read_extent_tree(inode, node_page);
	f2fs_init_age_extent_tree(inode);

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EFSCORRUPTED;
	}

	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);
	stat_inc_compr_inode(inode);
	stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));

	return 0;
}

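/*
 * Look up (or instantiate) an inode by number. The internal
 * node/meta/compress inodes only get their address space set up;
 * ordinary inodes are read from disk and wired to the matching inode,
 * file, and address space operations based on their mode.
 */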
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (ino == F2FS_COMPRESS_INO(sbi))
		goto make_now;
#endif

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		inode->i_mapping->a_ops = &f2fs_compress_aops;
#endif
		mapping_set_gfp_mask(inode->i_mapping,
			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

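/*
 * Same as f2fs_iget(), but retry on -ENOMEM after waiting for
 * writeback congestion to ease, since memory pressure here is
 * usually transient.
 */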
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
			goto retry;
		}
	}
	return inode;
}

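/*
 * Copy the in-memory inode state back into the raw inode on its node
 * page and dirty the page. The i_disk_time[] snapshot records which
 * timestamps were last pushed to disk, so later lazytime-only updates
 * can be detected.
 */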
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ];

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_read_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}

		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
							i_log_cluster_size)) {
			ri->i_compr_blocks =
				cpu_to_le64(atomic_read(
					&F2FS_I(inode)->i_compr_blocks));
			ri->i_compress_algorithm =
				F2FS_I(inode)->i_compress_algorithm;
			ri->i_compress_flag =
				cpu_to_le16(F2FS_I(inode)->i_compress_flag);
			ri->i_log_cluster_size =
				F2FS_I(inode)->i_log_cluster_size;
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_page_private_inline(node_page);

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

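/*
 * Fetch the inode's node page and write the inode back into it,
 * retrying on -ENOMEM. Any other failure except -ENOENT stops
 * checkpointing, since the on-disk inode can no longer be updated.
 */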
void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);

		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_UPDATE_INODE);
		}
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}

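/*
 * VFS ->write_inode(): skip the internal node/meta inodes and clean
 * inodes whose timestamps still match the on-disk copy, then flush
 * the inode into its node page and balance dirty node pages.
 */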
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	/*
	 * atime could be updated without dirtying f2fs inode in lazytime mode
	 */
	if (f2fs_is_time_consistent(inode) &&
		!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (!f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * We need to balance fs here to prevent from producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
		f2fs_invalidate_compress_pages(sbi, inode->i_ino);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_COMPRESS_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(sbi, FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT) {
			err = 0;

			/*
			 * In a fuzzed image, another node may have the same
			 * block address as this inode's; if that node was
			 * truncated earlier, truncating the inode's node page
			 * will fail here.
			 */
			if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
				f2fs_warn(F2FS_I_SB(inode),
					"f2fs_evict_inode: inconsistent node id, ino:%lu",
					inode->i_ino);
				f2fs_inode_synced(inode);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
	}

	/* give more chances in the ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		if (dquot_initialize_needed(inode))
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
		sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);
	stat_dec_compr_inode(inode);
	stat_sub_compr_blocks(inode,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));

	if (likely(!f2fs_cp_error(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* if f2fs_new_inode() failed, i_ino is zero; skip it */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach this error
		 * condition: err && !f2fs_exist_written_data(sbi,
		 * inode->i_ino, ORPHAN_INO). In that case,
		 * f2fs_check_nid_range() is enough to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * clear nlink of the inode in order to release its resources
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * we must call this to avoid the inode remaining dirty, which would
	 * cause a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make a bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op(), so that we don't lose this orphan if a
	 * checkpoint runs and is followed by a sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_inode_flag(inode, FI_FREE_NID);
		f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}