1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/file.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/stat.h>
11 #include <linux/buffer_head.h>
12 #include <linux/writeback.h>
13 #include <linux/blkdev.h>
14 #include <linux/falloc.h>
15 #include <linux/types.h>
16 #include <linux/compat.h>
17 #include <linux/uaccess.h>
18 #include <linux/mount.h>
19 #include <linux/pagevec.h>
20 #include <linux/uio.h>
21 #include <linux/uuid.h>
22 #include <linux/file.h>
23 #include <linux/nls.h>
24 #include <linux/sched/signal.h>
25 
26 #include "f2fs.h"
27 #include "node.h"
28 #include "segment.h"
29 #include "xattr.h"
30 #include "acl.h"
31 #include "gc.h"
32 #include "trace.h"
33 #include <trace/events/f2fs.h>
34 #include <uapi/linux/f2fs.h>
35 
36 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
37 {
38 	struct inode *inode = file_inode(vmf->vma->vm_file);
39 	vm_fault_t ret;
40 
41 	down_read(&F2FS_I(inode)->i_mmap_sem);
42 	ret = filemap_fault(vmf);
43 	up_read(&F2FS_I(inode)->i_mmap_sem);
44 
45 	if (!ret)
46 		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
47 							F2FS_BLKSIZE);
48 
49 	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
50 
51 	return ret;
52 }
53 
54 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
55 {
56 	struct page *page = vmf->page;
57 	struct inode *inode = file_inode(vmf->vma->vm_file);
58 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
59 	struct dnode_of_data dn;
60 	bool need_alloc = true;
61 	int err = 0;
62 
63 	if (unlikely(IS_IMMUTABLE(inode)))
64 		return VM_FAULT_SIGBUS;
65 
66 	if (unlikely(f2fs_cp_error(sbi))) {
67 		err = -EIO;
68 		goto err;
69 	}
70 
71 	if (!f2fs_is_checkpoint_ready(sbi)) {
72 		err = -ENOSPC;
73 		goto err;
74 	}
75 
76 #ifdef CONFIG_F2FS_FS_COMPRESSION
77 	if (f2fs_compressed_file(inode)) {
78 		int ret = f2fs_is_compressed_cluster(inode, page->index);
79 
80 		if (ret < 0) {
81 			err = ret;
82 			goto err;
83 		} else if (ret) {
84 			if (ret < F2FS_I(inode)->i_cluster_size) {
85 				err = -EAGAIN;
86 				goto err;
87 			}
88 			need_alloc = false;
89 		}
90 	}
91 #endif
92 	/* this should be done outside of any locked page */
93 	if (need_alloc)
94 		f2fs_balance_fs(sbi, true);
95 
96 	sb_start_pagefault(inode->i_sb);
97 
98 	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
99 
100 	file_update_time(vmf->vma->vm_file);
101 	down_read(&F2FS_I(inode)->i_mmap_sem);
102 	lock_page(page);
103 	if (unlikely(page->mapping != inode->i_mapping ||
104 			page_offset(page) > i_size_read(inode) ||
105 			!PageUptodate(page))) {
106 		unlock_page(page);
107 		err = -EFAULT;
108 		goto out_sem;
109 	}
110 
111 	if (need_alloc) {
112 		/* block allocation */
113 		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
114 		set_new_dnode(&dn, inode, NULL, NULL, 0);
115 		err = f2fs_get_block(&dn, page->index);
116 		f2fs_put_dnode(&dn);
117 		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
118 	}
119 
120 #ifdef CONFIG_F2FS_FS_COMPRESSION
121 	if (!need_alloc) {
122 		set_new_dnode(&dn, inode, NULL, NULL, 0);
123 		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
124 		f2fs_put_dnode(&dn);
125 	}
126 #endif
127 	if (err) {
128 		unlock_page(page);
129 		goto out_sem;
130 	}
131 
132 	f2fs_wait_on_page_writeback(page, DATA, false, true);
133 
134 	/* wait for GCed page writeback via META_MAPPING */
135 	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
136 
137 	/*
138 	 * check to see if the page is mapped already (no holes)
139 	 */
140 	if (PageMappedToDisk(page))
141 		goto out_sem;
142 
143 	/* page is wholly or partially inside EOF */
144 	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
145 						i_size_read(inode)) {
146 		loff_t offset;
147 
148 		offset = i_size_read(inode) & ~PAGE_MASK;
149 		zero_user_segment(page, offset, PAGE_SIZE);
150 	}
151 	set_page_dirty(page);
152 	if (!PageUptodate(page))
153 		SetPageUptodate(page);
154 
155 	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
156 	f2fs_update_time(sbi, REQ_TIME);
157 
158 	trace_f2fs_vm_page_mkwrite(page, DATA);
159 out_sem:
160 	up_read(&F2FS_I(inode)->i_mmap_sem);
161 
162 	sb_end_pagefault(inode->i_sb);
163 err:
164 	return block_page_mkwrite_return(err);
165 }
166 
167 static const struct vm_operations_struct f2fs_file_vm_ops = {
168 	.fault		= f2fs_filemap_fault,
169 	.map_pages	= filemap_map_pages,
170 	.page_mkwrite	= f2fs_vm_page_mkwrite,
171 };
172 
173 static int get_parent_ino(struct inode *inode, nid_t *pino)
174 {
175 	struct dentry *dentry;
176 
177 	/*
178 	 * Make sure to get the non-deleted alias.  The alias associated with
179 	 * the open file descriptor being fsync()'ed may be deleted already.
180 	 */
181 	dentry = d_find_alias(inode);
182 	if (!dentry)
183 		return 0;
184 
185 	*pino = parent_ino(dentry);
186 	dput(dentry);
187 	return 1;
188 }
189 
190 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
191 {
192 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
193 	enum cp_reason_type cp_reason = CP_NO_NEEDED;
194 
195 	if (!S_ISREG(inode->i_mode))
196 		cp_reason = CP_NON_REGULAR;
197 	else if (f2fs_compressed_file(inode))
198 		cp_reason = CP_COMPRESSED;
199 	else if (inode->i_nlink != 1)
200 		cp_reason = CP_HARDLINK;
201 	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
202 		cp_reason = CP_SB_NEED_CP;
203 	else if (file_wrong_pino(inode))
204 		cp_reason = CP_WRONG_PINO;
205 	else if (!f2fs_space_for_roll_forward(sbi))
206 		cp_reason = CP_NO_SPC_ROLL;
207 	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
208 		cp_reason = CP_NODE_NEED_CP;
209 	else if (test_opt(sbi, FASTBOOT))
210 		cp_reason = CP_FASTBOOT_MODE;
211 	else if (F2FS_OPTION(sbi).active_logs == 2)
212 		cp_reason = CP_SPEC_LOG_NUM;
213 	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
214 		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
215 		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
216 							TRANS_DIR_INO))
217 		cp_reason = CP_RECOVER_DIR;
218 
219 	return cp_reason;
220 }
221 
222 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
223 {
224 	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
225 	bool ret = false;
226 	/* but we still need to check for pending inode updates */
227 	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
228 		ret = true;
229 	f2fs_put_page(i, 0);
230 	return ret;
231 }
232 
233 static void try_to_fix_pino(struct inode *inode)
234 {
235 	struct f2fs_inode_info *fi = F2FS_I(inode);
236 	nid_t pino;
237 
238 	down_write(&fi->i_sem);
239 	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
240 			get_parent_ino(inode, &pino)) {
241 		f2fs_i_pino_write(inode, pino);
242 		file_got_pino(inode);
243 	}
244 	up_write(&fi->i_sem);
245 }
246 
247 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
248 						int datasync, bool atomic)
249 {
250 	struct inode *inode = file->f_mapping->host;
251 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
252 	nid_t ino = inode->i_ino;
253 	int ret = 0;
254 	enum cp_reason_type cp_reason = 0;
255 	struct writeback_control wbc = {
256 		.sync_mode = WB_SYNC_ALL,
257 		.nr_to_write = LONG_MAX,
258 		.for_reclaim = 0,
259 	};
260 	unsigned int seq_id = 0;
261 
262 	if (unlikely(f2fs_readonly(inode->i_sb)))
263 		return 0;
264 
265 	trace_f2fs_sync_file_enter(inode);
266 
267 	if (S_ISDIR(inode->i_mode))
268 		goto go_write;
269 
270 	/* if fdatasync is triggered, let's do in-place-update */
271 	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
272 		set_inode_flag(inode, FI_NEED_IPU);
273 	ret = file_write_and_wait_range(file, start, end);
274 	clear_inode_flag(inode, FI_NEED_IPU);
275 
276 	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
277 		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
278 		return ret;
279 	}
280 
281 	/* if the inode is dirty, let's recover all the time */
282 	if (!f2fs_skip_inode_update(inode, datasync)) {
283 		f2fs_write_inode(inode, NULL);
284 		goto go_write;
285 	}
286 
287 	/*
288 	 * if there is no written data, don't waste time writing recovery info.
289 	 */
290 	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
291 			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
292 
293 		/* it may call write_inode just prior to fsync */
294 		if (need_inode_page_update(sbi, ino))
295 			goto go_write;
296 
297 		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
298 				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
299 			goto flush_out;
300 		goto out;
301 	}
302 go_write:
303 	/*
304 	 * Both fdatasync() and fsync() can be recovered from a sudden
305 	 * power-off.
306 	 */
307 	down_read(&F2FS_I(inode)->i_sem);
308 	cp_reason = need_do_checkpoint(inode);
309 	up_read(&F2FS_I(inode)->i_sem);
310 
311 	if (cp_reason) {
312 		/* all the dirty node pages should be flushed for POR */
313 		ret = f2fs_sync_fs(inode->i_sb, 1);
314 
315 		/*
316 		 * We've secured consistency through sync_fs. Following pino
317 		 * will be used only for fsynced inodes after checkpoint.
318 		 */
319 		try_to_fix_pino(inode);
320 		clear_inode_flag(inode, FI_APPEND_WRITE);
321 		clear_inode_flag(inode, FI_UPDATE_WRITE);
322 		goto out;
323 	}
324 sync_nodes:
325 	atomic_inc(&sbi->wb_sync_req[NODE]);
326 	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
327 	atomic_dec(&sbi->wb_sync_req[NODE]);
328 	if (ret)
329 		goto out;
330 
331 	/* if cp_error was enabled, we should avoid infinite loop */
332 	if (unlikely(f2fs_cp_error(sbi))) {
333 		ret = -EIO;
334 		goto out;
335 	}
336 
337 	if (f2fs_need_inode_block_update(sbi, ino)) {
338 		f2fs_mark_inode_dirty_sync(inode, true);
339 		f2fs_write_inode(inode, NULL);
340 		goto sync_nodes;
341 	}
342 
343 	/*
344 	 * If it's an atomic_write, it's fine to keep the write ordering. So
345 	 * here we don't need to wait for node write completion, since we use
346 	 * a node chain which serializes node blocks. If one of the node writes
347 	 * is reordered, we simply see a broken chain, which stops roll-forward
348 	 * recovery. It means we'll recover either all or none of the node
349 	 * blocks given the fsync mark.
350 	 */
351 	if (!atomic) {
352 		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
353 		if (ret)
354 			goto out;
355 	}
356 
357 	/* once recovery info is written, we don't need to track this */
358 	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
359 	clear_inode_flag(inode, FI_APPEND_WRITE);
360 flush_out:
361 	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
362 		ret = f2fs_issue_flush(sbi, inode->i_ino);
363 	if (!ret) {
364 		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
365 		clear_inode_flag(inode, FI_UPDATE_WRITE);
366 		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
367 	}
368 	f2fs_update_time(sbi, REQ_TIME);
369 out:
370 	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
371 	f2fs_trace_ios(NULL, 1);
372 	return ret;
373 }
374 
375 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
376 {
377 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
378 		return -EIO;
379 	return f2fs_do_sync_file(file, start, end, datasync, false);
380 }
381 
382 static bool __found_offset(struct address_space *mapping, block_t blkaddr,
383 				pgoff_t index, int whence)
384 {
385 	switch (whence) {
386 	case SEEK_DATA:
387 		if (__is_valid_data_blkaddr(blkaddr))
388 			return true;
389 		if (blkaddr == NEW_ADDR &&
390 		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
391 			return true;
392 		break;
393 	case SEEK_HOLE:
394 		if (blkaddr == NULL_ADDR)
395 			return true;
396 		break;
397 	}
398 	return false;
399 }
400 
401 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
402 {
403 	struct inode *inode = file->f_mapping->host;
404 	loff_t maxbytes = inode->i_sb->s_maxbytes;
405 	struct dnode_of_data dn;
406 	pgoff_t pgofs, end_offset;
407 	loff_t data_ofs = offset;
408 	loff_t isize;
409 	int err = 0;
410 
411 	inode_lock(inode);
412 
413 	isize = i_size_read(inode);
414 	if (offset >= isize)
415 		goto fail;
416 
417 	/* handle inline data case */
418 	if (f2fs_has_inline_data(inode)) {
419 		if (whence == SEEK_HOLE) {
420 			data_ofs = isize;
421 			goto found;
422 		} else if (whence == SEEK_DATA) {
423 			data_ofs = offset;
424 			goto found;
425 		}
426 	}
427 
428 	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
429 
430 	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
431 		set_new_dnode(&dn, inode, NULL, NULL, 0);
432 		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
433 		if (err && err != -ENOENT) {
434 			goto fail;
435 		} else if (err == -ENOENT) {
436 			/* direct node does not exist */
437 			if (whence == SEEK_DATA) {
438 				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
439 				continue;
440 			} else {
441 				goto found;
442 			}
443 		}
444 
445 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
446 
447 		/* find data/hole in dnode block */
448 		for (; dn.ofs_in_node < end_offset;
449 				dn.ofs_in_node++, pgofs++,
450 				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
451 			block_t blkaddr;
452 
453 			blkaddr = f2fs_data_blkaddr(&dn);
454 
455 			if (__is_valid_data_blkaddr(blkaddr) &&
456 				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
457 					blkaddr, DATA_GENERIC_ENHANCE)) {
458 				f2fs_put_dnode(&dn);
459 				goto fail;
460 			}
461 
462 			if (__found_offset(file->f_mapping, blkaddr,
463 							pgofs, whence)) {
464 				f2fs_put_dnode(&dn);
465 				goto found;
466 			}
467 		}
468 		f2fs_put_dnode(&dn);
469 	}
470 
471 	if (whence == SEEK_DATA)
472 		goto fail;
473 found:
474 	if (whence == SEEK_HOLE && data_ofs > isize)
475 		data_ofs = isize;
476 	inode_unlock(inode);
477 	return vfs_setpos(file, data_ofs, maxbytes);
478 fail:
479 	inode_unlock(inode);
480 	return -ENXIO;
481 }
482 
483 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
484 {
485 	struct inode *inode = file->f_mapping->host;
486 	loff_t maxbytes = inode->i_sb->s_maxbytes;
487 
488 	switch (whence) {
489 	case SEEK_SET:
490 	case SEEK_CUR:
491 	case SEEK_END:
492 		return generic_file_llseek_size(file, offset, whence,
493 						maxbytes, i_size_read(inode));
494 	case SEEK_DATA:
495 	case SEEK_HOLE:
496 		if (offset < 0)
497 			return -ENXIO;
498 		return f2fs_seek_block(file, offset, whence);
499 	}
500 
501 	return -EINVAL;
502 }
503 
504 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
505 {
506 	struct inode *inode = file_inode(file);
507 	int err;
508 
509 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
510 		return -EIO;
511 
512 	if (!f2fs_is_compress_backend_ready(inode))
513 		return -EOPNOTSUPP;
514 
515 	/* we don't need to use inline_data strictly */
516 	err = f2fs_convert_inline_inode(inode);
517 	if (err)
518 		return err;
519 
520 	file_accessed(file);
521 	vma->vm_ops = &f2fs_file_vm_ops;
522 	set_inode_flag(inode, FI_MMAP_FILE);
523 	return 0;
524 }
525 
526 static int f2fs_file_open(struct inode *inode, struct file *filp)
527 {
528 	int err = fscrypt_file_open(inode, filp);
529 
530 	if (err)
531 		return err;
532 
533 	if (!f2fs_is_compress_backend_ready(inode))
534 		return -EOPNOTSUPP;
535 
536 	err = fsverity_file_open(inode, filp);
537 	if (err)
538 		return err;
539 
540 	filp->f_mode |= FMODE_NOWAIT;
541 
542 	return dquot_file_open(inode, filp);
543 }
544 
545 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
546 {
547 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
548 	struct f2fs_node *raw_node;
549 	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
550 	__le32 *addr;
551 	int base = 0;
552 	bool compressed_cluster = false;
553 	int cluster_index = 0, valid_blocks = 0;
554 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
555 	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
556 
557 	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
558 		base = get_extra_isize(dn->inode);
559 
560 	raw_node = F2FS_NODE(dn->node_page);
561 	addr = blkaddr_in_node(raw_node) + base + ofs;
562 
563 	/* Assumption: truncation starts at a cluster boundary */
564 	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
565 		block_t blkaddr = le32_to_cpu(*addr);
566 
567 		if (f2fs_compressed_file(dn->inode) &&
568 					!(cluster_index & (cluster_size - 1))) {
569 			if (compressed_cluster)
570 				f2fs_i_compr_blocks_update(dn->inode,
571 							valid_blocks, false);
572 			compressed_cluster = (blkaddr == COMPRESS_ADDR);
573 			valid_blocks = 0;
574 		}
575 
576 		if (blkaddr == NULL_ADDR)
577 			continue;
578 
579 		dn->data_blkaddr = NULL_ADDR;
580 		f2fs_set_data_blkaddr(dn);
581 
582 		if (__is_valid_data_blkaddr(blkaddr)) {
583 			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
584 					DATA_GENERIC_ENHANCE))
585 				continue;
586 			if (compressed_cluster)
587 				valid_blocks++;
588 		}
589 
590 		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
591 			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
592 
593 		f2fs_invalidate_blocks(sbi, blkaddr);
594 
595 		if (!released || blkaddr != COMPRESS_ADDR)
596 			nr_free++;
597 	}
598 
599 	if (compressed_cluster)
600 		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
601 
602 	if (nr_free) {
603 		pgoff_t fofs;
604 		/*
605 		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
606 		 * we will invalidate all blkaddr in the whole range.
607 		 */
608 		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
609 							dn->inode) + ofs;
610 		f2fs_update_extent_cache_range(dn, fofs, 0, len);
611 		dec_valid_block_count(sbi, dn->inode, nr_free);
612 	}
613 	dn->ofs_in_node = ofs;
614 
615 	f2fs_update_time(sbi, REQ_TIME);
616 	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
617 					 dn->ofs_in_node, nr_free);
618 }
619 
620 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
621 {
622 	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
623 }
624 
625 static int truncate_partial_data_page(struct inode *inode, u64 from,
626 								bool cache_only)
627 {
628 	loff_t offset = from & (PAGE_SIZE - 1);
629 	pgoff_t index = from >> PAGE_SHIFT;
630 	struct address_space *mapping = inode->i_mapping;
631 	struct page *page;
632 
633 	if (!offset && !cache_only)
634 		return 0;
635 
636 	if (cache_only) {
637 		page = find_lock_page(mapping, index);
638 		if (page && PageUptodate(page))
639 			goto truncate_out;
640 		f2fs_put_page(page, 1);
641 		return 0;
642 	}
643 
644 	page = f2fs_get_lock_data_page(inode, index, true);
645 	if (IS_ERR(page))
646 		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
647 truncate_out:
648 	f2fs_wait_on_page_writeback(page, DATA, true, true);
649 	zero_user(page, offset, PAGE_SIZE - offset);
650 
651 	/* An encrypted inode should have a key and truncate the last page. */
652 	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
653 	if (!cache_only)
654 		set_page_dirty(page);
655 	f2fs_put_page(page, 1);
656 	return 0;
657 }
658 
659 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
660 {
661 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
662 	struct dnode_of_data dn;
663 	pgoff_t free_from;
664 	int count = 0, err = 0;
665 	struct page *ipage;
666 	bool truncate_page = false;
667 
668 	trace_f2fs_truncate_blocks_enter(inode, from);
669 
670 	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
671 
672 	if (free_from >= sbi->max_file_blocks)
673 		goto free_partial;
674 
675 	if (lock)
676 		f2fs_lock_op(sbi);
677 
678 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
679 	if (IS_ERR(ipage)) {
680 		err = PTR_ERR(ipage);
681 		goto out;
682 	}
683 
684 	if (f2fs_has_inline_data(inode)) {
685 		f2fs_truncate_inline_inode(inode, ipage, from);
686 		f2fs_put_page(ipage, 1);
687 		truncate_page = true;
688 		goto out;
689 	}
690 
691 	set_new_dnode(&dn, inode, ipage, NULL, 0);
692 	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
693 	if (err) {
694 		if (err == -ENOENT)
695 			goto free_next;
696 		goto out;
697 	}
698 
699 	count = ADDRS_PER_PAGE(dn.node_page, inode);
700 
701 	count -= dn.ofs_in_node;
702 	f2fs_bug_on(sbi, count < 0);
703 
704 	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
705 		f2fs_truncate_data_blocks_range(&dn, count);
706 		free_from += count;
707 	}
708 
709 	f2fs_put_dnode(&dn);
710 free_next:
711 	err = f2fs_truncate_inode_blocks(inode, free_from);
712 out:
713 	if (lock)
714 		f2fs_unlock_op(sbi);
715 free_partial:
716 	/* lastly zero out the first data page */
717 	if (!err)
718 		err = truncate_partial_data_page(inode, from, truncate_page);
719 
720 	trace_f2fs_truncate_blocks_exit(inode, err);
721 	return err;
722 }
723 
724 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
725 {
726 	u64 free_from = from;
727 	int err;
728 
729 #ifdef CONFIG_F2FS_FS_COMPRESSION
730 	/*
731 	 * for compressed files, only cluster-size-aligned
732 	 * truncation is supported.
733 	 */
734 	if (f2fs_compressed_file(inode))
735 		free_from = round_up(from,
736 				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
737 #endif
738 
739 	err = f2fs_do_truncate_blocks(inode, free_from, lock);
740 	if (err)
741 		return err;
742 
743 #ifdef CONFIG_F2FS_FS_COMPRESSION
744 	if (from != free_from) {
745 		err = f2fs_truncate_partial_cluster(inode, from, lock);
746 		if (err)
747 			return err;
748 	}
749 #endif
750 
751 	return 0;
752 }
753 
754 int f2fs_truncate(struct inode *inode)
755 {
756 	int err;
757 
758 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
759 		return -EIO;
760 
761 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
762 				S_ISLNK(inode->i_mode)))
763 		return 0;
764 
765 	trace_f2fs_truncate(inode);
766 
767 	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
768 		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
769 		return -EIO;
770 	}
771 
772 	err = dquot_initialize(inode);
773 	if (err)
774 		return err;
775 
776 	/* we should check inline_data size */
777 	if (!f2fs_may_inline_data(inode)) {
778 		err = f2fs_convert_inline_inode(inode);
779 		if (err)
780 			return err;
781 	}
782 
783 	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
784 	if (err)
785 		return err;
786 
787 	inode->i_mtime = inode->i_ctime = current_time(inode);
788 	f2fs_mark_inode_dirty_sync(inode, false);
789 	return 0;
790 }
791 
792 int f2fs_getattr(const struct path *path, struct kstat *stat,
793 		 u32 request_mask, unsigned int query_flags)
794 {
795 	struct inode *inode = d_inode(path->dentry);
796 	struct f2fs_inode_info *fi = F2FS_I(inode);
797 	struct f2fs_inode *ri;
798 	unsigned int flags;
799 
800 	if (f2fs_has_extra_attr(inode) &&
801 			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
802 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
803 		stat->result_mask |= STATX_BTIME;
804 		stat->btime.tv_sec = fi->i_crtime.tv_sec;
805 		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
806 	}
807 
808 	flags = fi->i_flags;
809 	if (flags & F2FS_COMPR_FL)
810 		stat->attributes |= STATX_ATTR_COMPRESSED;
811 	if (flags & F2FS_APPEND_FL)
812 		stat->attributes |= STATX_ATTR_APPEND;
813 	if (IS_ENCRYPTED(inode))
814 		stat->attributes |= STATX_ATTR_ENCRYPTED;
815 	if (flags & F2FS_IMMUTABLE_FL)
816 		stat->attributes |= STATX_ATTR_IMMUTABLE;
817 	if (flags & F2FS_NODUMP_FL)
818 		stat->attributes |= STATX_ATTR_NODUMP;
819 	if (IS_VERITY(inode))
820 		stat->attributes |= STATX_ATTR_VERITY;
821 
822 	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
823 				  STATX_ATTR_APPEND |
824 				  STATX_ATTR_ENCRYPTED |
825 				  STATX_ATTR_IMMUTABLE |
826 				  STATX_ATTR_NODUMP |
827 				  STATX_ATTR_VERITY);
828 
829 	generic_fillattr(inode, stat);
830 
831 	/* we need to show initial sectors used for inline_data/dentries */
832 	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
833 					f2fs_has_inline_dentry(inode))
834 		stat->blocks += (stat->size + 511) >> 9;
835 
836 	return 0;
837 }
838 
839 #ifdef CONFIG_F2FS_FS_POSIX_ACL
840 static void __setattr_copy(struct inode *inode, const struct iattr *attr)
841 {
842 	unsigned int ia_valid = attr->ia_valid;
843 
844 	if (ia_valid & ATTR_UID)
845 		inode->i_uid = attr->ia_uid;
846 	if (ia_valid & ATTR_GID)
847 		inode->i_gid = attr->ia_gid;
848 	if (ia_valid & ATTR_ATIME)
849 		inode->i_atime = attr->ia_atime;
850 	if (ia_valid & ATTR_MTIME)
851 		inode->i_mtime = attr->ia_mtime;
852 	if (ia_valid & ATTR_CTIME)
853 		inode->i_ctime = attr->ia_ctime;
854 	if (ia_valid & ATTR_MODE) {
855 		umode_t mode = attr->ia_mode;
856 
857 		if (!in_group_p(inode->i_gid) &&
858 			!capable_wrt_inode_uidgid(inode, CAP_FSETID))
859 			mode &= ~S_ISGID;
860 		set_acl_inode(inode, mode);
861 	}
862 }
863 #else
864 #define __setattr_copy setattr_copy
865 #endif
866 
867 int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
868 {
869 	struct inode *inode = d_inode(dentry);
870 	int err;
871 
872 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
873 		return -EIO;
874 
875 	if (unlikely(IS_IMMUTABLE(inode)))
876 		return -EPERM;
877 
878 	if (unlikely(IS_APPEND(inode) &&
879 			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
880 				  ATTR_GID | ATTR_TIMES_SET))))
881 		return -EPERM;
882 
883 	if ((attr->ia_valid & ATTR_SIZE) &&
884 		!f2fs_is_compress_backend_ready(inode))
885 		return -EOPNOTSUPP;
886 
887 	err = setattr_prepare(dentry, attr);
888 	if (err)
889 		return err;
890 
891 	err = fscrypt_prepare_setattr(dentry, attr);
892 	if (err)
893 		return err;
894 
895 	err = fsverity_prepare_setattr(dentry, attr);
896 	if (err)
897 		return err;
898 
899 	if (is_quota_modification(inode, attr)) {
900 		err = dquot_initialize(inode);
901 		if (err)
902 			return err;
903 	}
904 	if ((attr->ia_valid & ATTR_UID &&
905 		!uid_eq(attr->ia_uid, inode->i_uid)) ||
906 		(attr->ia_valid & ATTR_GID &&
907 		!gid_eq(attr->ia_gid, inode->i_gid))) {
908 		f2fs_lock_op(F2FS_I_SB(inode));
909 		err = dquot_transfer(inode, attr);
910 		if (err) {
911 			set_sbi_flag(F2FS_I_SB(inode),
912 					SBI_QUOTA_NEED_REPAIR);
913 			f2fs_unlock_op(F2FS_I_SB(inode));
914 			return err;
915 		}
916 		/*
917 		 * update uid/gid under lock_op(), so that dquot and inode can
918 		 * be updated atomically.
919 		 */
920 		if (attr->ia_valid & ATTR_UID)
921 			inode->i_uid = attr->ia_uid;
922 		if (attr->ia_valid & ATTR_GID)
923 			inode->i_gid = attr->ia_gid;
924 		f2fs_mark_inode_dirty_sync(inode, true);
925 		f2fs_unlock_op(F2FS_I_SB(inode));
926 	}
927 
928 	if (attr->ia_valid & ATTR_SIZE) {
929 		loff_t old_size = i_size_read(inode);
930 
931 		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
932 			/*
933 			 * should convert the inline inode before i_size_write to
934 			 * keep the size within the inline_data limit while the inline flag is set.
935 			 */
936 			err = f2fs_convert_inline_inode(inode);
937 			if (err)
938 				return err;
939 		}
940 
941 		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
942 		down_write(&F2FS_I(inode)->i_mmap_sem);
943 
944 		truncate_setsize(inode, attr->ia_size);
945 
946 		if (attr->ia_size <= old_size)
947 			err = f2fs_truncate(inode);
948 		/*
949 		 * do not trim all blocks after i_size if target size is
950 		 * larger than i_size.
951 		 */
952 		up_write(&F2FS_I(inode)->i_mmap_sem);
953 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
954 		if (err)
955 			return err;
956 
957 		spin_lock(&F2FS_I(inode)->i_size_lock);
958 		inode->i_mtime = inode->i_ctime = current_time(inode);
959 		F2FS_I(inode)->last_disk_size = i_size_read(inode);
960 		spin_unlock(&F2FS_I(inode)->i_size_lock);
961 	}
962 
963 	__setattr_copy(inode, attr);
964 
965 	if (attr->ia_valid & ATTR_MODE) {
966 		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
967 		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
968 			inode->i_mode = F2FS_I(inode)->i_acl_mode;
969 			clear_inode_flag(inode, FI_ACL_MODE);
970 		}
971 	}
972 
973 	/* the file size may have changed here */
974 	f2fs_mark_inode_dirty_sync(inode, true);
975 
976 	/* inode change will produce dirty node pages flushed by checkpoint */
977 	f2fs_balance_fs(F2FS_I_SB(inode), true);
978 
979 	return err;
980 }
981 
982 const struct inode_operations f2fs_file_inode_operations = {
983 	.getattr	= f2fs_getattr,
984 	.setattr	= f2fs_setattr,
985 	.get_acl	= f2fs_get_acl,
986 	.set_acl	= f2fs_set_acl,
987 	.listxattr	= f2fs_listxattr,
988 	.fiemap		= f2fs_fiemap,
989 };
990 
991 static int fill_zero(struct inode *inode, pgoff_t index,
992 					loff_t start, loff_t len)
993 {
994 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
995 	struct page *page;
996 
997 	if (!len)
998 		return 0;
999 
1000 	f2fs_balance_fs(sbi, true);
1001 
1002 	f2fs_lock_op(sbi);
1003 	page = f2fs_get_new_data_page(inode, NULL, index, false);
1004 	f2fs_unlock_op(sbi);
1005 
1006 	if (IS_ERR(page))
1007 		return PTR_ERR(page);
1008 
1009 	f2fs_wait_on_page_writeback(page, DATA, true, true);
1010 	zero_user(page, start, len);
1011 	set_page_dirty(page);
1012 	f2fs_put_page(page, 1);
1013 	return 0;
1014 }
1015 
1016 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1017 {
1018 	int err;
1019 
1020 	while (pg_start < pg_end) {
1021 		struct dnode_of_data dn;
1022 		pgoff_t end_offset, count;
1023 
1024 		set_new_dnode(&dn, inode, NULL, NULL, 0);
1025 		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1026 		if (err) {
1027 			if (err == -ENOENT) {
1028 				pg_start = f2fs_get_next_page_offset(&dn,
1029 								pg_start);
1030 				continue;
1031 			}
1032 			return err;
1033 		}
1034 
1035 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1036 		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1037 
1038 		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1039 
1040 		f2fs_truncate_data_blocks_range(&dn, count);
1041 		f2fs_put_dnode(&dn);
1042 
1043 		pg_start += count;
1044 	}
1045 	return 0;
1046 }
1047 
1048 static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
1049 {
1050 	pgoff_t pg_start, pg_end;
1051 	loff_t off_start, off_end;
1052 	int ret;
1053 
1054 	ret = f2fs_convert_inline_inode(inode);
1055 	if (ret)
1056 		return ret;
1057 
1058 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1059 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1060 
1061 	off_start = offset & (PAGE_SIZE - 1);
1062 	off_end = (offset + len) & (PAGE_SIZE - 1);
1063 
1064 	if (pg_start == pg_end) {
1065 		ret = fill_zero(inode, pg_start, off_start,
1066 						off_end - off_start);
1067 		if (ret)
1068 			return ret;
1069 	} else {
1070 		if (off_start) {
1071 			ret = fill_zero(inode, pg_start++, off_start,
1072 						PAGE_SIZE - off_start);
1073 			if (ret)
1074 				return ret;
1075 		}
1076 		if (off_end) {
1077 			ret = fill_zero(inode, pg_end, 0, off_end);
1078 			if (ret)
1079 				return ret;
1080 		}
1081 
1082 		if (pg_start < pg_end) {
1083 			loff_t blk_start, blk_end;
1084 			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1085 
1086 			f2fs_balance_fs(sbi, true);
1087 
1088 			blk_start = (loff_t)pg_start << PAGE_SHIFT;
1089 			blk_end = (loff_t)pg_end << PAGE_SHIFT;
1090 
1091 			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1092 			down_write(&F2FS_I(inode)->i_mmap_sem);
1093 
1094 			truncate_pagecache_range(inode, blk_start, blk_end - 1);
1095 
1096 			f2fs_lock_op(sbi);
1097 			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1098 			f2fs_unlock_op(sbi);
1099 
1100 			up_write(&F2FS_I(inode)->i_mmap_sem);
1101 			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1102 		}
1103 	}
1104 
1105 	return ret;
1106 }
1107 
1108 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1109 				int *do_replace, pgoff_t off, pgoff_t len)
1110 {
1111 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1112 	struct dnode_of_data dn;
1113 	int ret, done, i;
1114 
1115 next_dnode:
1116 	set_new_dnode(&dn, inode, NULL, NULL, 0);
1117 	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1118 	if (ret && ret != -ENOENT) {
1119 		return ret;
1120 	} else if (ret == -ENOENT) {
1121 		if (dn.max_level == 0)
1122 			return -ENOENT;
1123 		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1124 						dn.ofs_in_node, len);
1125 		blkaddr += done;
1126 		do_replace += done;
1127 		goto next;
1128 	}
1129 
1130 	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1131 							dn.ofs_in_node, len);
1132 	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1133 		*blkaddr = f2fs_data_blkaddr(&dn);
1134 
1135 		if (__is_valid_data_blkaddr(*blkaddr) &&
1136 			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
1137 					DATA_GENERIC_ENHANCE)) {
1138 			f2fs_put_dnode(&dn);
1139 			return -EFSCORRUPTED;
1140 		}
1141 
1142 		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1143 
1144 			if (f2fs_lfs_mode(sbi)) {
1145 				f2fs_put_dnode(&dn);
1146 				return -EOPNOTSUPP;
1147 			}
1148 
1149 			/* do not invalidate this block address */
1150 			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1151 			*do_replace = 1;
1152 		}
1153 	}
1154 	f2fs_put_dnode(&dn);
1155 next:
1156 	len -= done;
1157 	off += done;
1158 	if (len)
1159 		goto next_dnode;
1160 	return 0;
1161 }
1162 
1163 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1164 				int *do_replace, pgoff_t off, int len)
1165 {
1166 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1167 	struct dnode_of_data dn;
1168 	int ret, i;
1169 
1170 	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1171 		if (*do_replace == 0)
1172 			continue;
1173 
1174 		set_new_dnode(&dn, inode, NULL, NULL, 0);
1175 		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1176 		if (ret) {
1177 			dec_valid_block_count(sbi, inode, 1);
1178 			f2fs_invalidate_blocks(sbi, *blkaddr);
1179 		} else {
1180 			f2fs_update_data_blkaddr(&dn, *blkaddr);
1181 		}
1182 		f2fs_put_dnode(&dn);
1183 	}
1184 	return 0;
1185 }
1186 
1187 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1188 			block_t *blkaddr, int *do_replace,
1189 			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1190 {
1191 	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1192 	pgoff_t i = 0;
1193 	int ret;
1194 
1195 	while (i < len) {
1196 		if (blkaddr[i] == NULL_ADDR && !full) {
1197 			i++;
1198 			continue;
1199 		}
1200 
1201 		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1202 			struct dnode_of_data dn;
1203 			struct node_info ni;
1204 			size_t new_size;
1205 			pgoff_t ilen;
1206 
1207 			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1208 			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1209 			if (ret)
1210 				return ret;
1211 
1212 			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
1213 			if (ret) {
1214 				f2fs_put_dnode(&dn);
1215 				return ret;
1216 			}
1217 
1218 			ilen = min((pgoff_t)
1219 				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1220 						dn.ofs_in_node, len - i);
1221 			do {
1222 				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1223 				f2fs_truncate_data_blocks_range(&dn, 1);
1224 
1225 				if (do_replace[i]) {
1226 					f2fs_i_blocks_write(src_inode,
1227 							1, false, false);
1228 					f2fs_i_blocks_write(dst_inode,
1229 							1, true, false);
1230 					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1231 					blkaddr[i], ni.version, true, false);
1232 
1233 					do_replace[i] = 0;
1234 				}
1235 				dn.ofs_in_node++;
1236 				i++;
1237 				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1238 				if (dst_inode->i_size < new_size)
1239 					f2fs_i_size_write(dst_inode, new_size);
1240 			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1241 
1242 			f2fs_put_dnode(&dn);
1243 		} else {
1244 			struct page *psrc, *pdst;
1245 
1246 			psrc = f2fs_get_lock_data_page(src_inode,
1247 							src + i, true);
1248 			if (IS_ERR(psrc))
1249 				return PTR_ERR(psrc);
1250 			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1251 								true);
1252 			if (IS_ERR(pdst)) {
1253 				f2fs_put_page(psrc, 1);
1254 				return PTR_ERR(pdst);
1255 			}
1256 			f2fs_copy_page(psrc, pdst);
1257 			set_page_dirty(pdst);
1258 			f2fs_put_page(pdst, 1);
1259 			f2fs_put_page(psrc, 1);
1260 
1261 			ret = f2fs_truncate_hole(src_inode,
1262 						src + i, src + i + 1);
1263 			if (ret)
1264 				return ret;
1265 			i++;
1266 		}
1267 	}
1268 	return 0;
1269 }
1270 
1271 static int __exchange_data_block(struct inode *src_inode,
1272 			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1273 			pgoff_t len, bool full)
1274 {
1275 	block_t *src_blkaddr;
1276 	int *do_replace;
1277 	pgoff_t olen;
1278 	int ret;
1279 
1280 	while (len) {
1281 		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1282 
1283 		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1284 					array_size(olen, sizeof(block_t)),
1285 					GFP_NOFS);
1286 		if (!src_blkaddr)
1287 			return -ENOMEM;
1288 
1289 		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1290 					array_size(olen, sizeof(int)),
1291 					GFP_NOFS);
1292 		if (!do_replace) {
1293 			kvfree(src_blkaddr);
1294 			return -ENOMEM;
1295 		}
1296 
1297 		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1298 					do_replace, src, olen);
1299 		if (ret)
1300 			goto roll_back;
1301 
1302 		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1303 					do_replace, src, dst, olen, full);
1304 		if (ret)
1305 			goto roll_back;
1306 
1307 		src += olen;
1308 		dst += olen;
1309 		len -= olen;
1310 
1311 		kvfree(src_blkaddr);
1312 		kvfree(do_replace);
1313 	}
1314 	return 0;
1315 
1316 roll_back:
1317 	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1318 	kvfree(src_blkaddr);
1319 	kvfree(do_replace);
1320 	return ret;
1321 }
1322 
1323 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1324 {
1325 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1326 	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1327 	pgoff_t start = offset >> PAGE_SHIFT;
1328 	pgoff_t end = (offset + len) >> PAGE_SHIFT;
1329 	int ret;
1330 
1331 	f2fs_balance_fs(sbi, true);
1332 
1333 	/* avoid gc operation during block exchange */
1334 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1335 	down_write(&F2FS_I(inode)->i_mmap_sem);
1336 
1337 	f2fs_lock_op(sbi);
1338 	f2fs_drop_extent_tree(inode);
1339 	truncate_pagecache(inode, offset);
1340 	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1341 	f2fs_unlock_op(sbi);
1342 
1343 	up_write(&F2FS_I(inode)->i_mmap_sem);
1344 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1345 	return ret;
1346 }
1347 
1348 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1349 {
1350 	loff_t new_size;
1351 	int ret;
1352 
1353 	if (offset + len >= i_size_read(inode))
1354 		return -EINVAL;
1355 
1356 	/* collapse range should be aligned to block size of f2fs. */
1357 	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1358 		return -EINVAL;
1359 
1360 	ret = f2fs_convert_inline_inode(inode);
1361 	if (ret)
1362 		return ret;
1363 
1364 	/* write out all dirty pages from offset */
1365 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1366 	if (ret)
1367 		return ret;
1368 
1369 	ret = f2fs_do_collapse(inode, offset, len);
1370 	if (ret)
1371 		return ret;
1372 
1373 	/* write out all moved pages, if possible */
1374 	down_write(&F2FS_I(inode)->i_mmap_sem);
1375 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1376 	truncate_pagecache(inode, offset);
1377 
1378 	new_size = i_size_read(inode) - len;
1379 	ret = f2fs_truncate_blocks(inode, new_size, true);
1380 	up_write(&F2FS_I(inode)->i_mmap_sem);
1381 	if (!ret)
1382 		f2fs_i_size_write(inode, new_size);
1383 	return ret;
1384 }
1385 
1386 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1387 								pgoff_t end)
1388 {
1389 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1390 	pgoff_t index = start;
1391 	unsigned int ofs_in_node = dn->ofs_in_node;
1392 	blkcnt_t count = 0;
1393 	int ret;
1394 
1395 	for (; index < end; index++, dn->ofs_in_node++) {
1396 		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1397 			count++;
1398 	}
1399 
1400 	dn->ofs_in_node = ofs_in_node;
1401 	ret = f2fs_reserve_new_blocks(dn, count);
1402 	if (ret)
1403 		return ret;
1404 
1405 	dn->ofs_in_node = ofs_in_node;
1406 	for (index = start; index < end; index++, dn->ofs_in_node++) {
1407 		dn->data_blkaddr = f2fs_data_blkaddr(dn);
1408 		/*
1409 		 * f2fs_reserve_new_blocks will not guarantee entire block
1410 		 * allocation.
1411 		 */
1412 		if (dn->data_blkaddr == NULL_ADDR) {
1413 			ret = -ENOSPC;
1414 			break;
1415 		}
1416 
1417 		if (dn->data_blkaddr == NEW_ADDR)
1418 			continue;
1419 
1420 		if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
1421 					DATA_GENERIC_ENHANCE)) {
1422 			ret = -EFSCORRUPTED;
1423 			break;
1424 		}
1425 
1426 		f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1427 		dn->data_blkaddr = NEW_ADDR;
1428 		f2fs_set_data_blkaddr(dn);
1429 	}
1430 
1431 	f2fs_update_extent_cache_range(dn, start, 0, index - start);
1432 
1433 	return ret;
1434 }
1435 
1436 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1437 								int mode)
1438 {
1439 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1440 	struct address_space *mapping = inode->i_mapping;
1441 	pgoff_t index, pg_start, pg_end;
1442 	loff_t new_size = i_size_read(inode);
1443 	loff_t off_start, off_end;
1444 	int ret = 0;
1445 
1446 	ret = inode_newsize_ok(inode, (len + offset));
1447 	if (ret)
1448 		return ret;
1449 
1450 	ret = f2fs_convert_inline_inode(inode);
1451 	if (ret)
1452 		return ret;
1453 
1454 	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1455 	if (ret)
1456 		return ret;
1457 
1458 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1459 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1460 
1461 	off_start = offset & (PAGE_SIZE - 1);
1462 	off_end = (offset + len) & (PAGE_SIZE - 1);
1463 
1464 	if (pg_start == pg_end) {
1465 		ret = fill_zero(inode, pg_start, off_start,
1466 						off_end - off_start);
1467 		if (ret)
1468 			return ret;
1469 
1470 		new_size = max_t(loff_t, new_size, offset + len);
1471 	} else {
1472 		if (off_start) {
1473 			ret = fill_zero(inode, pg_start++, off_start,
1474 						PAGE_SIZE - off_start);
1475 			if (ret)
1476 				return ret;
1477 
1478 			new_size = max_t(loff_t, new_size,
1479 					(loff_t)pg_start << PAGE_SHIFT);
1480 		}
1481 
1482 		for (index = pg_start; index < pg_end;) {
1483 			struct dnode_of_data dn;
1484 			unsigned int end_offset;
1485 			pgoff_t end;
1486 
1487 			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1488 			down_write(&F2FS_I(inode)->i_mmap_sem);
1489 
1490 			truncate_pagecache_range(inode,
1491 				(loff_t)index << PAGE_SHIFT,
1492 				((loff_t)pg_end << PAGE_SHIFT) - 1);
1493 
1494 			f2fs_lock_op(sbi);
1495 
1496 			set_new_dnode(&dn, inode, NULL, NULL, 0);
1497 			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1498 			if (ret) {
1499 				f2fs_unlock_op(sbi);
1500 				up_write(&F2FS_I(inode)->i_mmap_sem);
1501 				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1502 				goto out;
1503 			}
1504 
1505 			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1506 			end = min(pg_end, end_offset - dn.ofs_in_node + index);
1507 
1508 			ret = f2fs_do_zero_range(&dn, index, end);
1509 			f2fs_put_dnode(&dn);
1510 
1511 			f2fs_unlock_op(sbi);
1512 			up_write(&F2FS_I(inode)->i_mmap_sem);
1513 			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1514 
1515 			f2fs_balance_fs(sbi, dn.node_changed);
1516 
1517 			if (ret)
1518 				goto out;
1519 
1520 			index = end;
1521 			new_size = max_t(loff_t, new_size,
1522 					(loff_t)index << PAGE_SHIFT);
1523 		}
1524 
1525 		if (off_end) {
1526 			ret = fill_zero(inode, pg_end, 0, off_end);
1527 			if (ret)
1528 				goto out;
1529 
1530 			new_size = max_t(loff_t, new_size, offset + len);
1531 		}
1532 	}
1533 
1534 out:
1535 	if (new_size > i_size_read(inode)) {
1536 		if (mode & FALLOC_FL_KEEP_SIZE)
1537 			file_set_keep_isize(inode);
1538 		else
1539 			f2fs_i_size_write(inode, new_size);
1540 	}
1541 	return ret;
1542 }
1543 
1544 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1545 {
1546 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1547 	pgoff_t nr, pg_start, pg_end, delta, idx;
1548 	loff_t new_size;
1549 	int ret = 0;
1550 
1551 	new_size = i_size_read(inode) + len;
1552 	ret = inode_newsize_ok(inode, new_size);
1553 	if (ret)
1554 		return ret;
1555 
1556 	if (offset >= i_size_read(inode))
1557 		return -EINVAL;
1558 
1559 	/* insert range should be aligned to block size of f2fs. */
1560 	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1561 		return -EINVAL;
1562 
1563 	ret = f2fs_convert_inline_inode(inode);
1564 	if (ret)
1565 		return ret;
1566 
1567 	f2fs_balance_fs(sbi, true);
1568 
1569 	down_write(&F2FS_I(inode)->i_mmap_sem);
1570 	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1571 	up_write(&F2FS_I(inode)->i_mmap_sem);
1572 	if (ret)
1573 		return ret;
1574 
1575 	/* write out all dirty pages from offset */
1576 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1577 	if (ret)
1578 		return ret;
1579 
1580 	pg_start = offset >> PAGE_SHIFT;
1581 	pg_end = (offset + len) >> PAGE_SHIFT;
1582 	delta = pg_end - pg_start;
1583 	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1584 
1585 	/* avoid gc operation during block exchange */
1586 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1587 	down_write(&F2FS_I(inode)->i_mmap_sem);
1588 	truncate_pagecache(inode, offset);
1589 
1590 	while (!ret && idx > pg_start) {
1591 		nr = idx - pg_start;
1592 		if (nr > delta)
1593 			nr = delta;
1594 		idx -= nr;
1595 
1596 		f2fs_lock_op(sbi);
1597 		f2fs_drop_extent_tree(inode);
1598 
1599 		ret = __exchange_data_block(inode, inode, idx,
1600 					idx + delta, nr, false);
1601 		f2fs_unlock_op(sbi);
1602 	}
1603 	up_write(&F2FS_I(inode)->i_mmap_sem);
1604 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1605 
1606 	/* write out all moved pages, if possible */
1607 	down_write(&F2FS_I(inode)->i_mmap_sem);
1608 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1609 	truncate_pagecache(inode, offset);
1610 	up_write(&F2FS_I(inode)->i_mmap_sem);
1611 
1612 	if (!ret)
1613 		f2fs_i_size_write(inode, new_size);
1614 	return ret;
1615 }
1616 
1617 static int expand_inode_data(struct inode *inode, loff_t offset,
1618 					loff_t len, int mode)
1619 {
1620 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1621 	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1622 			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1623 			.m_may_create = true };
1624 	pgoff_t pg_start, pg_end;
1625 	loff_t new_size = i_size_read(inode);
1626 	loff_t off_end;
1627 	block_t expanded = 0;
1628 	int err;
1629 
1630 	err = inode_newsize_ok(inode, (len + offset));
1631 	if (err)
1632 		return err;
1633 
1634 	err = f2fs_convert_inline_inode(inode);
1635 	if (err)
1636 		return err;
1637 
1638 	f2fs_balance_fs(sbi, true);
1639 
1640 	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1641 	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1642 	off_end = (offset + len) & (PAGE_SIZE - 1);
1643 
1644 	map.m_lblk = pg_start;
1645 	map.m_len = pg_end - pg_start;
1646 	if (off_end)
1647 		map.m_len++;
1648 
1649 	if (!map.m_len)
1650 		return 0;
1651 
1652 	if (f2fs_is_pinned_file(inode)) {
1653 		block_t sec_blks = BLKS_PER_SEC(sbi);
1654 		block_t sec_len = roundup(map.m_len, sec_blks);
1655 
1656 		map.m_len = sec_blks;
1657 next_alloc:
1658 		if (has_not_enough_free_secs(sbi, 0,
1659 			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1660 			down_write(&sbi->gc_lock);
1661 			err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
1662 			if (err && err != -ENODATA && err != -EAGAIN)
1663 				goto out_err;
1664 		}
1665 
1666 		down_write(&sbi->pin_sem);
1667 
1668 		f2fs_lock_op(sbi);
1669 		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED);
1670 		f2fs_unlock_op(sbi);
1671 
1672 		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1673 		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1674 
1675 		up_write(&sbi->pin_sem);
1676 
1677 		expanded += map.m_len;
1678 		sec_len -= map.m_len;
1679 		map.m_lblk += map.m_len;
1680 		if (!err && sec_len)
1681 			goto next_alloc;
1682 
1683 		map.m_len = expanded;
1684 	} else {
1685 		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1686 		expanded = map.m_len;
1687 	}
1688 out_err:
1689 	if (err) {
1690 		pgoff_t last_off;
1691 
1692 		if (!expanded)
1693 			return err;
1694 
1695 		last_off = pg_start + expanded - 1;
1696 
1697 		/* update new size to the failed position */
1698 		new_size = (last_off == pg_end) ? offset + len :
1699 					(loff_t)(last_off + 1) << PAGE_SHIFT;
1700 	} else {
1701 		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1702 	}
1703 
1704 	if (new_size > i_size_read(inode)) {
1705 		if (mode & FALLOC_FL_KEEP_SIZE)
1706 			file_set_keep_isize(inode);
1707 		else
1708 			f2fs_i_size_write(inode, new_size);
1709 	}
1710 
1711 	return err;
1712 }
1713 
1714 static long f2fs_fallocate(struct file *file, int mode,
1715 				loff_t offset, loff_t len)
1716 {
1717 	struct inode *inode = file_inode(file);
1718 	long ret = 0;
1719 
1720 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1721 		return -EIO;
1722 	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1723 		return -ENOSPC;
1724 	if (!f2fs_is_compress_backend_ready(inode))
1725 		return -EOPNOTSUPP;
1726 
1727 	/* f2fs only supports ->fallocate for regular files */
1728 	if (!S_ISREG(inode->i_mode))
1729 		return -EINVAL;
1730 
1731 	if (IS_ENCRYPTED(inode) &&
1732 		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1733 		return -EOPNOTSUPP;
1734 
1735 	if (f2fs_compressed_file(inode) &&
1736 		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1737 			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1738 		return -EOPNOTSUPP;
1739 
1740 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1741 			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1742 			FALLOC_FL_INSERT_RANGE))
1743 		return -EOPNOTSUPP;
1744 
1745 	inode_lock(inode);
1746 
1747 	ret = file_modified(file);
1748 	if (ret)
1749 		goto out;
1750 
1751 	if (mode & FALLOC_FL_PUNCH_HOLE) {
1752 		if (offset >= inode->i_size)
1753 			goto out;
1754 
1755 		ret = punch_hole(inode, offset, len);
1756 	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1757 		ret = f2fs_collapse_range(inode, offset, len);
1758 	} else if (mode & FALLOC_FL_ZERO_RANGE) {
1759 		ret = f2fs_zero_range(inode, offset, len, mode);
1760 	} else if (mode & FALLOC_FL_INSERT_RANGE) {
1761 		ret = f2fs_insert_range(inode, offset, len);
1762 	} else {
1763 		ret = expand_inode_data(inode, offset, len, mode);
1764 	}
1765 
1766 	if (!ret) {
1767 		inode->i_mtime = inode->i_ctime = current_time(inode);
1768 		f2fs_mark_inode_dirty_sync(inode, false);
1769 		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1770 	}
1771 
1772 out:
1773 	inode_unlock(inode);
1774 
1775 	trace_f2fs_fallocate(inode, mode, offset, len, ret);
1776 	return ret;
1777 }
1778 
1779 static int f2fs_release_file(struct inode *inode, struct file *filp)
1780 {
1781 	/*
1782 	 * f2fs_release_file is called on every close call, so we should
1783 	 * not drop any in-memory pages on a close issued by another process.
1784 	 */
1785 	if (!(filp->f_mode & FMODE_WRITE) ||
1786 			atomic_read(&inode->i_writecount) != 1)
1787 		return 0;
1788 
1789 	/* any remaining atomic pages should be discarded */
1790 	if (f2fs_is_atomic_file(inode))
1791 		f2fs_drop_inmem_pages(inode);
1792 	if (f2fs_is_volatile_file(inode)) {
1793 		set_inode_flag(inode, FI_DROP_CACHE);
1794 		filemap_fdatawrite(inode->i_mapping);
1795 		clear_inode_flag(inode, FI_DROP_CACHE);
1796 		clear_inode_flag(inode, FI_VOLATILE_FILE);
1797 		stat_dec_volatile_write(inode);
1798 	}
1799 	return 0;
1800 }
1801 
1802 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1803 {
1804 	struct inode *inode = file_inode(file);
1805 
1806 	/*
1807 	 * If the process doing a transaction crashes, we should roll it
1808 	 * back. Otherwise, other readers/writers can see a corrupted database
1809 	 * until all the writers close their files. Since this should be done
1810 	 * before dropping the file lock, it needs to happen in ->flush.
1811 	 */
1812 	if (f2fs_is_atomic_file(inode) &&
1813 			F2FS_I(inode)->inmem_task == current)
1814 		f2fs_drop_inmem_pages(inode);
1815 	return 0;
1816 }
1817 
1818 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1819 {
1820 	struct f2fs_inode_info *fi = F2FS_I(inode);
1821 	u32 masked_flags = fi->i_flags & mask;
1822 
1823 	f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
1824 
1825 	/* Is it a quota file? Do not allow the user to mess with it */
1826 	if (IS_NOQUOTA(inode))
1827 		return -EPERM;
1828 
1829 	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1830 		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1831 			return -EOPNOTSUPP;
1832 		if (!f2fs_empty_dir(inode))
1833 			return -ENOTEMPTY;
1834 	}
1835 
1836 	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1837 		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1838 			return -EOPNOTSUPP;
1839 		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1840 			return -EINVAL;
1841 	}
1842 
1843 	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1844 		if (masked_flags & F2FS_COMPR_FL) {
1845 			if (!f2fs_disable_compressed_file(inode))
1846 				return -EINVAL;
1847 		} else {
1848 			if (!f2fs_may_compress(inode))
1849 				return -EINVAL;
1850 			if (S_ISREG(inode->i_mode) && inode->i_size)
1851 				return -EINVAL;
1852 
1853 			set_compress_context(inode);
1854 		}
1855 	}
1856 
1857 	fi->i_flags = iflags | (fi->i_flags & ~mask);
1858 	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1859 					(fi->i_flags & F2FS_NOCOMP_FL));
1860 
1861 	if (fi->i_flags & F2FS_PROJINHERIT_FL)
1862 		set_inode_flag(inode, FI_PROJ_INHERIT);
1863 	else
1864 		clear_inode_flag(inode, FI_PROJ_INHERIT);
1865 
1866 	inode->i_ctime = current_time(inode);
1867 	f2fs_set_inode_flags(inode);
1868 	f2fs_mark_inode_dirty_sync(inode, true);
1869 	return 0;
1870 }
1871 
1872 /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
1873 
1874 /*
1875  * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1876  * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1877  * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
1878  * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1879  */
1880 
1881 static const struct {
1882 	u32 iflag;
1883 	u32 fsflag;
1884 } f2fs_fsflags_map[] = {
1885 	{ F2FS_COMPR_FL,	FS_COMPR_FL },
1886 	{ F2FS_SYNC_FL,		FS_SYNC_FL },
1887 	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
1888 	{ F2FS_APPEND_FL,	FS_APPEND_FL },
1889 	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
1890 	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
1891 	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
1892 	{ F2FS_INDEX_FL,	FS_INDEX_FL },
1893 	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
1894 	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
1895 	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
1896 };
1897 
1898 #define F2FS_GETTABLE_FS_FL (		\
1899 		FS_COMPR_FL |		\
1900 		FS_SYNC_FL |		\
1901 		FS_IMMUTABLE_FL |	\
1902 		FS_APPEND_FL |		\
1903 		FS_NODUMP_FL |		\
1904 		FS_NOATIME_FL |		\
1905 		FS_NOCOMP_FL |		\
1906 		FS_INDEX_FL |		\
1907 		FS_DIRSYNC_FL |		\
1908 		FS_PROJINHERIT_FL |	\
1909 		FS_ENCRYPT_FL |		\
1910 		FS_INLINE_DATA_FL |	\
1911 		FS_NOCOW_FL |		\
1912 		FS_VERITY_FL |		\
1913 		FS_CASEFOLD_FL)
1914 
1915 #define F2FS_SETTABLE_FS_FL (		\
1916 		FS_COMPR_FL |		\
1917 		FS_SYNC_FL |		\
1918 		FS_IMMUTABLE_FL |	\
1919 		FS_APPEND_FL |		\
1920 		FS_NODUMP_FL |		\
1921 		FS_NOATIME_FL |		\
1922 		FS_NOCOMP_FL |		\
1923 		FS_DIRSYNC_FL |		\
1924 		FS_PROJINHERIT_FL |	\
1925 		FS_CASEFOLD_FL)
1926 
1927 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1928 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1929 {
1930 	u32 fsflags = 0;
1931 	int i;
1932 
1933 	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1934 		if (iflags & f2fs_fsflags_map[i].iflag)
1935 			fsflags |= f2fs_fsflags_map[i].fsflag;
1936 
1937 	return fsflags;
1938 }
1939 
1940 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1941 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1942 {
1943 	u32 iflags = 0;
1944 	int i;
1945 
1946 	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1947 		if (fsflags & f2fs_fsflags_map[i].fsflag)
1948 			iflags |= f2fs_fsflags_map[i].iflag;
1949 
1950 	return iflags;
1951 }
1952 
1953 static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1954 {
1955 	struct inode *inode = file_inode(filp);
1956 	struct f2fs_inode_info *fi = F2FS_I(inode);
1957 	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1958 
1959 	if (IS_ENCRYPTED(inode))
1960 		fsflags |= FS_ENCRYPT_FL;
1961 	if (IS_VERITY(inode))
1962 		fsflags |= FS_VERITY_FL;
1963 	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1964 		fsflags |= FS_INLINE_DATA_FL;
1965 	if (is_inode_flag_set(inode, FI_PIN_FILE))
1966 		fsflags |= FS_NOCOW_FL;
1967 
1968 	fsflags &= F2FS_GETTABLE_FS_FL;
1969 
1970 	return put_user(fsflags, (int __user *)arg);
1971 }
1972 
1973 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1974 {
1975 	struct inode *inode = file_inode(filp);
1976 	struct f2fs_inode_info *fi = F2FS_I(inode);
1977 	u32 fsflags, old_fsflags;
1978 	u32 iflags;
1979 	int ret;
1980 
1981 	if (!inode_owner_or_capable(inode))
1982 		return -EACCES;
1983 
1984 	if (get_user(fsflags, (int __user *)arg))
1985 		return -EFAULT;
1986 
1987 	if (fsflags & ~F2FS_GETTABLE_FS_FL)
1988 		return -EOPNOTSUPP;
1989 	fsflags &= F2FS_SETTABLE_FS_FL;
1990 
1991 	iflags = f2fs_fsflags_to_iflags(fsflags);
1992 	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
1993 		return -EOPNOTSUPP;
1994 
1995 	ret = mnt_want_write_file(filp);
1996 	if (ret)
1997 		return ret;
1998 
1999 	inode_lock(inode);
2000 
2001 	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
2002 	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
2003 	if (ret)
2004 		goto out;
2005 
2006 	ret = f2fs_setflags_common(inode, iflags,
2007 			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
2008 out:
2009 	inode_unlock(inode);
2010 	mnt_drop_write_file(filp);
2011 	return ret;
2012 }
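/*
 * Editor's sketch (userspace, illustrative only; not part of this file):
 * driving the two ioctls above.  It assumes the generic FS_IOC_{GET,SET}FLAGS
 * interface from <linux/fs.h>; flags outside F2FS_GETTABLE_FS_FL are rejected
 * with -EOPNOTSUPP, so the usual pattern is read-modify-write.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int example_set_noatime(const char *path)
{
	int flags, ret, fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	ret = ioctl(fd, FS_IOC_GETFLAGS, &flags);
	if (!ret) {
		flags |= FS_NOATIME_FL;	/* stays within F2FS_SETTABLE_FS_FL */
		ret = ioctl(fd, FS_IOC_SETFLAGS, &flags);
	}
	close(fd);
	return ret;
}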
2013 
2014 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2015 {
2016 	struct inode *inode = file_inode(filp);
2017 
2018 	return put_user(inode->i_generation, (int __user *)arg);
2019 }
2020 
2021 static int f2fs_ioc_start_atomic_write(struct file *filp)
2022 {
2023 	struct inode *inode = file_inode(filp);
2024 	struct f2fs_inode_info *fi = F2FS_I(inode);
2025 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2026 	int ret;
2027 
2028 	if (!inode_owner_or_capable(inode))
2029 		return -EACCES;
2030 
2031 	if (!S_ISREG(inode->i_mode))
2032 		return -EINVAL;
2033 
2034 	if (filp->f_flags & O_DIRECT)
2035 		return -EINVAL;
2036 
2037 	ret = mnt_want_write_file(filp);
2038 	if (ret)
2039 		return ret;
2040 
2041 	inode_lock(inode);
2042 
2043 	if (!f2fs_disable_compressed_file(inode)) {
2044 		ret = -EINVAL;
2045 		goto out;
2046 	}
2047 
2048 	if (f2fs_is_atomic_file(inode)) {
2049 		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
2050 			ret = -EINVAL;
2051 		goto out;
2052 	}
2053 
2054 	ret = f2fs_convert_inline_inode(inode);
2055 	if (ret)
2056 		goto out;
2057 
2058 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2059 
2060 	/*
2061 	 * Wait for end_io so that F2FS_WB_CP_DATA is counted correctly by
2062 	 * f2fs_is_atomic_file.
2063 	 */
2064 	if (get_dirty_pages(inode))
2065 		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2066 			  inode->i_ino, get_dirty_pages(inode));
2067 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2068 	if (ret) {
2069 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2070 		goto out;
2071 	}
2072 
2073 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2074 	if (list_empty(&fi->inmem_ilist))
2075 		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2076 	sbi->atomic_files++;
2077 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2078 
2079 	/* add the inode to inmem_list first, then set FI_ATOMIC_FILE */
2080 	set_inode_flag(inode, FI_ATOMIC_FILE);
2081 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2082 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2083 
2084 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2085 	F2FS_I(inode)->inmem_task = current;
2086 	stat_update_max_atomic_write(inode);
2087 out:
2088 	inode_unlock(inode);
2089 	mnt_drop_write_file(filp);
2090 	return ret;
2091 }
2092 
2093 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2094 {
2095 	struct inode *inode = file_inode(filp);
2096 	int ret;
2097 
2098 	if (!inode_owner_or_capable(inode))
2099 		return -EACCES;
2100 
2101 	ret = mnt_want_write_file(filp);
2102 	if (ret)
2103 		return ret;
2104 
2105 	f2fs_balance_fs(F2FS_I_SB(inode), true);
2106 
2107 	inode_lock(inode);
2108 
2109 	if (f2fs_is_volatile_file(inode)) {
2110 		ret = -EINVAL;
2111 		goto err_out;
2112 	}
2113 
2114 	if (f2fs_is_atomic_file(inode)) {
2115 		ret = f2fs_commit_inmem_pages(inode);
2116 		if (ret)
2117 			goto err_out;
2118 
2119 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2120 		if (!ret)
2121 			f2fs_drop_inmem_pages(inode);
2122 	} else {
2123 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2124 	}
2125 err_out:
2126 	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2127 		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2128 		ret = -EINVAL;
2129 	}
2130 	inode_unlock(inode);
2131 	mnt_drop_write_file(filp);
2132 	return ret;
2133 }
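/*
 * Editor's sketch (userspace, illustrative only): the start/commit pairing
 * implemented above, as used for SQLite-style transactions.  Assumes the
 * uapi header is installed as <linux/f2fs.h>.  On a failed commit, the
 * in-memory pages can be dropped via F2FS_IOC_ABORT_VOLATILE_WRITE.
 */
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int example_atomic_update(int fd, const void *buf, size_t len)
{
	if (ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE) < 0)
		return -1;
	if (pwrite(fd, buf, len, 0) != (ssize_t)len) {
		ioctl(fd, F2FS_IOC_ABORT_VOLATILE_WRITE);
		return -1;
	}
	/* commits the in-memory pages and then fsyncs the file */
	return ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
}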
2134 
2135 static int f2fs_ioc_start_volatile_write(struct file *filp)
2136 {
2137 	struct inode *inode = file_inode(filp);
2138 	int ret;
2139 
2140 	if (!inode_owner_or_capable(inode))
2141 		return -EACCES;
2142 
2143 	if (!S_ISREG(inode->i_mode))
2144 		return -EINVAL;
2145 
2146 	ret = mnt_want_write_file(filp);
2147 	if (ret)
2148 		return ret;
2149 
2150 	inode_lock(inode);
2151 
2152 	if (f2fs_is_volatile_file(inode))
2153 		goto out;
2154 
2155 	ret = f2fs_convert_inline_inode(inode);
2156 	if (ret)
2157 		goto out;
2158 
2159 	stat_inc_volatile_write(inode);
2160 	stat_update_max_volatile_write(inode);
2161 
2162 	set_inode_flag(inode, FI_VOLATILE_FILE);
2163 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2164 out:
2165 	inode_unlock(inode);
2166 	mnt_drop_write_file(filp);
2167 	return ret;
2168 }
2169 
2170 static int f2fs_ioc_release_volatile_write(struct file *filp)
2171 {
2172 	struct inode *inode = file_inode(filp);
2173 	int ret;
2174 
2175 	if (!inode_owner_or_capable(inode))
2176 		return -EACCES;
2177 
2178 	ret = mnt_want_write_file(filp);
2179 	if (ret)
2180 		return ret;
2181 
2182 	inode_lock(inode);
2183 
2184 	if (!f2fs_is_volatile_file(inode))
2185 		goto out;
2186 
2187 	if (!f2fs_is_first_block_written(inode)) {
2188 		ret = truncate_partial_data_page(inode, 0, true);
2189 		goto out;
2190 	}
2191 
2192 	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2193 out:
2194 	inode_unlock(inode);
2195 	mnt_drop_write_file(filp);
2196 	return ret;
2197 }
2198 
2199 static int f2fs_ioc_abort_volatile_write(struct file *filp)
2200 {
2201 	struct inode *inode = file_inode(filp);
2202 	int ret;
2203 
2204 	if (!inode_owner_or_capable(inode))
2205 		return -EACCES;
2206 
2207 	ret = mnt_want_write_file(filp);
2208 	if (ret)
2209 		return ret;
2210 
2211 	inode_lock(inode);
2212 
2213 	if (f2fs_is_atomic_file(inode))
2214 		f2fs_drop_inmem_pages(inode);
2215 	if (f2fs_is_volatile_file(inode)) {
2216 		clear_inode_flag(inode, FI_VOLATILE_FILE);
2217 		stat_dec_volatile_write(inode);
2218 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2219 	}
2220 
2221 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2222 
2223 	inode_unlock(inode);
2224 
2225 	mnt_drop_write_file(filp);
2226 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2227 	return ret;
2228 }
2229 
2230 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2231 {
2232 	struct inode *inode = file_inode(filp);
2233 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2234 	struct super_block *sb = sbi->sb;
2235 	__u32 in;
2236 	int ret = 0;
2237 
2238 	if (!capable(CAP_SYS_ADMIN))
2239 		return -EPERM;
2240 
2241 	if (get_user(in, (__u32 __user *)arg))
2242 		return -EFAULT;
2243 
2244 	if (in != F2FS_GOING_DOWN_FULLSYNC) {
2245 		ret = mnt_want_write_file(filp);
2246 		if (ret) {
2247 			if (ret == -EROFS) {
2248 				ret = 0;
2249 				f2fs_stop_checkpoint(sbi, false);
2250 				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2251 				trace_f2fs_shutdown(sbi, in, ret);
2252 			}
2253 			return ret;
2254 		}
2255 	}
2256 
2257 	switch (in) {
2258 	case F2FS_GOING_DOWN_FULLSYNC:
2259 		sb = freeze_bdev(sb->s_bdev);
2260 		if (IS_ERR(sb)) {
2261 			ret = PTR_ERR(sb);
2262 			goto out;
2263 		}
2264 		if (sb) {
2265 			f2fs_stop_checkpoint(sbi, false);
2266 			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2267 			thaw_bdev(sb->s_bdev, sb);
2268 		}
2269 		break;
2270 	case F2FS_GOING_DOWN_METASYNC:
2271 		/* do checkpoint only */
2272 		ret = f2fs_sync_fs(sb, 1);
2273 		if (ret)
2274 			goto out;
2275 		f2fs_stop_checkpoint(sbi, false);
2276 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2277 		break;
2278 	case F2FS_GOING_DOWN_NOSYNC:
2279 		f2fs_stop_checkpoint(sbi, false);
2280 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2281 		break;
2282 	case F2FS_GOING_DOWN_METAFLUSH:
2283 		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2284 		f2fs_stop_checkpoint(sbi, false);
2285 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2286 		break;
2287 	case F2FS_GOING_DOWN_NEED_FSCK:
2288 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2289 		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2290 		set_sbi_flag(sbi, SBI_IS_DIRTY);
2291 		/* do checkpoint only */
2292 		ret = f2fs_sync_fs(sb, 1);
2293 		goto out;
2294 	default:
2295 		ret = -EINVAL;
2296 		goto out;
2297 	}
2298 
2299 	f2fs_stop_gc_thread(sbi);
2300 	f2fs_stop_discard_thread(sbi);
2301 
2302 	f2fs_drop_discard_cmd(sbi);
2303 	clear_opt(sbi, DISCARD);
2304 
2305 	f2fs_update_time(sbi, REQ_TIME);
2306 out:
2307 	if (in != F2FS_GOING_DOWN_FULLSYNC)
2308 		mnt_drop_write_file(filp);
2309 
2310 	trace_f2fs_shutdown(sbi, in, ret);
2311 
2312 	return ret;
2313 }
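/*
 * Editor's sketch (userspace, illustrative only): requesting an emergency
 * shutdown through the ioctl above.  Any fd on the filesystem works, and
 * the F2FS_GOING_DOWN_* value selects how much state is synced first.
 * Assumes <linux/f2fs.h> is installed.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int example_shutdown(const char *mntpoint)
{
	__u32 how = F2FS_GOING_DOWN_METASYNC;	/* checkpoint, then stop */
	int ret, fd = open(mntpoint, O_RDONLY);

	if (fd < 0)
		return -1;
	ret = ioctl(fd, F2FS_IOC_SHUTDOWN, &how);
	close(fd);
	return ret;
}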
2314 
2315 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2316 {
2317 	struct inode *inode = file_inode(filp);
2318 	struct super_block *sb = inode->i_sb;
2319 	struct request_queue *q = bdev_get_queue(sb->s_bdev);
2320 	struct fstrim_range range;
2321 	int ret;
2322 
2323 	if (!capable(CAP_SYS_ADMIN))
2324 		return -EPERM;
2325 
2326 	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2327 		return -EOPNOTSUPP;
2328 
2329 	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2330 				sizeof(range)))
2331 		return -EFAULT;
2332 
2333 	ret = mnt_want_write_file(filp);
2334 	if (ret)
2335 		return ret;
2336 
2337 	range.minlen = max((unsigned int)range.minlen,
2338 				q->limits.discard_granularity);
2339 	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2340 	mnt_drop_write_file(filp);
2341 	if (ret < 0)
2342 		return ret;
2343 
2344 	if (copy_to_user((struct fstrim_range __user *)arg, &range,
2345 				sizeof(range)))
2346 		return -EFAULT;
2347 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2348 	return 0;
2349 }
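/*
 * Editor's sketch (userspace, illustrative only): an fstrim(8)-style call
 * into the handler above.  The kernel raises minlen to the device's discard
 * granularity and writes the trimmed byte count back into range.len.
 * struct fstrim_range and FITRIM come from <linux/fs.h>.
 */
#include <limits.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int example_fitrim(int mnt_fd)
{
	struct fstrim_range range = {
		.start = 0,
		.len = ULLONG_MAX,	/* the whole filesystem */
		.minlen = 0,
	};

	if (ioctl(mnt_fd, FITRIM, &range) < 0)
		return -1;
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	return 0;
}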
2350 
2351 static bool uuid_is_nonzero(__u8 u[16])
2352 {
2353 	int i;
2354 
2355 	for (i = 0; i < 16; i++)
2356 		if (u[i])
2357 			return true;
2358 	return false;
2359 }
2360 
2361 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2362 {
2363 	struct inode *inode = file_inode(filp);
2364 
2365 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2366 		return -EOPNOTSUPP;
2367 
2368 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2369 
2370 	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2371 }
2372 
2373 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2374 {
2375 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2376 		return -EOPNOTSUPP;
2377 	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2378 }
2379 
2380 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2381 {
2382 	struct inode *inode = file_inode(filp);
2383 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2384 	int err;
2385 
2386 	if (!f2fs_sb_has_encrypt(sbi))
2387 		return -EOPNOTSUPP;
2388 
2389 	err = mnt_want_write_file(filp);
2390 	if (err)
2391 		return err;
2392 
2393 	down_write(&sbi->sb_lock);
2394 
2395 	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2396 		goto got_it;
2397 
2398 	/* update superblock with uuid */
2399 	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2400 
2401 	err = f2fs_commit_super(sbi, false);
2402 	if (err) {
2403 		/* undo new data */
2404 		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2405 		goto out_err;
2406 	}
2407 got_it:
2408 	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2409 									16))
2410 		err = -EFAULT;
2411 out_err:
2412 	up_write(&sbi->sb_lock);
2413 	mnt_drop_write_file(filp);
2414 	return err;
2415 }
2416 
2417 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2418 					     unsigned long arg)
2419 {
2420 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2421 		return -EOPNOTSUPP;
2422 
2423 	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2424 }
2425 
2426 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2427 {
2428 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2429 		return -EOPNOTSUPP;
2430 
2431 	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2432 }
2433 
2434 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2435 {
2436 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2437 		return -EOPNOTSUPP;
2438 
2439 	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2440 }
2441 
2442 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2443 						    unsigned long arg)
2444 {
2445 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2446 		return -EOPNOTSUPP;
2447 
2448 	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2449 }
2450 
2451 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2452 					      unsigned long arg)
2453 {
2454 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2455 		return -EOPNOTSUPP;
2456 
2457 	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2458 }
2459 
2460 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2461 {
2462 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2463 		return -EOPNOTSUPP;
2464 
2465 	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2466 }
2467 
2468 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2469 {
2470 	struct inode *inode = file_inode(filp);
2471 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2472 	__u32 sync;
2473 	int ret;
2474 
2475 	if (!capable(CAP_SYS_ADMIN))
2476 		return -EPERM;
2477 
2478 	if (get_user(sync, (__u32 __user *)arg))
2479 		return -EFAULT;
2480 
2481 	if (f2fs_readonly(sbi->sb))
2482 		return -EROFS;
2483 
2484 	ret = mnt_want_write_file(filp);
2485 	if (ret)
2486 		return ret;
2487 
2488 	if (!sync) {
2489 		if (!down_write_trylock(&sbi->gc_lock)) {
2490 			ret = -EBUSY;
2491 			goto out;
2492 		}
2493 	} else {
2494 		down_write(&sbi->gc_lock);
2495 	}
2496 
2497 	ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
2498 out:
2499 	mnt_drop_write_file(filp);
2500 	return ret;
2501 }
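/*
 * Editor's sketch (userspace, illustrative only): forcing a foreground GC
 * pass via the ioctl above.  With sync == 0 the kernel only trylocks
 * gc_lock and returns -EBUSY instead of waiting.  <linux/f2fs.h> assumed.
 */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int example_force_gc(int mnt_fd, int wait)
{
	__u32 sync = wait ? 1 : 0;

	return ioctl(mnt_fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
}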
2502 
2503 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2504 {
2505 	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2506 	u64 end;
2507 	int ret;
2508 
2509 	if (!capable(CAP_SYS_ADMIN))
2510 		return -EPERM;
2511 	if (f2fs_readonly(sbi->sb))
2512 		return -EROFS;
2513 
2514 	end = range->start + range->len;
2515 	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2516 					end >= MAX_BLKADDR(sbi))
2517 		return -EINVAL;
2518 
2519 	ret = mnt_want_write_file(filp);
2520 	if (ret)
2521 		return ret;
2522 
2523 do_more:
2524 	if (!range->sync) {
2525 		if (!down_write_trylock(&sbi->gc_lock)) {
2526 			ret = -EBUSY;
2527 			goto out;
2528 		}
2529 	} else {
2530 		down_write(&sbi->gc_lock);
2531 	}
2532 
2533 	ret = f2fs_gc(sbi, range->sync, true, false,
2534 				GET_SEGNO(sbi, range->start));
2535 	if (ret) {
2536 		if (ret == -EBUSY)
2537 			ret = -EAGAIN;
2538 		goto out;
2539 	}
2540 	range->start += BLKS_PER_SEC(sbi);
2541 	if (range->start <= end)
2542 		goto do_more;
2543 out:
2544 	mnt_drop_write_file(filp);
2545 	return ret;
2546 }
2547 
2548 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2549 {
2550 	struct f2fs_gc_range range;
2551 
2552 	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2553 							sizeof(range)))
2554 		return -EFAULT;
2555 	return __f2fs_ioc_gc_range(filp, &range);
2556 }
2557 
2558 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2559 {
2560 	struct inode *inode = file_inode(filp);
2561 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2562 	int ret;
2563 
2564 	if (!capable(CAP_SYS_ADMIN))
2565 		return -EPERM;
2566 
2567 	if (f2fs_readonly(sbi->sb))
2568 		return -EROFS;
2569 
2570 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2571 		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2572 		return -EINVAL;
2573 	}
2574 
2575 	ret = mnt_want_write_file(filp);
2576 	if (ret)
2577 		return ret;
2578 
2579 	ret = f2fs_sync_fs(sbi->sb, 1);
2580 
2581 	mnt_drop_write_file(filp);
2582 	return ret;
2583 }
2584 
2585 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2586 					struct file *filp,
2587 					struct f2fs_defragment *range)
2588 {
2589 	struct inode *inode = file_inode(filp);
2590 	struct f2fs_map_blocks map = { .m_next_extent = NULL,
2591 					.m_seg_type = NO_CHECK_TYPE,
2592 					.m_may_create = false };
2593 	struct extent_info ei = {0, 0, 0};
2594 	pgoff_t pg_start, pg_end, next_pgofs;
2595 	unsigned int blk_per_seg = sbi->blocks_per_seg;
2596 	unsigned int total = 0, sec_num;
2597 	block_t blk_end = 0;
2598 	bool fragmented = false;
2599 	int err;
2600 
2601 	/* if in-place-update policy is enabled, don't waste time here */
2602 	if (f2fs_should_update_inplace(inode, NULL))
2603 		return -EINVAL;
2604 
2605 	pg_start = range->start >> PAGE_SHIFT;
2606 	pg_end = (range->start + range->len) >> PAGE_SHIFT;
2607 
2608 	f2fs_balance_fs(sbi, true);
2609 
2610 	inode_lock(inode);
2611 
2612 	/* writeback all dirty pages in the range */
2613 	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2614 						range->start + range->len - 1);
2615 	if (err)
2616 		goto out;
2617 
2618 	/*
2619 	 * look up mapping info in the extent cache; skip defragmenting if
2620 	 * the physical block addresses are contiguous.
2621 	 */
2622 	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2623 		if (ei.fofs + ei.len >= pg_end)
2624 			goto out;
2625 	}
2626 
2627 	map.m_lblk = pg_start;
2628 	map.m_next_pgofs = &next_pgofs;
2629 
2630 	/*
2631 	 * look up mapping info in the dnode page cache; skip defragmenting
2632 	 * if all physical block addresses are contiguous, even if there are
2633 	 * holes in the logical blocks.
2634 	 */
2635 	while (map.m_lblk < pg_end) {
2636 		map.m_len = pg_end - map.m_lblk;
2637 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2638 		if (err)
2639 			goto out;
2640 
2641 		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2642 			map.m_lblk = next_pgofs;
2643 			continue;
2644 		}
2645 
2646 		if (blk_end && blk_end != map.m_pblk)
2647 			fragmented = true;
2648 
2649 		/* record the total count of blocks that we're going to move */
2650 		total += map.m_len;
2651 
2652 		blk_end = map.m_pblk + map.m_len;
2653 
2654 		map.m_lblk += map.m_len;
2655 	}
2656 
2657 	if (!fragmented) {
2658 		total = 0;
2659 		goto out;
2660 	}
2661 
2662 	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2663 
2664 	/*
2665 	 * make sure there are enough free sections for LFS allocation; this
2666 	 * avoids the defragment running in SSR mode while free sections are
2667 	 * being allocated intensively
2668 	 */
2669 	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2670 		err = -EAGAIN;
2671 		goto out;
2672 	}
2673 
2674 	map.m_lblk = pg_start;
2675 	map.m_len = pg_end - pg_start;
2676 	total = 0;
2677 
2678 	while (map.m_lblk < pg_end) {
2679 		pgoff_t idx;
2680 		int cnt = 0;
2681 
2682 do_map:
2683 		map.m_len = pg_end - map.m_lblk;
2684 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2685 		if (err)
2686 			goto clear_out;
2687 
2688 		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2689 			map.m_lblk = next_pgofs;
2690 			goto check;
2691 		}
2692 
2693 		set_inode_flag(inode, FI_DO_DEFRAG);
2694 
2695 		idx = map.m_lblk;
2696 		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2697 			struct page *page;
2698 
2699 			page = f2fs_get_lock_data_page(inode, idx, true);
2700 			if (IS_ERR(page)) {
2701 				err = PTR_ERR(page);
2702 				goto clear_out;
2703 			}
2704 
2705 			set_page_dirty(page);
2706 			f2fs_put_page(page, 1);
2707 
2708 			idx++;
2709 			cnt++;
2710 			total++;
2711 		}
2712 
2713 		map.m_lblk = idx;
2714 check:
2715 		if (map.m_lblk < pg_end && cnt < blk_per_seg)
2716 			goto do_map;
2717 
2718 		clear_inode_flag(inode, FI_DO_DEFRAG);
2719 
2720 		err = filemap_fdatawrite(inode->i_mapping);
2721 		if (err)
2722 			goto out;
2723 	}
2724 clear_out:
2725 	clear_inode_flag(inode, FI_DO_DEFRAG);
2726 out:
2727 	inode_unlock(inode);
2728 	if (!err)
2729 		range->len = (u64)total << PAGE_SHIFT;
2730 	return err;
2731 }
2732 
2733 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2734 {
2735 	struct inode *inode = file_inode(filp);
2736 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2737 	struct f2fs_defragment range;
2738 	int err;
2739 
2740 	if (!capable(CAP_SYS_ADMIN))
2741 		return -EPERM;
2742 
2743 	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2744 		return -EINVAL;
2745 
2746 	if (f2fs_readonly(sbi->sb))
2747 		return -EROFS;
2748 
2749 	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2750 							sizeof(range)))
2751 		return -EFAULT;
2752 
2753 	/* verify alignment of offset & size */
2754 	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2755 		return -EINVAL;
2756 
2757 	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2758 					sbi->max_file_blocks))
2759 		return -EINVAL;
2760 
2761 	err = mnt_want_write_file(filp);
2762 	if (err)
2763 		return err;
2764 
2765 	err = f2fs_defragment_range(sbi, filp, &range);
2766 	mnt_drop_write_file(filp);
2767 
2768 	f2fs_update_time(sbi, REQ_TIME);
2769 	if (err < 0)
2770 		return err;
2771 
2772 	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2773 							sizeof(range)))
2774 		return -EFAULT;
2775 
2776 	return 0;
2777 }
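/*
 * Editor's sketch (userspace, illustrative only): defragmenting a
 * block-aligned byte range with the ioctl above.  On success the kernel
 * rewrites range.len to the number of bytes it actually relocated.
 * <linux/f2fs.h> assumed.
 */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static long long example_defrag(int fd, __u64 start, __u64 len)
{
	struct f2fs_defragment range = { .start = start, .len = len };

	if (ioctl(fd, F2FS_IOC_DEFRAGMENT, &range) < 0)
		return -1;
	return (long long)range.len;	/* bytes moved */
}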
2778 
2779 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2780 			struct file *file_out, loff_t pos_out, size_t len)
2781 {
2782 	struct inode *src = file_inode(file_in);
2783 	struct inode *dst = file_inode(file_out);
2784 	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2785 	size_t olen = len, dst_max_i_size = 0;
2786 	size_t dst_osize;
2787 	int ret;
2788 
2789 	if (file_in->f_path.mnt != file_out->f_path.mnt ||
2790 				src->i_sb != dst->i_sb)
2791 		return -EXDEV;
2792 
2793 	if (unlikely(f2fs_readonly(src->i_sb)))
2794 		return -EROFS;
2795 
2796 	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2797 		return -EINVAL;
2798 
2799 	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2800 		return -EOPNOTSUPP;
2801 
2802 	if (pos_out < 0 || pos_in < 0)
2803 		return -EINVAL;
2804 
2805 	if (src == dst) {
2806 		if (pos_in == pos_out)
2807 			return 0;
2808 		if (pos_out > pos_in && pos_out < pos_in + len)
2809 			return -EINVAL;
2810 	}
2811 
2812 	inode_lock(src);
2813 	if (src != dst) {
2814 		ret = -EBUSY;
2815 		if (!inode_trylock(dst))
2816 			goto out;
2817 	}
2818 
2819 	ret = -EINVAL;
2820 	if (pos_in + len > src->i_size || pos_in + len < pos_in)
2821 		goto out_unlock;
2822 	if (len == 0)
2823 		olen = len = src->i_size - pos_in;
2824 	if (pos_in + len == src->i_size)
2825 		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2826 	if (len == 0) {
2827 		ret = 0;
2828 		goto out_unlock;
2829 	}
2830 
2831 	dst_osize = dst->i_size;
2832 	if (pos_out + olen > dst->i_size)
2833 		dst_max_i_size = pos_out + olen;
2834 
2835 	/* verify the end result is block aligned */
2836 	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2837 			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2838 			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2839 		goto out_unlock;
2840 
2841 	ret = f2fs_convert_inline_inode(src);
2842 	if (ret)
2843 		goto out_unlock;
2844 
2845 	ret = f2fs_convert_inline_inode(dst);
2846 	if (ret)
2847 		goto out_unlock;
2848 
2849 	/* write out all dirty pages from offset */
2850 	ret = filemap_write_and_wait_range(src->i_mapping,
2851 					pos_in, pos_in + len);
2852 	if (ret)
2853 		goto out_unlock;
2854 
2855 	ret = filemap_write_and_wait_range(dst->i_mapping,
2856 					pos_out, pos_out + len);
2857 	if (ret)
2858 		goto out_unlock;
2859 
2860 	f2fs_balance_fs(sbi, true);
2861 
2862 	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2863 	if (src != dst) {
2864 		ret = -EBUSY;
2865 		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2866 			goto out_src;
2867 	}
2868 
2869 	f2fs_lock_op(sbi);
2870 	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2871 				pos_out >> F2FS_BLKSIZE_BITS,
2872 				len >> F2FS_BLKSIZE_BITS, false);
2873 
2874 	if (!ret) {
2875 		if (dst_max_i_size)
2876 			f2fs_i_size_write(dst, dst_max_i_size);
2877 		else if (dst_osize != dst->i_size)
2878 			f2fs_i_size_write(dst, dst_osize);
2879 	}
2880 	f2fs_unlock_op(sbi);
2881 
2882 	if (src != dst)
2883 		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2884 out_src:
2885 	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2886 out_unlock:
2887 	if (src != dst)
2888 		inode_unlock(dst);
2889 out:
2890 	inode_unlock(src);
2891 	return ret;
2892 }
2893 
2894 static int __f2fs_ioc_move_range(struct file *filp,
2895 				struct f2fs_move_range *range)
2896 {
2897 	struct fd dst;
2898 	int err;
2899 
2900 	if (!(filp->f_mode & FMODE_READ) ||
2901 			!(filp->f_mode & FMODE_WRITE))
2902 		return -EBADF;
2903 
2904 	dst = fdget(range->dst_fd);
2905 	if (!dst.file)
2906 		return -EBADF;
2907 
2908 	if (!(dst.file->f_mode & FMODE_WRITE)) {
2909 		err = -EBADF;
2910 		goto err_out;
2911 	}
2912 
2913 	err = mnt_want_write_file(filp);
2914 	if (err)
2915 		goto err_out;
2916 
2917 	err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2918 					range->pos_out, range->len);
2919 
2920 	mnt_drop_write_file(filp);
2921 err_out:
2922 	fdput(dst);
2923 	return err;
2924 }
2925 
2926 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2927 {
2928 	struct f2fs_move_range range;
2929 
2930 	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2931 							sizeof(range)))
2932 		return -EFAULT;
2933 	return __f2fs_ioc_move_range(filp, &range);
2934 }
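/*
 * Editor's sketch (userspace, illustrative only): exchanging block-aligned
 * data between two files on the same f2fs instance via the ioctl above.
 * The source fd must be open for both read and write, and dst_fd must be
 * writable, or the kernel returns -EBADF.  <linux/f2fs.h> assumed.
 */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int example_move_range(int src_fd, int dst_fd, __u64 len)
{
	struct f2fs_move_range range = {
		.dst_fd = dst_fd,
		.pos_in = 0,
		.pos_out = 0,
		.len = len,	/* offsets and len must be F2FS_BLKSIZE aligned */
	};

	return ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &range);
}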
2935 
2936 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2937 {
2938 	struct inode *inode = file_inode(filp);
2939 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2940 	struct sit_info *sm = SIT_I(sbi);
2941 	unsigned int start_segno = 0, end_segno = 0;
2942 	unsigned int dev_start_segno = 0, dev_end_segno = 0;
2943 	struct f2fs_flush_device range;
2944 	int ret;
2945 
2946 	if (!capable(CAP_SYS_ADMIN))
2947 		return -EPERM;
2948 
2949 	if (f2fs_readonly(sbi->sb))
2950 		return -EROFS;
2951 
2952 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2953 		return -EINVAL;
2954 
2955 	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2956 							sizeof(range)))
2957 		return -EFAULT;
2958 
2959 	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2960 			__is_large_section(sbi)) {
2961 		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2962 			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2963 		return -EINVAL;
2964 	}
2965 
2966 	ret = mnt_want_write_file(filp);
2967 	if (ret)
2968 		return ret;
2969 
2970 	if (range.dev_num != 0)
2971 		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2972 	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2973 
2974 	start_segno = sm->last_victim[FLUSH_DEVICE];
2975 	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2976 		start_segno = dev_start_segno;
2977 	end_segno = min(start_segno + range.segments, dev_end_segno);
2978 
2979 	while (start_segno < end_segno) {
2980 		if (!down_write_trylock(&sbi->gc_lock)) {
2981 			ret = -EBUSY;
2982 			goto out;
2983 		}
2984 		sm->last_victim[GC_CB] = end_segno + 1;
2985 		sm->last_victim[GC_GREEDY] = end_segno + 1;
2986 		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2987 		ret = f2fs_gc(sbi, true, true, true, start_segno);
2988 		if (ret == -EAGAIN)
2989 			ret = 0;
2990 		else if (ret < 0)
2991 			break;
2992 		start_segno++;
2993 	}
2994 out:
2995 	mnt_drop_write_file(filp);
2996 	return ret;
2997 }
2998 
2999 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
3000 {
3001 	struct inode *inode = file_inode(filp);
3002 	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
3003 
3004 	/* Always advertise atomic-write support, which SQLite on Android relies on. */
3005 	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
3006 
3007 	return put_user(sb_feature, (u32 __user *)arg);
3008 }
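/*
 * Editor's sketch (userspace, illustrative only): reading the feature word
 * exposed above.  Only the raw value is printed here, since the individual
 * F2FS_FEATURE_* bit assignments are kernel-internal.  <linux/f2fs.h>
 * assumed.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int example_get_features(int fd)
{
	__u32 feat;

	if (ioctl(fd, F2FS_IOC_GET_FEATURES, &feat) < 0)
		return -1;
	printf("f2fs feature word: 0x%x\n", feat);
	return 0;
}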
3009 
3010 #ifdef CONFIG_QUOTA
3011 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3012 {
3013 	struct dquot *transfer_to[MAXQUOTAS] = {};
3014 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3015 	struct super_block *sb = sbi->sb;
3016 	int err = 0;
3017 
3018 	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3019 	if (!IS_ERR(transfer_to[PRJQUOTA])) {
3020 		err = __dquot_transfer(inode, transfer_to);
3021 		if (err)
3022 			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3023 		dqput(transfer_to[PRJQUOTA]);
3024 	}
3025 	return err;
3026 }
3027 
3028 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3029 {
3030 	struct inode *inode = file_inode(filp);
3031 	struct f2fs_inode_info *fi = F2FS_I(inode);
3032 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3033 	struct page *ipage;
3034 	kprojid_t kprojid;
3035 	int err;
3036 
3037 	if (!f2fs_sb_has_project_quota(sbi)) {
3038 		if (projid != F2FS_DEF_PROJID)
3039 			return -EOPNOTSUPP;
3040 		else
3041 			return 0;
3042 	}
3043 
3044 	if (!f2fs_has_extra_attr(inode))
3045 		return -EOPNOTSUPP;
3046 
3047 	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3048 
3049 	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3050 		return 0;
3051 
3052 	err = -EPERM;
3053 	/* Is it quota file? Do not allow user to mess with it */
3054 	if (IS_NOQUOTA(inode))
3055 		return err;
3056 
3057 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
3058 	if (IS_ERR(ipage))
3059 		return PTR_ERR(ipage);
3060 
3061 	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3062 								i_projid)) {
3063 		err = -EOVERFLOW;
3064 		f2fs_put_page(ipage, 1);
3065 		return err;
3066 	}
3067 	f2fs_put_page(ipage, 1);
3068 
3069 	err = dquot_initialize(inode);
3070 	if (err)
3071 		return err;
3072 
3073 	f2fs_lock_op(sbi);
3074 	err = f2fs_transfer_project_quota(inode, kprojid);
3075 	if (err)
3076 		goto out_unlock;
3077 
3078 	F2FS_I(inode)->i_projid = kprojid;
3079 	inode->i_ctime = current_time(inode);
3080 	f2fs_mark_inode_dirty_sync(inode, true);
3081 out_unlock:
3082 	f2fs_unlock_op(sbi);
3083 	return err;
3084 }
3085 #else
3086 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3087 {
3088 	return 0;
3089 }
3090 
3091 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3092 {
3093 	if (projid != F2FS_DEF_PROJID)
3094 		return -EOPNOTSUPP;
3095 	return 0;
3096 }
3097 #endif
3098 
3099 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
3100 
3101 /*
3102  * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
3103  * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
3104  * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
3105  */
3106 
3107 static const struct {
3108 	u32 iflag;
3109 	u32 xflag;
3110 } f2fs_xflags_map[] = {
3111 	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
3112 	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
3113 	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
3114 	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
3115 	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
3116 	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
3117 };
3118 
3119 #define F2FS_SUPPORTED_XFLAGS (		\
3120 		FS_XFLAG_SYNC |		\
3121 		FS_XFLAG_IMMUTABLE |	\
3122 		FS_XFLAG_APPEND |	\
3123 		FS_XFLAG_NODUMP |	\
3124 		FS_XFLAG_NOATIME |	\
3125 		FS_XFLAG_PROJINHERIT)
3126 
3127 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
3128 static inline u32 f2fs_iflags_to_xflags(u32 iflags)
3129 {
3130 	u32 xflags = 0;
3131 	int i;
3132 
3133 	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3134 		if (iflags & f2fs_xflags_map[i].iflag)
3135 			xflags |= f2fs_xflags_map[i].xflag;
3136 
3137 	return xflags;
3138 }
3139 
3140 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
3141 static inline u32 f2fs_xflags_to_iflags(u32 xflags)
3142 {
3143 	u32 iflags = 0;
3144 	int i;
3145 
3146 	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3147 		if (xflags & f2fs_xflags_map[i].xflag)
3148 			iflags |= f2fs_xflags_map[i].iflag;
3149 
3150 	return iflags;
3151 }
3152 
3153 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
3154 {
3155 	struct f2fs_inode_info *fi = F2FS_I(inode);
3156 
3157 	simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
3158 
3159 	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3160 		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3161 }
3162 
3163 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
3164 {
3165 	struct inode *inode = file_inode(filp);
3166 	struct fsxattr fa;
3167 
3168 	f2fs_fill_fsxattr(inode, &fa);
3169 
3170 	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
3171 		return -EFAULT;
3172 	return 0;
3173 }
3174 
3175 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
3176 {
3177 	struct inode *inode = file_inode(filp);
3178 	struct fsxattr fa, old_fa;
3179 	u32 iflags;
3180 	int err;
3181 
3182 	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
3183 		return -EFAULT;
3184 
3185 	/* Make sure caller has proper permission */
3186 	if (!inode_owner_or_capable(inode))
3187 		return -EACCES;
3188 
3189 	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
3190 		return -EOPNOTSUPP;
3191 
3192 	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
3193 	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3194 		return -EOPNOTSUPP;
3195 
3196 	err = mnt_want_write_file(filp);
3197 	if (err)
3198 		return err;
3199 
3200 	inode_lock(inode);
3201 
3202 	f2fs_fill_fsxattr(inode, &old_fa);
3203 	err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
3204 	if (err)
3205 		goto out;
3206 
3207 	err = f2fs_setflags_common(inode, iflags,
3208 			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
3209 	if (err)
3210 		goto out;
3211 
3212 	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
3213 out:
3214 	inode_unlock(inode);
3215 	mnt_drop_write_file(filp);
3216 	return err;
3217 }
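/*
 * Editor's sketch (userspace, illustrative only): changing the project ID
 * through the FS_IOC_FSSETXATTR path above, using the same read-modify-write
 * pattern as chattr(1) does for project quotas.  struct fsxattr comes from
 * <linux/fs.h>.
 */
#include <sys/ioctl.h>
#include <linux/fs.h>

static int example_set_projid(int fd, __u32 projid)
{
	struct fsxattr fa;

	if (ioctl(fd, FS_IOC_FSGETXATTR, &fa) < 0)
		return -1;
	fa.fsx_projid = projid;
	fa.fsx_xflags |= FS_XFLAG_PROJINHERIT;	/* new children inherit it */
	return ioctl(fd, FS_IOC_FSSETXATTR, &fa);
}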
3218 
3219 int f2fs_pin_file_control(struct inode *inode, bool inc)
3220 {
3221 	struct f2fs_inode_info *fi = F2FS_I(inode);
3222 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3223 
3224 	/* For a regular file, use i_gc_failures as a risk signal. */
3225 	if (inc)
3226 		f2fs_i_gc_failures_write(inode,
3227 				fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3228 
3229 	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3230 		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3231 			  __func__, inode->i_ino,
3232 			  fi->i_gc_failures[GC_FAILURE_PIN]);
3233 		clear_inode_flag(inode, FI_PIN_FILE);
3234 		return -EAGAIN;
3235 	}
3236 	return 0;
3237 }
3238 
3239 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3240 {
3241 	struct inode *inode = file_inode(filp);
3242 	__u32 pin;
3243 	int ret = 0;
3244 
3245 	if (get_user(pin, (__u32 __user *)arg))
3246 		return -EFAULT;
3247 
3248 	if (!S_ISREG(inode->i_mode))
3249 		return -EINVAL;
3250 
3251 	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3252 		return -EROFS;
3253 
3254 	ret = mnt_want_write_file(filp);
3255 	if (ret)
3256 		return ret;
3257 
3258 	inode_lock(inode);
3259 
3260 	if (f2fs_should_update_outplace(inode, NULL)) {
3261 		ret = -EINVAL;
3262 		goto out;
3263 	}
3264 
3265 	if (!pin) {
3266 		clear_inode_flag(inode, FI_PIN_FILE);
3267 		f2fs_i_gc_failures_write(inode, 0);
3268 		goto done;
3269 	}
3270 
3271 	if (f2fs_pin_file_control(inode, false)) {
3272 		ret = -EAGAIN;
3273 		goto out;
3274 	}
3275 
3276 	ret = f2fs_convert_inline_inode(inode);
3277 	if (ret)
3278 		goto out;
3279 
3280 	if (!f2fs_disable_compressed_file(inode)) {
3281 		ret = -EOPNOTSUPP;
3282 		goto out;
3283 	}
3284 
3285 	set_inode_flag(inode, FI_PIN_FILE);
3286 	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3287 done:
3288 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3289 out:
3290 	inode_unlock(inode);
3291 	mnt_drop_write_file(filp);
3292 	return ret;
3293 }
3294 
3295 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3296 {
3297 	struct inode *inode = file_inode(filp);
3298 	__u32 pin = 0;
3299 
3300 	if (is_inode_flag_set(inode, FI_PIN_FILE))
3301 		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3302 	return put_user(pin, (u32 __user *)arg);
3303 }
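/*
 * Editor's sketch (userspace, illustrative only): pinning a file against GC
 * relocation with the pair of ioctls above.  The get side reports the
 * pinned file's GC-failure count, which f2fs_pin_file_control() uses to
 * auto-unpin a file once it exceeds gc_pin_file_threshold.  <linux/f2fs.h>
 * assumed.
 */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int example_pin(int fd, int pin)
{
	__u32 val = pin ? 1 : 0;

	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &val) < 0)
		return -1;
	if (ioctl(fd, F2FS_IOC_GET_PIN_FILE, &val) < 0)
		return -1;
	return (int)val;	/* current GC-failure count */
}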
3304 
3305 int f2fs_precache_extents(struct inode *inode)
3306 {
3307 	struct f2fs_inode_info *fi = F2FS_I(inode);
3308 	struct f2fs_map_blocks map;
3309 	pgoff_t m_next_extent;
3310 	loff_t end;
3311 	int err = 0;	/* in case the precache loop never runs */
3312 
3313 	if (is_inode_flag_set(inode, FI_NO_EXTENT))
3314 		return -EOPNOTSUPP;
3315 
3316 	map.m_lblk = 0;
3317 	map.m_next_pgofs = NULL;
3318 	map.m_next_extent = &m_next_extent;
3319 	map.m_seg_type = NO_CHECK_TYPE;
3320 	map.m_may_create = false;
3321 	end = F2FS_I_SB(inode)->max_file_blocks;
3322 
3323 	while (map.m_lblk < end) {
3324 		map.m_len = end - map.m_lblk;
3325 
3326 		down_write(&fi->i_gc_rwsem[WRITE]);
3327 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3328 		up_write(&fi->i_gc_rwsem[WRITE]);
3329 		if (err)
3330 			return err;
3331 
3332 		map.m_lblk = m_next_extent;
3333 	}
3334 
3335 	return err;
3336 }
3337 
3338 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3339 {
3340 	return f2fs_precache_extents(file_inode(filp));
3341 }
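/*
 * Editor's sketch (userspace, illustrative only): warming the extent cache
 * through the wrapper above, e.g. before a burst of random reads.  The
 * ioctl takes no argument.  <linux/f2fs.h> assumed.
 */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int example_precache(int fd)
{
	return ioctl(fd, F2FS_IOC_PRECACHE_EXTENTS);
}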
3342 
3343 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3344 {
3345 	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3346 	__u64 block_count;
3347 
3348 	if (!capable(CAP_SYS_ADMIN))
3349 		return -EPERM;
3350 
3351 	if (f2fs_readonly(sbi->sb))
3352 		return -EROFS;
3353 
3354 	if (copy_from_user(&block_count, (void __user *)arg,
3355 			   sizeof(block_count)))
3356 		return -EFAULT;
3357 
3358 	return f2fs_resize_fs(sbi, block_count);
3359 }
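/*
 * Editor's sketch (userspace, illustrative only): online resize via the
 * ioctl above.  The argument is the new total block count of the
 * filesystem; in practice resize.f2fs computes it rather than callers
 * doing so by hand.  <linux/f2fs.h> assumed.
 */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int example_resize(int mnt_fd, __u64 new_block_count)
{
	return ioctl(mnt_fd, F2FS_IOC_RESIZE_FS, &new_block_count);
}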
3360 
3361 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3362 {
3363 	struct inode *inode = file_inode(filp);
3364 
3365 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3366 
3367 	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3368 		f2fs_warn(F2FS_I_SB(inode),
3369 			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.",
3370 			  inode->i_ino);
3371 		return -EOPNOTSUPP;
3372 	}
3373 
3374 	return fsverity_ioctl_enable(filp, (const void __user *)arg);
3375 }
3376 
3377 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3378 {
3379 	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3380 		return -EOPNOTSUPP;
3381 
3382 	return fsverity_ioctl_measure(filp, (void __user *)arg);
3383 }
3384 
3385 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3386 {
3387 	struct inode *inode = file_inode(filp);
3388 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3389 	char *vbuf;
3390 	int count;
3391 	int err = 0;
3392 
3393 	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3394 	if (!vbuf)
3395 		return -ENOMEM;
3396 
3397 	down_read(&sbi->sb_lock);
3398 	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3399 			ARRAY_SIZE(sbi->raw_super->volume_name),
3400 			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3401 	up_read(&sbi->sb_lock);
3402 
3403 	if (copy_to_user((char __user *)arg, vbuf,
3404 				min(FSLABEL_MAX, count)))
3405 		err = -EFAULT;
3406 
3407 	kfree(vbuf);
3408 	return err;
3409 }
3410 
3411 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3412 {
3413 	struct inode *inode = file_inode(filp);
3414 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3415 	char *vbuf;
3416 	int err = 0;
3417 
3418 	if (!capable(CAP_SYS_ADMIN))
3419 		return -EPERM;
3420 
3421 	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3422 	if (IS_ERR(vbuf))
3423 		return PTR_ERR(vbuf);
3424 
3425 	err = mnt_want_write_file(filp);
3426 	if (err)
3427 		goto out;
3428 
3429 	down_write(&sbi->sb_lock);
3430 
3431 	memset(sbi->raw_super->volume_name, 0,
3432 			sizeof(sbi->raw_super->volume_name));
3433 	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3434 			sbi->raw_super->volume_name,
3435 			ARRAY_SIZE(sbi->raw_super->volume_name));
3436 
3437 	err = f2fs_commit_super(sbi, false);
3438 
3439 	up_write(&sbi->sb_lock);
3440 
3441 	mnt_drop_write_file(filp);
3442 out:
3443 	kfree(vbuf);
3444 	return err;
3445 }
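/*
 * Editor's sketch (userspace, illustrative only): round-tripping the volume
 * label through the two ioctls above.  The kernel converts between the
 * UTF-8 seen here and the UTF-16 on-disk volume_name.  FSLABEL_MAX comes
 * from <linux/fs.h>.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

static int example_relabel(int mnt_fd, const char *new_label)
{
	char label[FSLABEL_MAX] = "";

	if (ioctl(mnt_fd, FS_IOC_GETFSLABEL, label) < 0)
		return -1;
	printf("old label: %s\n", label);

	strncpy(label, new_label, sizeof(label) - 1);
	label[sizeof(label) - 1] = '\0';
	return ioctl(mnt_fd, FS_IOC_SETFSLABEL, label);
}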
3446 
3447 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3448 {
3449 	struct inode *inode = file_inode(filp);
3450 	__u64 blocks;
3451 
3452 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3453 		return -EOPNOTSUPP;
3454 
3455 	if (!f2fs_compressed_file(inode))
3456 		return -EINVAL;
3457 
3458 	blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3459 	return put_user(blocks, (u64 __user *)arg);
3460 }
3461 
3462 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3463 {
3464 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3465 	unsigned int released_blocks = 0;
3466 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3467 	block_t blkaddr;
3468 	int i;
3469 
3470 	for (i = 0; i < count; i++) {
3471 		blkaddr = data_blkaddr(dn->inode, dn->node_page,
3472 						dn->ofs_in_node + i);
3473 
3474 		if (!__is_valid_data_blkaddr(blkaddr))
3475 			continue;
3476 		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3477 					DATA_GENERIC_ENHANCE)))
3478 			return -EFSCORRUPTED;
3479 	}
3480 
3481 	while (count) {
3482 		int compr_blocks = 0;
3483 
3484 		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3485 			blkaddr = f2fs_data_blkaddr(dn);
3486 
3487 			if (i == 0) {
3488 				if (blkaddr == COMPRESS_ADDR)
3489 					continue;
3490 				dn->ofs_in_node += cluster_size;
3491 				goto next;
3492 			}
3493 
3494 			if (__is_valid_data_blkaddr(blkaddr))
3495 				compr_blocks++;
3496 
3497 			if (blkaddr != NEW_ADDR)
3498 				continue;
3499 
3500 			dn->data_blkaddr = NULL_ADDR;
3501 			f2fs_set_data_blkaddr(dn);
3502 		}
3503 
3504 		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3505 		dec_valid_block_count(sbi, dn->inode,
3506 					cluster_size - compr_blocks);
3507 
3508 		released_blocks += cluster_size - compr_blocks;
3509 next:
3510 		count -= cluster_size;
3511 	}
3512 
3513 	return released_blocks;
3514 }
3515 
3516 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3517 {
3518 	struct inode *inode = file_inode(filp);
3519 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3520 	pgoff_t page_idx = 0, last_idx;
3521 	unsigned int released_blocks = 0;
3522 	int ret;
3523 	int writecount;
3524 
3525 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3526 		return -EOPNOTSUPP;
3527 
3528 	if (!f2fs_compressed_file(inode))
3529 		return -EINVAL;
3530 
3531 	if (f2fs_readonly(sbi->sb))
3532 		return -EROFS;
3533 
3534 	ret = mnt_want_write_file(filp);
3535 	if (ret)
3536 		return ret;
3537 
3538 	f2fs_balance_fs(F2FS_I_SB(inode), true);
3539 
3540 	inode_lock(inode);
3541 
3542 	writecount = atomic_read(&inode->i_writecount);
3543 	if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3544 			(!(filp->f_mode & FMODE_WRITE) && writecount)) {
3545 		ret = -EBUSY;
3546 		goto out;
3547 	}
3548 
3549 	if (IS_IMMUTABLE(inode)) {
3550 		ret = -EINVAL;
3551 		goto out;
3552 	}
3553 
3554 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3555 	if (ret)
3556 		goto out;
3557 
3558 	F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
3559 	f2fs_set_inode_flags(inode);
3560 	inode->i_ctime = current_time(inode);
3561 	f2fs_mark_inode_dirty_sync(inode, true);
3562 
3563 	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3564 		goto out;
3565 
3566 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3567 	down_write(&F2FS_I(inode)->i_mmap_sem);
3568 
3569 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3570 
3571 	while (page_idx < last_idx) {
3572 		struct dnode_of_data dn;
3573 		pgoff_t end_offset, count;
3574 
3575 		set_new_dnode(&dn, inode, NULL, NULL, 0);
3576 		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3577 		if (ret) {
3578 			if (ret == -ENOENT) {
3579 				page_idx = f2fs_get_next_page_offset(&dn,
3580 								page_idx);
3581 				ret = 0;
3582 				continue;
3583 			}
3584 			break;
3585 		}
3586 
3587 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3588 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3589 		count = round_up(count, F2FS_I(inode)->i_cluster_size);
3590 
3591 		ret = release_compress_blocks(&dn, count);
3592 
3593 		f2fs_put_dnode(&dn);
3594 
3595 		if (ret < 0)
3596 			break;
3597 
3598 		page_idx += count;
3599 		released_blocks += ret;
3600 	}
3601 
3602 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3603 	up_write(&F2FS_I(inode)->i_mmap_sem);
3604 out:
3605 	inode_unlock(inode);
3606 
3607 	mnt_drop_write_file(filp);
3608 
3609 	if (ret >= 0) {
3610 		ret = put_user(released_blocks, (u64 __user *)arg);
3611 	} else if (released_blocks &&
3612 			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3613 		set_sbi_flag(sbi, SBI_NEED_FSCK);
3614 		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3615 			"iblocks=%llu, released=%u, compr_blocks=%u, "
3616 			"run fsck to fix.",
3617 			__func__, inode->i_ino, inode->i_blocks,
3618 			released_blocks,
3619 			atomic_read(&F2FS_I(inode)->i_compr_blocks));
3620 	}
3621 
3622 	return ret;
3623 }
3624 
3625 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3626 {
3627 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3628 	unsigned int reserved_blocks = 0;
3629 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3630 	block_t blkaddr;
3631 	int i;
3632 
3633 	for (i = 0; i < count; i++) {
3634 		blkaddr = data_blkaddr(dn->inode, dn->node_page,
3635 						dn->ofs_in_node + i);
3636 
3637 		if (!__is_valid_data_blkaddr(blkaddr))
3638 			continue;
3639 		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3640 					DATA_GENERIC_ENHANCE)))
3641 			return -EFSCORRUPTED;
3642 	}
3643 
3644 	while (count) {
3645 		int compr_blocks = 0;
3646 		blkcnt_t reserved;
3647 		int ret;
3648 
3649 		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3650 			blkaddr = f2fs_data_blkaddr(dn);
3651 
3652 			if (i == 0) {
3653 				if (blkaddr == COMPRESS_ADDR)
3654 					continue;
3655 				dn->ofs_in_node += cluster_size;
3656 				goto next;
3657 			}
3658 
3659 			if (__is_valid_data_blkaddr(blkaddr)) {
3660 				compr_blocks++;
3661 				continue;
3662 			}
3663 
3664 			dn->data_blkaddr = NEW_ADDR;
3665 			f2fs_set_data_blkaddr(dn);
3666 		}
3667 
3668 		reserved = cluster_size - compr_blocks;
3669 		ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3670 		if (ret)
3671 			return ret;
3672 
3673 		if (reserved != cluster_size - compr_blocks)
3674 			return -ENOSPC;
3675 
3676 		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3677 
3678 		reserved_blocks += reserved;
3679 next:
3680 		count -= cluster_size;
3681 	}
3682 
3683 	return reserved_blocks;
3684 }
3685 
3686 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3687 {
3688 	struct inode *inode = file_inode(filp);
3689 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3690 	pgoff_t page_idx = 0, last_idx;
3691 	unsigned int reserved_blocks = 0;
3692 	int ret;
3693 
3694 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3695 		return -EOPNOTSUPP;
3696 
3697 	if (!f2fs_compressed_file(inode))
3698 		return -EINVAL;
3699 
3700 	if (f2fs_readonly(sbi->sb))
3701 		return -EROFS;
3702 
3703 	ret = mnt_want_write_file(filp);
3704 	if (ret)
3705 		return ret;
3706 
3707 	if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3708 		goto out;
3709 
3710 	f2fs_balance_fs(F2FS_I_SB(inode), true);
3711 
3712 	inode_lock(inode);
3713 
3714 	if (!IS_IMMUTABLE(inode)) {
3715 		ret = -EINVAL;
3716 		goto unlock_inode;
3717 	}
3718 
3719 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3720 	down_write(&F2FS_I(inode)->i_mmap_sem);
3721 
3722 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3723 
3724 	while (page_idx < last_idx) {
3725 		struct dnode_of_data dn;
3726 		pgoff_t end_offset, count;
3727 
3728 		set_new_dnode(&dn, inode, NULL, NULL, 0);
3729 		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3730 		if (ret) {
3731 			if (ret == -ENOENT) {
3732 				page_idx = f2fs_get_next_page_offset(&dn,
3733 								page_idx);
3734 				ret = 0;
3735 				continue;
3736 			}
3737 			break;
3738 		}
3739 
3740 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3741 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3742 		count = round_up(count, F2FS_I(inode)->i_cluster_size);
3743 
3744 		ret = reserve_compress_blocks(&dn, count);
3745 
3746 		f2fs_put_dnode(&dn);
3747 
3748 		if (ret < 0)
3749 			break;
3750 
3751 		page_idx += count;
3752 		reserved_blocks += ret;
3753 	}
3754 
3755 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3756 	up_write(&F2FS_I(inode)->i_mmap_sem);
3757 
3758 	if (ret >= 0) {
3759 		F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
3760 		f2fs_set_inode_flags(inode);
3761 		inode->i_ctime = current_time(inode);
3762 		f2fs_mark_inode_dirty_sync(inode, true);
3763 	}
3764 unlock_inode:
3765 	inode_unlock(inode);
3766 out:
3767 	mnt_drop_write_file(filp);
3768 
3769 	if (ret >= 0) {
3770 		ret = put_user(reserved_blocks, (u64 __user *)arg);
3771 	} else if (reserved_blocks &&
3772 			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3773 		set_sbi_flag(sbi, SBI_NEED_FSCK);
3774 		f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3775 			"iblocks=%llu, reserved=%u, compr_blocks=%u, "
3776 			"run fsck to fix.",
3777 			__func__, inode->i_ino, inode->i_blocks,
3778 			reserved_blocks,
3779 			atomic_read(&F2FS_I(inode)->i_compr_blocks));
3780 	}
3781 
3782 	return ret;
3783 }
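/*
 * Editor's sketch (userspace, illustrative only): the release/reserve
 * lifecycle implemented above.  Releasing the blocks saved by compression
 * makes the file immutable and returns the freed block count; reserving
 * them again clears the immutable bit and makes the file writable.
 * <linux/f2fs.h> assumed.
 */
#include <sys/ioctl.h>
#include <linux/f2fs.h>

static int example_release_then_reserve(int fd)
{
	__u64 blocks;

	if (ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &blocks) < 0)
		return -1;	/* on success the file is now immutable */
	if (ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &blocks) < 0)
		return -1;	/* blocks are backed again, file writable */
	return 0;
}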
3784 
3785 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3786 		pgoff_t off, block_t block, block_t len, u32 flags)
3787 {
3788 	struct request_queue *q = bdev_get_queue(bdev);
3789 	sector_t sector = SECTOR_FROM_BLOCK(block);
3790 	sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3791 	int ret = 0;
3792 
3793 	if (!q)
3794 		return -ENXIO;
3795 
3796 	if (flags & F2FS_TRIM_FILE_DISCARD)
3797 		ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3798 						blk_queue_secure_erase(q) ?
3799 						BLKDEV_DISCARD_SECURE : 0);
3800 
3801 	if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3802 		if (IS_ENCRYPTED(inode))
3803 			ret = fscrypt_zeroout_range(inode, off, block, len);
3804 		else
3805 			ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3806 					GFP_NOFS, 0);
3807 	}
3808 
3809 	return ret;
3810 }
3811 
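/*
 * F2FS_IOC_SEC_TRIM_FILE: securely erase the blocks backing a file range by
 * (secure) discard and/or explicit zeroing, as requested via range.flags.
 * The range must be block-aligned, and a len of -1 means "to end of file".
 */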
static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct block_device *prev_bdev = NULL;
	struct f2fs_sectrim_range range;
	pgoff_t index, pg_end, prev_index = 0;
	block_t prev_block = 0, len = 0;
	loff_t end_addr;
	bool to_end = false;
	int ret = 0;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
			!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
			!f2fs_hw_support_discard(sbi)) ||
			((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
			 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
		return -EOPNOTSUPP;

	file_start_write(filp);
	inode_lock(inode);

	if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
			range.start >= inode->i_size) {
		ret = -EINVAL;
		goto err;
	}

	if (range.len == 0)
		goto err;

	if (inode->i_size - range.start > range.len) {
		end_addr = range.start + range.len;
	} else {
		end_addr = range.len == (u64)-1 ?
			sbi->sb->s_maxbytes : inode->i_size;
		to_end = true;
	}

	if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
			(!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
		ret = -EINVAL;
		goto err;
	}

	index = F2FS_BYTES_TO_BLK(range.start);
	pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto err;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	ret = filemap_write_and_wait_range(mapping, range.start,
			to_end ? LLONG_MAX : end_addr - 1);
	if (ret)
		goto out;

	truncate_inode_pages_range(mapping, range.start,
			to_end ? -1 : end_addr - 1);

	while (index < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;
		int i;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				index = f2fs_get_next_page_offset(&dn, index);
				continue;
			}
			goto out;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - index);
		for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
			struct block_device *cur_bdev;
			block_t blkaddr = f2fs_data_blkaddr(&dn);

			if (!__is_valid_data_blkaddr(blkaddr))
				continue;

			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
						DATA_GENERIC_ENHANCE)) {
				ret = -EFSCORRUPTED;
				f2fs_put_dnode(&dn);
				goto out;
			}

			cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
			if (f2fs_is_multi_device(sbi)) {
				int di = f2fs_target_device_index(sbi, blkaddr);

				blkaddr -= FDEV(di).start_blk;
			}

			if (len) {
				if (prev_bdev == cur_bdev &&
						index == prev_index + len &&
						blkaddr == prev_block + len) {
					len++;
				} else {
					ret = f2fs_secure_erase(prev_bdev,
						inode, prev_index, prev_block,
						len, range.flags);
					if (ret) {
						f2fs_put_dnode(&dn);
						goto out;
					}

					len = 0;
				}
			}

			if (!len) {
				prev_bdev = cur_bdev;
				prev_index = index;
				prev_block = blkaddr;
				len = 1;
			}
		}

		f2fs_put_dnode(&dn);

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();
	}

	if (len)
		ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
				prev_block, len, range.flags);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
err:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}

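/*
 * Userspace usage sketch (illustrative only: the fd is hypothetical, while
 * struct f2fs_sectrim_range and the flag names come from the f2fs uapi
 * header).  A len of (__u64)-1 trims to the end of the file:
 *
 *	struct f2fs_sectrim_range r = {
 *		.start = 0,
 *		.len = (__u64)-1,
 *		.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *
 *	if (ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &r) != 0)
 *		perror("F2FS_IOC_SEC_TRIM_FILE");
 */
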
static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case FS_IOC_GET_ENCRYPTION_NONCE:
		return f2fs_ioc_get_encryption_nonce(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	case FS_IOC_GETFSLABEL:
		return f2fs_ioc_getfslabel(filp, arg);
	case FS_IOC_SETFSLABEL:
		return f2fs_ioc_setfslabel(filp, arg);
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
		return f2fs_get_compress_blocks(filp, arg);
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
		return f2fs_release_compress_blocks(filp, arg);
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
		return f2fs_reserve_compress_blocks(filp, arg);
	case F2FS_IOC_SEC_TRIM_FILE:
		return f2fs_sec_trim_file(filp, arg);
	default:
		return -ENOTTY;
	}
}

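/*
 * Common ioctl entry point: fail fast with -EIO once a checkpoint error has
 * been detected, and with -ENOSPC while no further checkpoint can be
 * guaranteed.
 */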
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
		return -ENOSPC;

	return __f2fs_ioctl(filp, cmd, arg);
}

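/*
 * Read entry point.  Reads are refused with -EOPNOTSUPP until the
 * compression backend for this inode, if any, is ready.
 */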
static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	ret = generic_file_read_iter(iocb, iter);

	if (ret > 0)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);

	return ret;
}

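/*
 * Write entry point.  With IOCB_NOWAIT the inode lock is only tried, and the
 * write bails out with -EAGAIN whenever it might block (non-overwrite I/O,
 * inline data, forced buffered I/O).  Otherwise blocks are preallocated up
 * front and truncated back if the write falls short.
 */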
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode)) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		inode_lock(inode);
	}

	if (unlikely(IS_IMMUTABLE(inode))) {
		ret = -EPERM;
		goto unlock;
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = -EAGAIN;
				goto out;
			}
			goto write;
		}

		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
			goto write;

		if (iocb->ki_flags & IOCB_DIRECT) {
			/*
			 * Convert inline data for Direct I/O before entering
			 * f2fs_direct_IO().
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				goto out_err;
			/*
			 * If f2fs_force_buffered_io() is true, we have to
			 * allocate blocks all the time, since f2fs_direct_IO()
			 * will fall back to buffered I/O.
			 */
			if (!f2fs_force_buffered_io(inode, iocb, from) &&
					allow_outplace_dio(inode, iocb, from))
				goto write;
		}
		preallocated = true;
		target_size = iocb->ki_pos + iov_iter_count(from);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
out_err:
			clear_inode_flag(inode, FI_NO_PREALLOC);
			inode_unlock(inode);
			ret = err;
			goto out;
		}
write:
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
unlock:
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
					iov_iter_count(from), ret);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}

#ifdef CONFIG_COMPAT
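/*
 * 32-bit userspace lays out u64 ioctl arguments differently on some ABIs
 * (e.g. 4-byte alignment on x86), so the two structs below mirror
 * f2fs_gc_range and f2fs_move_range with compat_u64 members and are
 * re-marshalled field by field before calling the native handlers.
 */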
struct compat_f2fs_gc_range {
	u32 sync;
	compat_u64 start;
	compat_u64 len;
};
#define F2FS_IOC32_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,\
						struct compat_f2fs_gc_range)

static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_gc_range __user *urange;
	struct f2fs_gc_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.sync, &urange->sync);
	err |= get_user(range.start, &urange->start);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_gc_range(file, &range);
}

struct compat_f2fs_move_range {
	u32 dst_fd;
	compat_u64 pos_in;
	compat_u64 pos_out;
	compat_u64 len;
};
#define F2FS_IOC32_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
					struct compat_f2fs_move_range)

static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_move_range __user *urange;
	struct f2fs_move_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.dst_fd, &urange->dst_fd);
	err |= get_user(range.pos_in, &urange->pos_in);
	err |= get_user(range.pos_out, &urange->pos_out);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_move_range(file, &range);
}

long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
		return -ENOSPC;

	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
		return f2fs_compat_ioc_gc_range(file, arg);
	case F2FS_IOC32_MOVE_RANGE:
		return f2fs_compat_ioc_move_range(file, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case FITRIM:
	case FS_IOC_SET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_PWSALT:
	case FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case FS_IOC_GET_ENCRYPTION_NONCE:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case FS_IOC_FSGETXATTR:
	case FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case FS_IOC_GETFSLABEL:
	case FS_IOC_SETFSLABEL:
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
	case F2FS_IOC_SEC_TRIM_FILE:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

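/* VFS entry points for regular files on f2fs. */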
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= f2fs_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};