1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/file.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/stat.h>
11 #include <linux/buffer_head.h>
12 #include <linux/writeback.h>
13 #include <linux/blkdev.h>
14 #include <linux/falloc.h>
15 #include <linux/types.h>
16 #include <linux/compat.h>
17 #include <linux/uaccess.h>
18 #include <linux/mount.h>
19 #include <linux/pagevec.h>
20 #include <linux/uio.h>
21 #include <linux/uuid.h>
22 #include <linux/file.h>
23 #include <linux/nls.h>
24 #include <linux/sched/signal.h>
25 
26 #include "f2fs.h"
27 #include "node.h"
28 #include "segment.h"
29 #include "xattr.h"
30 #include "acl.h"
31 #include "gc.h"
32 #include "trace.h"
33 #include <trace/events/f2fs.h>
34 #include <uapi/linux/f2fs.h>
35 
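/*
 * Read fault handler for mmap()ed files: take i_mmap_sem shared so the
 * fault cannot race with truncate/punch-hole, then let the generic
 * filemap_fault() serve the page from the page cache.
 */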
36 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
37 {
38 	struct inode *inode = file_inode(vmf->vma->vm_file);
39 	vm_fault_t ret;
40 
41 	down_read(&F2FS_I(inode)->i_mmap_sem);
42 	ret = filemap_fault(vmf);
43 	up_read(&F2FS_I(inode)->i_mmap_sem);
44 
45 	if (!ret)
46 		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
47 							F2FS_BLKSIZE);
48 
49 	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
50 
51 	return ret;
52 }
53 
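/*
 * Write fault handler: reserve a block for the faulting page (unless it
 * sits in a compressed cluster that is already fully allocated),
 * revalidate the page under its lock, and zero any part of it beyond
 * i_size before marking it dirty.
 */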
54 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
55 {
56 	struct page *page = vmf->page;
57 	struct inode *inode = file_inode(vmf->vma->vm_file);
58 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
59 	struct dnode_of_data dn;
60 	bool need_alloc = true;
61 	int err = 0;
62 
63 	if (unlikely(IS_IMMUTABLE(inode)))
64 		return VM_FAULT_SIGBUS;
65 
66 	if (unlikely(f2fs_cp_error(sbi))) {
67 		err = -EIO;
68 		goto err;
69 	}
70 
71 	if (!f2fs_is_checkpoint_ready(sbi)) {
72 		err = -ENOSPC;
73 		goto err;
74 	}
75 
76 #ifdef CONFIG_F2FS_FS_COMPRESSION
77 	if (f2fs_compressed_file(inode)) {
78 		int ret = f2fs_is_compressed_cluster(inode, page->index);
79 
80 		if (ret < 0) {
81 			err = ret;
82 			goto err;
83 		} else if (ret) {
84 			if (ret < F2FS_I(inode)->i_cluster_size) {
85 				err = -EAGAIN;
86 				goto err;
87 			}
88 			need_alloc = false;
89 		}
90 	}
91 #endif
92 	/* should be done outside of any locked page */
93 	if (need_alloc)
94 		f2fs_balance_fs(sbi, true);
95 
96 	sb_start_pagefault(inode->i_sb);
97 
98 	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
99 
100 	file_update_time(vmf->vma->vm_file);
101 	down_read(&F2FS_I(inode)->i_mmap_sem);
102 	lock_page(page);
103 	if (unlikely(page->mapping != inode->i_mapping ||
104 			page_offset(page) > i_size_read(inode) ||
105 			!PageUptodate(page))) {
106 		unlock_page(page);
107 		err = -EFAULT;
108 		goto out_sem;
109 	}
110 
111 	if (need_alloc) {
112 		/* block allocation */
113 		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
114 		set_new_dnode(&dn, inode, NULL, NULL, 0);
115 		err = f2fs_get_block(&dn, page->index);
116 		f2fs_put_dnode(&dn);
117 		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
118 	}
119 
120 #ifdef CONFIG_F2FS_FS_COMPRESSION
121 	if (!need_alloc) {
122 		set_new_dnode(&dn, inode, NULL, NULL, 0);
123 		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
124 		f2fs_put_dnode(&dn);
125 	}
126 #endif
127 	if (err) {
128 		unlock_page(page);
129 		goto out_sem;
130 	}
131 
132 	f2fs_wait_on_page_writeback(page, DATA, false, true);
133 
134 	/* wait for GCed page writeback via META_MAPPING */
135 	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
136 
137 	/*
138 	 * check to see if the page is mapped already (no holes)
139 	 */
140 	if (PageMappedToDisk(page))
141 		goto out_sem;
142 
143 	/* page is wholly or partially inside EOF */
144 	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
145 						i_size_read(inode)) {
146 		loff_t offset;
147 
148 		offset = i_size_read(inode) & ~PAGE_MASK;
149 		zero_user_segment(page, offset, PAGE_SIZE);
150 	}
151 	set_page_dirty(page);
152 	if (!PageUptodate(page))
153 		SetPageUptodate(page);
154 
155 	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
156 	f2fs_update_time(sbi, REQ_TIME);
157 
158 	trace_f2fs_vm_page_mkwrite(page, DATA);
159 out_sem:
160 	up_read(&F2FS_I(inode)->i_mmap_sem);
161 
162 	sb_end_pagefault(inode->i_sb);
163 err:
164 	return block_page_mkwrite_return(err);
165 }
166 
167 static const struct vm_operations_struct f2fs_file_vm_ops = {
168 	.fault		= f2fs_filemap_fault,
169 	.map_pages	= filemap_map_pages,
170 	.page_mkwrite	= f2fs_vm_page_mkwrite,
171 };
172 
173 static int get_parent_ino(struct inode *inode, nid_t *pino)
174 {
175 	struct dentry *dentry;
176 
177 	/*
178 	 * Make sure to get the non-deleted alias.  The alias associated with
179 	 * the open file descriptor being fsync()'ed may be deleted already.
180 	 */
181 	dentry = d_find_alias(inode);
182 	if (!dentry)
183 		return 0;
184 
185 	*pino = parent_ino(dentry);
186 	dput(dentry);
187 	return 1;
188 }
189 
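/*
 * Decide whether this fsync can rely on roll-forward recovery of the
 * node chain alone, or whether a full checkpoint is required.  Any
 * nonzero cp_reason forces f2fs_sync_fs() in f2fs_do_sync_file().
 */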
190 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
191 {
192 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
193 	enum cp_reason_type cp_reason = CP_NO_NEEDED;
194 
195 	if (!S_ISREG(inode->i_mode))
196 		cp_reason = CP_NON_REGULAR;
197 	else if (f2fs_compressed_file(inode))
198 		cp_reason = CP_COMPRESSED;
199 	else if (inode->i_nlink != 1)
200 		cp_reason = CP_HARDLINK;
201 	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
202 		cp_reason = CP_SB_NEED_CP;
203 	else if (file_wrong_pino(inode))
204 		cp_reason = CP_WRONG_PINO;
205 	else if (!f2fs_space_for_roll_forward(sbi))
206 		cp_reason = CP_NO_SPC_ROLL;
207 	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
208 		cp_reason = CP_NODE_NEED_CP;
209 	else if (test_opt(sbi, FASTBOOT))
210 		cp_reason = CP_FASTBOOT_MODE;
211 	else if (F2FS_OPTION(sbi).active_logs == 2)
212 		cp_reason = CP_SPEC_LOG_NUM;
213 	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
214 		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
215 		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
216 							TRANS_DIR_INO))
217 		cp_reason = CP_RECOVER_DIR;
218 
219 	return cp_reason;
220 }
221 
222 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
223 {
224 	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
225 	bool ret = false;
226 	/* but we still need to detect pending inode updates */
227 	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
228 		ret = true;
229 	f2fs_put_page(i, 0);
230 	return ret;
231 }
232 
233 static void try_to_fix_pino(struct inode *inode)
234 {
235 	struct f2fs_inode_info *fi = F2FS_I(inode);
236 	nid_t pino;
237 
238 	down_write(&fi->i_sem);
239 	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
240 			get_parent_ino(inode, &pino)) {
241 		f2fs_i_pino_write(inode, pino);
242 		file_got_pino(inode);
243 	}
244 	up_write(&fi->i_sem);
245 }
246 
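/*
 * Core fsync path: write back data pages, then either trigger a full
 * checkpoint (when need_do_checkpoint() says roll-forward recovery is
 * not enough) or persist this inode's node chain, and finally issue a
 * cache flush to the device.
 */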
247 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
248 						int datasync, bool atomic)
249 {
250 	struct inode *inode = file->f_mapping->host;
251 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
252 	nid_t ino = inode->i_ino;
253 	int ret = 0;
254 	enum cp_reason_type cp_reason = 0;
255 	struct writeback_control wbc = {
256 		.sync_mode = WB_SYNC_ALL,
257 		.nr_to_write = LONG_MAX,
258 		.for_reclaim = 0,
259 	};
260 	unsigned int seq_id = 0;
261 
262 	if (unlikely(f2fs_readonly(inode->i_sb)))
263 		return 0;
264 
265 	trace_f2fs_sync_file_enter(inode);
266 
267 	if (S_ISDIR(inode->i_mode))
268 		goto go_write;
269 
270 	/* if fdatasync is triggered, let's do in-place-update */
271 	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
272 		set_inode_flag(inode, FI_NEED_IPU);
273 	ret = file_write_and_wait_range(file, start, end);
274 	clear_inode_flag(inode, FI_NEED_IPU);
275 
276 	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
277 		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
278 		return ret;
279 	}
280 
281 	/* if the inode is dirty, let's recover all the time */
282 	if (!f2fs_skip_inode_update(inode, datasync)) {
283 		f2fs_write_inode(inode, NULL);
284 		goto go_write;
285 	}
286 
287 	/*
288 	 * if there is no written data, don't waste time writing recovery info.
289 	 */
290 	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
291 			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
292 
293 		/* it may call write_inode just prior to fsync */
294 		if (need_inode_page_update(sbi, ino))
295 			goto go_write;
296 
297 		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
298 				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
299 			goto flush_out;
300 		goto out;
301 	}
302 go_write:
303 	/*
304 	 * Both fdatasync() and fsync() must be recoverable after a
305 	 * sudden power-off.
306 	 */
307 	down_read(&F2FS_I(inode)->i_sem);
308 	cp_reason = need_do_checkpoint(inode);
309 	up_read(&F2FS_I(inode)->i_sem);
310 
311 	if (cp_reason) {
312 		/* all the dirty node pages should be flushed for POR */
313 		ret = f2fs_sync_fs(inode->i_sb, 1);
314 
315 		/*
316 		 * We've secured consistency through sync_fs. The pino fixed up
317 		 * below will be used only for fsynced inodes after checkpoint.
318 		 */
319 		try_to_fix_pino(inode);
320 		clear_inode_flag(inode, FI_APPEND_WRITE);
321 		clear_inode_flag(inode, FI_UPDATE_WRITE);
322 		goto out;
323 	}
324 sync_nodes:
325 	atomic_inc(&sbi->wb_sync_req[NODE]);
326 	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
327 	atomic_dec(&sbi->wb_sync_req[NODE]);
328 	if (ret)
329 		goto out;
330 
331 	/* if cp_error is set, avoid an infinite loop */
332 	if (unlikely(f2fs_cp_error(sbi))) {
333 		ret = -EIO;
334 		goto out;
335 	}
336 
337 	if (f2fs_need_inode_block_update(sbi, ino)) {
338 		f2fs_mark_inode_dirty_sync(inode, true);
339 		f2fs_write_inode(inode, NULL);
340 		goto sync_nodes;
341 	}
342 
343 	/*
344 	 * If it's an atomic write, write ordering is already guaranteed, so
345 	 * we don't need to wait for node write completion here: the node
346 	 * chain serializes node blocks. If one of the node writes is
347 	 * reordered, we simply see a broken chain and stop roll-forward
348 	 * recovery, which means we recover either all or none of the node
349 	 * blocks covered by the fsync mark.
350 	 */
351 	if (!atomic) {
352 		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
353 		if (ret)
354 			goto out;
355 	}
356 
357 	/* once recovery info is written, no need to track this */
358 	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
359 	clear_inode_flag(inode, FI_APPEND_WRITE);
360 flush_out:
361 	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
362 		ret = f2fs_issue_flush(sbi, inode->i_ino);
363 	if (!ret) {
364 		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
365 		clear_inode_flag(inode, FI_UPDATE_WRITE);
366 		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
367 	}
368 	f2fs_update_time(sbi, REQ_TIME);
369 out:
370 	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
371 	f2fs_trace_ios(NULL, 1);
372 	return ret;
373 }
374 
375 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
376 {
377 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
378 		return -EIO;
379 	return f2fs_do_sync_file(file, start, end, datasync, false);
380 }
381 
382 static bool __found_offset(struct address_space *mapping, block_t blkaddr,
383 				pgoff_t index, int whence)
384 {
385 	switch (whence) {
386 	case SEEK_DATA:
387 		if (__is_valid_data_blkaddr(blkaddr))
388 			return true;
389 		if (blkaddr == NEW_ADDR &&
390 		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
391 			return true;
392 		break;
393 	case SEEK_HOLE:
394 		if (blkaddr == NULL_ADDR)
395 			return true;
396 		break;
397 	}
398 	return false;
399 }
400 
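/*
 * Resolve SEEK_DATA/SEEK_HOLE by walking the direct node blocks from
 * the requested offset and testing each block address with
 * __found_offset().
 */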
401 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
402 {
403 	struct inode *inode = file->f_mapping->host;
404 	loff_t maxbytes = inode->i_sb->s_maxbytes;
405 	struct dnode_of_data dn;
406 	pgoff_t pgofs, end_offset;
407 	loff_t data_ofs = offset;
408 	loff_t isize;
409 	int err = 0;
410 
411 	inode_lock(inode);
412 
413 	isize = i_size_read(inode);
414 	if (offset >= isize)
415 		goto fail;
416 
417 	/* handle inline data case */
418 	if (f2fs_has_inline_data(inode)) {
419 		if (whence == SEEK_HOLE) {
420 			data_ofs = isize;
421 			goto found;
422 		} else if (whence == SEEK_DATA) {
423 			data_ofs = offset;
424 			goto found;
425 		}
426 	}
427 
428 	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
429 
430 	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
431 		set_new_dnode(&dn, inode, NULL, NULL, 0);
432 		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
433 		if (err && err != -ENOENT) {
434 			goto fail;
435 		} else if (err == -ENOENT) {
436 			/* direct node does not exist */
437 			if (whence == SEEK_DATA) {
438 				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
439 				continue;
440 			} else {
441 				goto found;
442 			}
443 		}
444 
445 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
446 
447 		/* find data/hole in dnode block */
448 		for (; dn.ofs_in_node < end_offset;
449 				dn.ofs_in_node++, pgofs++,
450 				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
451 			block_t blkaddr;
452 
453 			blkaddr = f2fs_data_blkaddr(&dn);
454 
455 			if (__is_valid_data_blkaddr(blkaddr) &&
456 				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
457 					blkaddr, DATA_GENERIC_ENHANCE)) {
458 				f2fs_put_dnode(&dn);
459 				goto fail;
460 			}
461 
462 			if (__found_offset(file->f_mapping, blkaddr,
463 							pgofs, whence)) {
464 				f2fs_put_dnode(&dn);
465 				goto found;
466 			}
467 		}
468 		f2fs_put_dnode(&dn);
469 	}
470 
471 	if (whence == SEEK_DATA)
472 		goto fail;
473 found:
474 	if (whence == SEEK_HOLE && data_ofs > isize)
475 		data_ofs = isize;
476 	inode_unlock(inode);
477 	return vfs_setpos(file, data_ofs, maxbytes);
478 fail:
479 	inode_unlock(inode);
480 	return -ENXIO;
481 }
482 
483 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
484 {
485 	struct inode *inode = file->f_mapping->host;
486 	loff_t maxbytes = inode->i_sb->s_maxbytes;
487 
488 	switch (whence) {
489 	case SEEK_SET:
490 	case SEEK_CUR:
491 	case SEEK_END:
492 		return generic_file_llseek_size(file, offset, whence,
493 						maxbytes, i_size_read(inode));
494 	case SEEK_DATA:
495 	case SEEK_HOLE:
496 		if (offset < 0)
497 			return -ENXIO;
498 		return f2fs_seek_block(file, offset, whence);
499 	}
500 
501 	return -EINVAL;
502 }
503 
504 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
505 {
506 	struct inode *inode = file_inode(file);
507 	int err;
508 
509 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
510 		return -EIO;
511 
512 	if (!f2fs_is_compress_backend_ready(inode))
513 		return -EOPNOTSUPP;
514 
515 	/* we don't need to use inline_data strictly */
516 	err = f2fs_convert_inline_inode(inode);
517 	if (err)
518 		return err;
519 
520 	file_accessed(file);
521 	vma->vm_ops = &f2fs_file_vm_ops;
522 	set_inode_flag(inode, FI_MMAP_FILE);
523 	return 0;
524 }
525 
526 static int f2fs_file_open(struct inode *inode, struct file *filp)
527 {
528 	int err = fscrypt_file_open(inode, filp);
529 
530 	if (err)
531 		return err;
532 
533 	if (!f2fs_is_compress_backend_ready(inode))
534 		return -EOPNOTSUPP;
535 
536 	err = fsverity_file_open(inode, filp);
537 	if (err)
538 		return err;
539 
540 	filp->f_mode |= FMODE_NOWAIT;
541 
542 	return dquot_file_open(inode, filp);
543 }
544 
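/*
 * Invalidate 'count' block addresses inside one dnode block starting at
 * dn->ofs_in_node, updating compressed-cluster block accounting and the
 * extent cache for the freed range.
 */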
545 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
546 {
547 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
548 	struct f2fs_node *raw_node;
549 	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
550 	__le32 *addr;
551 	int base = 0;
552 	bool compressed_cluster = false;
553 	int cluster_index = 0, valid_blocks = 0;
554 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
555 	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
556 
557 	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
558 		base = get_extra_isize(dn->inode);
559 
560 	raw_node = F2FS_NODE(dn->node_page);
561 	addr = blkaddr_in_node(raw_node) + base + ofs;
562 
563 	/* Assumption: truncation starts at a cluster boundary */
564 	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
565 		block_t blkaddr = le32_to_cpu(*addr);
566 
567 		if (f2fs_compressed_file(dn->inode) &&
568 					!(cluster_index & (cluster_size - 1))) {
569 			if (compressed_cluster)
570 				f2fs_i_compr_blocks_update(dn->inode,
571 							valid_blocks, false);
572 			compressed_cluster = (blkaddr == COMPRESS_ADDR);
573 			valid_blocks = 0;
574 		}
575 
576 		if (blkaddr == NULL_ADDR)
577 			continue;
578 
579 		dn->data_blkaddr = NULL_ADDR;
580 		f2fs_set_data_blkaddr(dn);
581 
582 		if (__is_valid_data_blkaddr(blkaddr)) {
583 			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
584 					DATA_GENERIC_ENHANCE))
585 				continue;
586 			if (compressed_cluster)
587 				valid_blocks++;
588 		}
589 
590 		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
591 			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
592 
593 		f2fs_invalidate_blocks(sbi, blkaddr);
594 
595 		if (!released || blkaddr != COMPRESS_ADDR)
596 			nr_free++;
597 	}
598 
599 	if (compressed_cluster)
600 		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
601 
602 	if (nr_free) {
603 		pgoff_t fofs;
604 		/*
605 		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
606 		 * we will invalidate all blkaddr in the whole range.
607 		 */
608 		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
609 							dn->inode) + ofs;
610 		f2fs_update_extent_cache_range(dn, fofs, 0, len);
611 		dec_valid_block_count(sbi, dn->inode, nr_free);
612 	}
613 	dn->ofs_in_node = ofs;
614 
615 	f2fs_update_time(sbi, REQ_TIME);
616 	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
617 					 dn->ofs_in_node, nr_free);
618 }
619 
620 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
621 {
622 	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
623 }
624 
625 static int truncate_partial_data_page(struct inode *inode, u64 from,
626 								bool cache_only)
627 {
628 	loff_t offset = from & (PAGE_SIZE - 1);
629 	pgoff_t index = from >> PAGE_SHIFT;
630 	struct address_space *mapping = inode->i_mapping;
631 	struct page *page;
632 
633 	if (!offset && !cache_only)
634 		return 0;
635 
636 	if (cache_only) {
637 		page = find_lock_page(mapping, index);
638 		if (page && PageUptodate(page))
639 			goto truncate_out;
640 		f2fs_put_page(page, 1);
641 		return 0;
642 	}
643 
644 	page = f2fs_get_lock_data_page(inode, index, true);
645 	if (IS_ERR(page))
646 		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
647 truncate_out:
648 	f2fs_wait_on_page_writeback(page, DATA, true, true);
649 	zero_user(page, offset, PAGE_SIZE - offset);
650 
651 	/* An encrypted inode should have a key and truncate the last page. */
652 	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
653 	if (!cache_only)
654 		set_page_dirty(page);
655 	f2fs_put_page(page, 1);
656 	return 0;
657 }
658 
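/*
 * Truncate the block mappings down to 'from' in three steps: inline
 * data (if any), the partially-covered dnode block, then the rest of
 * the node tree via f2fs_truncate_inode_blocks().  The partial last
 * page is zeroed afterwards in truncate_partial_data_page().
 */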
659 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
660 {
661 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
662 	struct dnode_of_data dn;
663 	pgoff_t free_from;
664 	int count = 0, err = 0;
665 	struct page *ipage;
666 	bool truncate_page = false;
667 
668 	trace_f2fs_truncate_blocks_enter(inode, from);
669 
670 	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
671 
672 	if (free_from >= sbi->max_file_blocks)
673 		goto free_partial;
674 
675 	if (lock)
676 		f2fs_lock_op(sbi);
677 
678 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
679 	if (IS_ERR(ipage)) {
680 		err = PTR_ERR(ipage);
681 		goto out;
682 	}
683 
684 	if (f2fs_has_inline_data(inode)) {
685 		f2fs_truncate_inline_inode(inode, ipage, from);
686 		f2fs_put_page(ipage, 1);
687 		truncate_page = true;
688 		goto out;
689 	}
690 
691 	set_new_dnode(&dn, inode, ipage, NULL, 0);
692 	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
693 	if (err) {
694 		if (err == -ENOENT)
695 			goto free_next;
696 		goto out;
697 	}
698 
699 	count = ADDRS_PER_PAGE(dn.node_page, inode);
700 
701 	count -= dn.ofs_in_node;
702 	f2fs_bug_on(sbi, count < 0);
703 
704 	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
705 		f2fs_truncate_data_blocks_range(&dn, count);
706 		free_from += count;
707 	}
708 
709 	f2fs_put_dnode(&dn);
710 free_next:
711 	err = f2fs_truncate_inode_blocks(inode, free_from);
712 out:
713 	if (lock)
714 		f2fs_unlock_op(sbi);
715 free_partial:
716 	/* lastly zero out the first data page */
717 	if (!err)
718 		err = truncate_partial_data_page(inode, from, truncate_page);
719 
720 	trace_f2fs_truncate_blocks_exit(inode, err);
721 	return err;
722 }
723 
724 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
725 {
726 	u64 free_from = from;
727 	int err;
728 
729 #ifdef CONFIG_F2FS_FS_COMPRESSION
730 	/*
731 	 * for compressed files, only cluster-size-aligned
732 	 * truncation is supported.
733 	 */
734 	if (f2fs_compressed_file(inode))
735 		free_from = round_up(from,
736 				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
737 #endif
738 
739 	err = f2fs_do_truncate_blocks(inode, free_from, lock);
740 	if (err)
741 		return err;
742 
743 #ifdef CONFIG_F2FS_FS_COMPRESSION
744 	if (from != free_from) {
745 		err = f2fs_truncate_partial_cluster(inode, from, lock);
746 		if (err)
747 			return err;
748 	}
749 #endif
750 
751 	return 0;
752 }
753 
754 int f2fs_truncate(struct inode *inode)
755 {
756 	int err;
757 
758 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
759 		return -EIO;
760 
761 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
762 				S_ISLNK(inode->i_mode)))
763 		return 0;
764 
765 	trace_f2fs_truncate(inode);
766 
767 	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
768 		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
769 		return -EIO;
770 	}
771 
772 	err = dquot_initialize(inode);
773 	if (err)
774 		return err;
775 
776 	/* we should check inline_data size */
777 	if (!f2fs_may_inline_data(inode)) {
778 		err = f2fs_convert_inline_inode(inode);
779 		if (err)
780 			return err;
781 	}
782 
783 	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
784 	if (err)
785 		return err;
786 
787 	inode->i_mtime = inode->i_ctime = current_time(inode);
788 	f2fs_mark_inode_dirty_sync(inode, false);
789 	return 0;
790 }
791 
792 int f2fs_getattr(const struct path *path, struct kstat *stat,
793 		 u32 request_mask, unsigned int query_flags)
794 {
795 	struct inode *inode = d_inode(path->dentry);
796 	struct f2fs_inode_info *fi = F2FS_I(inode);
797 	struct f2fs_inode *ri;
798 	unsigned int flags;
799 
800 	if (f2fs_has_extra_attr(inode) &&
801 			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
802 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
803 		stat->result_mask |= STATX_BTIME;
804 		stat->btime.tv_sec = fi->i_crtime.tv_sec;
805 		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
806 	}
807 
808 	flags = fi->i_flags;
809 	if (flags & F2FS_COMPR_FL)
810 		stat->attributes |= STATX_ATTR_COMPRESSED;
811 	if (flags & F2FS_APPEND_FL)
812 		stat->attributes |= STATX_ATTR_APPEND;
813 	if (IS_ENCRYPTED(inode))
814 		stat->attributes |= STATX_ATTR_ENCRYPTED;
815 	if (flags & F2FS_IMMUTABLE_FL)
816 		stat->attributes |= STATX_ATTR_IMMUTABLE;
817 	if (flags & F2FS_NODUMP_FL)
818 		stat->attributes |= STATX_ATTR_NODUMP;
819 	if (IS_VERITY(inode))
820 		stat->attributes |= STATX_ATTR_VERITY;
821 
822 	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
823 				  STATX_ATTR_APPEND |
824 				  STATX_ATTR_ENCRYPTED |
825 				  STATX_ATTR_IMMUTABLE |
826 				  STATX_ATTR_NODUMP |
827 				  STATX_ATTR_VERITY);
828 
829 	generic_fillattr(inode, stat);
830 
831 	/* we need to show initial sectors used for inline_data/dentries */
832 	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
833 					f2fs_has_inline_dentry(inode))
834 		stat->blocks += (stat->size + 511) >> 9;
835 
836 	return 0;
837 }
838 
839 #ifdef CONFIG_F2FS_FS_POSIX_ACL
840 static void __setattr_copy(struct inode *inode, const struct iattr *attr)
841 {
842 	unsigned int ia_valid = attr->ia_valid;
843 
844 	if (ia_valid & ATTR_UID)
845 		inode->i_uid = attr->ia_uid;
846 	if (ia_valid & ATTR_GID)
847 		inode->i_gid = attr->ia_gid;
848 	if (ia_valid & ATTR_ATIME)
849 		inode->i_atime = attr->ia_atime;
850 	if (ia_valid & ATTR_MTIME)
851 		inode->i_mtime = attr->ia_mtime;
852 	if (ia_valid & ATTR_CTIME)
853 		inode->i_ctime = attr->ia_ctime;
854 	if (ia_valid & ATTR_MODE) {
855 		umode_t mode = attr->ia_mode;
856 
857 		if (!in_group_p(inode->i_gid) &&
858 			!capable_wrt_inode_uidgid(inode, CAP_FSETID))
859 			mode &= ~S_ISGID;
860 		set_acl_inode(inode, mode);
861 	}
862 }
863 #else
864 #define __setattr_copy setattr_copy
865 #endif
866 
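/*
 * setattr: size changes are serialized against GC and mmap faults via
 * i_gc_rwsem[WRITE] and i_mmap_sem; uid/gid changes transfer quota
 * under lock_op() so dquot and inode stay consistent.
 */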
867 int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
868 {
869 	struct inode *inode = d_inode(dentry);
870 	int err;
871 
872 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
873 		return -EIO;
874 
875 	if (unlikely(IS_IMMUTABLE(inode)))
876 		return -EPERM;
877 
878 	if (unlikely(IS_APPEND(inode) &&
879 			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
880 				  ATTR_GID | ATTR_TIMES_SET))))
881 		return -EPERM;
882 
883 	if ((attr->ia_valid & ATTR_SIZE) &&
884 		!f2fs_is_compress_backend_ready(inode))
885 		return -EOPNOTSUPP;
886 
887 	err = setattr_prepare(dentry, attr);
888 	if (err)
889 		return err;
890 
891 	err = fscrypt_prepare_setattr(dentry, attr);
892 	if (err)
893 		return err;
894 
895 	err = fsverity_prepare_setattr(dentry, attr);
896 	if (err)
897 		return err;
898 
899 	if (is_quota_modification(inode, attr)) {
900 		err = dquot_initialize(inode);
901 		if (err)
902 			return err;
903 	}
904 	if ((attr->ia_valid & ATTR_UID &&
905 		!uid_eq(attr->ia_uid, inode->i_uid)) ||
906 		(attr->ia_valid & ATTR_GID &&
907 		!gid_eq(attr->ia_gid, inode->i_gid))) {
908 		f2fs_lock_op(F2FS_I_SB(inode));
909 		err = dquot_transfer(inode, attr);
910 		if (err) {
911 			set_sbi_flag(F2FS_I_SB(inode),
912 					SBI_QUOTA_NEED_REPAIR);
913 			f2fs_unlock_op(F2FS_I_SB(inode));
914 			return err;
915 		}
916 		/*
917 		 * update uid/gid under lock_op(), so that dquot and inode can
918 		 * be updated atomically.
919 		 */
920 		if (attr->ia_valid & ATTR_UID)
921 			inode->i_uid = attr->ia_uid;
922 		if (attr->ia_valid & ATTR_GID)
923 			inode->i_gid = attr->ia_gid;
924 		f2fs_mark_inode_dirty_sync(inode, true);
925 		f2fs_unlock_op(F2FS_I_SB(inode));
926 	}
927 
928 	if (attr->ia_valid & ATTR_SIZE) {
929 		loff_t old_size = i_size_read(inode);
930 
931 		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
932 			/*
933 			 * convert the inline inode before i_size_write() so an
934 			 * inode with the inline flag never exceeds the inline_data size.
935 			 */
936 			err = f2fs_convert_inline_inode(inode);
937 			if (err)
938 				return err;
939 		}
940 
941 		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
942 		down_write(&F2FS_I(inode)->i_mmap_sem);
943 
944 		truncate_setsize(inode, attr->ia_size);
945 
946 		if (attr->ia_size <= old_size)
947 			err = f2fs_truncate(inode);
948 		/*
949 		 * do not trim all blocks after i_size if target size is
950 		 * larger than i_size.
951 		 */
952 		up_write(&F2FS_I(inode)->i_mmap_sem);
953 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
954 		if (err)
955 			return err;
956 
957 		spin_lock(&F2FS_I(inode)->i_size_lock);
958 		inode->i_mtime = inode->i_ctime = current_time(inode);
959 		F2FS_I(inode)->last_disk_size = i_size_read(inode);
960 		spin_unlock(&F2FS_I(inode)->i_size_lock);
961 	}
962 
963 	__setattr_copy(inode, attr);
964 
965 	if (attr->ia_valid & ATTR_MODE) {
966 		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
967 		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
968 			inode->i_mode = F2FS_I(inode)->i_acl_mode;
969 			clear_inode_flag(inode, FI_ACL_MODE);
970 		}
971 	}
972 
973 	/* file size may have changed here */
974 	f2fs_mark_inode_dirty_sync(inode, true);
975 
976 	/* inode change will produce dirty node pages flushed by checkpoint */
977 	f2fs_balance_fs(F2FS_I_SB(inode), true);
978 
979 	return err;
980 }
981 
982 const struct inode_operations f2fs_file_inode_operations = {
983 	.getattr	= f2fs_getattr,
984 	.setattr	= f2fs_setattr,
985 	.get_acl	= f2fs_get_acl,
986 	.set_acl	= f2fs_set_acl,
987 	.listxattr	= f2fs_listxattr,
988 	.fiemap		= f2fs_fiemap,
989 };
990 
991 static int fill_zero(struct inode *inode, pgoff_t index,
992 					loff_t start, loff_t len)
993 {
994 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
995 	struct page *page;
996 
997 	if (!len)
998 		return 0;
999 
1000 	f2fs_balance_fs(sbi, true);
1001 
1002 	f2fs_lock_op(sbi);
1003 	page = f2fs_get_new_data_page(inode, NULL, index, false);
1004 	f2fs_unlock_op(sbi);
1005 
1006 	if (IS_ERR(page))
1007 		return PTR_ERR(page);
1008 
1009 	f2fs_wait_on_page_writeback(page, DATA, true, true);
1010 	zero_user(page, start, len);
1011 	set_page_dirty(page);
1012 	f2fs_put_page(page, 1);
1013 	return 0;
1014 }
1015 
1016 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1017 {
1018 	int err;
1019 
1020 	while (pg_start < pg_end) {
1021 		struct dnode_of_data dn;
1022 		pgoff_t end_offset, count;
1023 
1024 		set_new_dnode(&dn, inode, NULL, NULL, 0);
1025 		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1026 		if (err) {
1027 			if (err == -ENOENT) {
1028 				pg_start = f2fs_get_next_page_offset(&dn,
1029 								pg_start);
1030 				continue;
1031 			}
1032 			return err;
1033 		}
1034 
1035 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1036 		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1037 
1038 		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1039 
1040 		f2fs_truncate_data_blocks_range(&dn, count);
1041 		f2fs_put_dnode(&dn);
1042 
1043 		pg_start += count;
1044 	}
1045 	return 0;
1046 }
1047 
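/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial head/tail pages in place and
 * drop the block mappings for the fully-covered pages in between.
 */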
1048 static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
1049 {
1050 	pgoff_t pg_start, pg_end;
1051 	loff_t off_start, off_end;
1052 	int ret;
1053 
1054 	ret = f2fs_convert_inline_inode(inode);
1055 	if (ret)
1056 		return ret;
1057 
1058 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1059 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1060 
1061 	off_start = offset & (PAGE_SIZE - 1);
1062 	off_end = (offset + len) & (PAGE_SIZE - 1);
1063 
1064 	if (pg_start == pg_end) {
1065 		ret = fill_zero(inode, pg_start, off_start,
1066 						off_end - off_start);
1067 		if (ret)
1068 			return ret;
1069 	} else {
1070 		if (off_start) {
1071 			ret = fill_zero(inode, pg_start++, off_start,
1072 						PAGE_SIZE - off_start);
1073 			if (ret)
1074 				return ret;
1075 		}
1076 		if (off_end) {
1077 			ret = fill_zero(inode, pg_end, 0, off_end);
1078 			if (ret)
1079 				return ret;
1080 		}
1081 
1082 		if (pg_start < pg_end) {
1083 			loff_t blk_start, blk_end;
1084 			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1085 
1086 			f2fs_balance_fs(sbi, true);
1087 
1088 			blk_start = (loff_t)pg_start << PAGE_SHIFT;
1089 			blk_end = (loff_t)pg_end << PAGE_SHIFT;
1090 
1091 			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1092 			down_write(&F2FS_I(inode)->i_mmap_sem);
1093 
1094 			truncate_pagecache_range(inode, blk_start, blk_end - 1);
1095 
1096 			f2fs_lock_op(sbi);
1097 			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1098 			f2fs_unlock_op(sbi);
1099 
1100 			up_write(&F2FS_I(inode)->i_mmap_sem);
1101 			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1102 		}
1103 	}
1104 
1105 	return ret;
1106 }
1107 
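/*
 * The helpers below implement the block-exchange machinery shared by
 * collapse/insert range: __read_out_blkaddrs() records the source block
 * addresses (detaching non-checkpointed ones so they are not
 * invalidated), __roll_back_blkaddrs() restores them on failure, and
 * __clone_blkaddrs() moves or copies the blocks to their destination.
 */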
1108 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1109 				int *do_replace, pgoff_t off, pgoff_t len)
1110 {
1111 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1112 	struct dnode_of_data dn;
1113 	int ret, done, i;
1114 
1115 next_dnode:
1116 	set_new_dnode(&dn, inode, NULL, NULL, 0);
1117 	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1118 	if (ret && ret != -ENOENT) {
1119 		return ret;
1120 	} else if (ret == -ENOENT) {
1121 		if (dn.max_level == 0)
1122 			return -ENOENT;
1123 		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1124 						dn.ofs_in_node, len);
1125 		blkaddr += done;
1126 		do_replace += done;
1127 		goto next;
1128 	}
1129 
1130 	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1131 							dn.ofs_in_node, len);
1132 	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1133 		*blkaddr = f2fs_data_blkaddr(&dn);
1134 
1135 		if (__is_valid_data_blkaddr(*blkaddr) &&
1136 			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
1137 					DATA_GENERIC_ENHANCE)) {
1138 			f2fs_put_dnode(&dn);
1139 			return -EFSCORRUPTED;
1140 		}
1141 
1142 		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1143 
1144 			if (f2fs_lfs_mode(sbi)) {
1145 				f2fs_put_dnode(&dn);
1146 				return -EOPNOTSUPP;
1147 			}
1148 
1149 			/* do not invalidate this block address */
1150 			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1151 			*do_replace = 1;
1152 		}
1153 	}
1154 	f2fs_put_dnode(&dn);
1155 next:
1156 	len -= done;
1157 	off += done;
1158 	if (len)
1159 		goto next_dnode;
1160 	return 0;
1161 }
1162 
1163 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1164 				int *do_replace, pgoff_t off, int len)
1165 {
1166 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1167 	struct dnode_of_data dn;
1168 	int ret, i;
1169 
1170 	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1171 		if (*do_replace == 0)
1172 			continue;
1173 
1174 		set_new_dnode(&dn, inode, NULL, NULL, 0);
1175 		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1176 		if (ret) {
1177 			dec_valid_block_count(sbi, inode, 1);
1178 			f2fs_invalidate_blocks(sbi, *blkaddr);
1179 		} else {
1180 			f2fs_update_data_blkaddr(&dn, *blkaddr);
1181 		}
1182 		f2fs_put_dnode(&dn);
1183 	}
1184 	return 0;
1185 }
1186 
1187 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1188 			block_t *blkaddr, int *do_replace,
1189 			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1190 {
1191 	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1192 	pgoff_t i = 0;
1193 	int ret;
1194 
1195 	while (i < len) {
1196 		if (blkaddr[i] == NULL_ADDR && !full) {
1197 			i++;
1198 			continue;
1199 		}
1200 
1201 		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1202 			struct dnode_of_data dn;
1203 			struct node_info ni;
1204 			size_t new_size;
1205 			pgoff_t ilen;
1206 
1207 			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1208 			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1209 			if (ret)
1210 				return ret;
1211 
1212 			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
1213 			if (ret) {
1214 				f2fs_put_dnode(&dn);
1215 				return ret;
1216 			}
1217 
1218 			ilen = min((pgoff_t)
1219 				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1220 						dn.ofs_in_node, len - i);
1221 			do {
1222 				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1223 				f2fs_truncate_data_blocks_range(&dn, 1);
1224 
1225 				if (do_replace[i]) {
1226 					f2fs_i_blocks_write(src_inode,
1227 							1, false, false);
1228 					f2fs_i_blocks_write(dst_inode,
1229 							1, true, false);
1230 					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1231 					blkaddr[i], ni.version, true, false);
1232 
1233 					do_replace[i] = 0;
1234 				}
1235 				dn.ofs_in_node++;
1236 				i++;
1237 				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1238 				if (dst_inode->i_size < new_size)
1239 					f2fs_i_size_write(dst_inode, new_size);
1240 			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1241 
1242 			f2fs_put_dnode(&dn);
1243 		} else {
1244 			struct page *psrc, *pdst;
1245 
1246 			psrc = f2fs_get_lock_data_page(src_inode,
1247 							src + i, true);
1248 			if (IS_ERR(psrc))
1249 				return PTR_ERR(psrc);
1250 			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1251 								true);
1252 			if (IS_ERR(pdst)) {
1253 				f2fs_put_page(psrc, 1);
1254 				return PTR_ERR(pdst);
1255 			}
1256 			f2fs_copy_page(psrc, pdst);
1257 			set_page_dirty(pdst);
1258 			f2fs_put_page(pdst, 1);
1259 			f2fs_put_page(psrc, 1);
1260 
1261 			ret = f2fs_truncate_hole(src_inode,
1262 						src + i, src + i + 1);
1263 			if (ret)
1264 				return ret;
1265 			i++;
1266 		}
1267 	}
1268 	return 0;
1269 }
1270 
1271 static int __exchange_data_block(struct inode *src_inode,
1272 			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1273 			pgoff_t len, bool full)
1274 {
1275 	block_t *src_blkaddr;
1276 	int *do_replace;
1277 	pgoff_t olen;
1278 	int ret;
1279 
1280 	while (len) {
1281 		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1282 
1283 		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1284 					array_size(olen, sizeof(block_t)),
1285 					GFP_NOFS);
1286 		if (!src_blkaddr)
1287 			return -ENOMEM;
1288 
1289 		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1290 					array_size(olen, sizeof(int)),
1291 					GFP_NOFS);
1292 		if (!do_replace) {
1293 			kvfree(src_blkaddr);
1294 			return -ENOMEM;
1295 		}
1296 
1297 		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1298 					do_replace, src, olen);
1299 		if (ret)
1300 			goto roll_back;
1301 
1302 		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1303 					do_replace, src, dst, olen, full);
1304 		if (ret)
1305 			goto roll_back;
1306 
1307 		src += olen;
1308 		dst += olen;
1309 		len -= olen;
1310 
1311 		kvfree(src_blkaddr);
1312 		kvfree(do_replace);
1313 	}
1314 	return 0;
1315 
1316 roll_back:
1317 	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1318 	kvfree(src_blkaddr);
1319 	kvfree(do_replace);
1320 	return ret;
1321 }
1322 
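/*
 * FALLOC_FL_COLLAPSE_RANGE: shift all blocks after the removed range
 * toward the start of the file with __exchange_data_block(), with GC
 * and page faults blocked for the duration of the exchange.
 */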
1323 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1324 {
1325 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1326 	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1327 	pgoff_t start = offset >> PAGE_SHIFT;
1328 	pgoff_t end = (offset + len) >> PAGE_SHIFT;
1329 	int ret;
1330 
1331 	f2fs_balance_fs(sbi, true);
1332 
1333 	/* avoid gc operation during block exchange */
1334 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1335 	down_write(&F2FS_I(inode)->i_mmap_sem);
1336 
1337 	f2fs_lock_op(sbi);
1338 	f2fs_drop_extent_tree(inode);
1339 	truncate_pagecache(inode, offset);
1340 	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1341 	f2fs_unlock_op(sbi);
1342 
1343 	up_write(&F2FS_I(inode)->i_mmap_sem);
1344 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1345 	return ret;
1346 }
1347 
1348 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1349 {
1350 	loff_t new_size;
1351 	int ret;
1352 
1353 	if (offset + len >= i_size_read(inode))
1354 		return -EINVAL;
1355 
1356 	/* collapse range should be aligned to the f2fs block size */
1357 	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1358 		return -EINVAL;
1359 
1360 	ret = f2fs_convert_inline_inode(inode);
1361 	if (ret)
1362 		return ret;
1363 
1364 	/* write out all dirty pages from offset */
1365 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1366 	if (ret)
1367 		return ret;
1368 
1369 	ret = f2fs_do_collapse(inode, offset, len);
1370 	if (ret)
1371 		return ret;
1372 
1373 	/* write out all moved pages, if possible */
1374 	down_write(&F2FS_I(inode)->i_mmap_sem);
1375 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1376 	truncate_pagecache(inode, offset);
1377 
1378 	new_size = i_size_read(inode) - len;
1379 	ret = f2fs_truncate_blocks(inode, new_size, true);
1380 	up_write(&F2FS_I(inode)->i_mmap_sem);
1381 	if (!ret)
1382 		f2fs_i_size_write(inode, new_size);
1383 	return ret;
1384 }
1385 
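/*
 * Zero a block range inside one dnode: reserve new blocks for the holes
 * and invalidate already-written blocks, leaving every slot in
 * [start, end) at NEW_ADDR so the range reads back as zeros.
 */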
1386 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1387 								pgoff_t end)
1388 {
1389 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1390 	pgoff_t index = start;
1391 	unsigned int ofs_in_node = dn->ofs_in_node;
1392 	blkcnt_t count = 0;
1393 	int ret;
1394 
1395 	for (; index < end; index++, dn->ofs_in_node++) {
1396 		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1397 			count++;
1398 	}
1399 
1400 	dn->ofs_in_node = ofs_in_node;
1401 	ret = f2fs_reserve_new_blocks(dn, count);
1402 	if (ret)
1403 		return ret;
1404 
1405 	dn->ofs_in_node = ofs_in_node;
1406 	for (index = start; index < end; index++, dn->ofs_in_node++) {
1407 		dn->data_blkaddr = f2fs_data_blkaddr(dn);
1408 		/*
1409 		 * f2fs_reserve_new_blocks will not guarantee entire block
1410 		 * allocation.
1411 		 */
1412 		if (dn->data_blkaddr == NULL_ADDR) {
1413 			ret = -ENOSPC;
1414 			break;
1415 		}
1416 		if (dn->data_blkaddr != NEW_ADDR) {
1417 			f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1418 			dn->data_blkaddr = NEW_ADDR;
1419 			f2fs_set_data_blkaddr(dn);
1420 		}
1421 	}
1422 
1423 	f2fs_update_extent_cache_range(dn, start, 0, index - start);
1424 
1425 	return ret;
1426 }
1427 
1428 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1429 								int mode)
1430 {
1431 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1432 	struct address_space *mapping = inode->i_mapping;
1433 	pgoff_t index, pg_start, pg_end;
1434 	loff_t new_size = i_size_read(inode);
1435 	loff_t off_start, off_end;
1436 	int ret = 0;
1437 
1438 	ret = inode_newsize_ok(inode, (len + offset));
1439 	if (ret)
1440 		return ret;
1441 
1442 	ret = f2fs_convert_inline_inode(inode);
1443 	if (ret)
1444 		return ret;
1445 
1446 	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1447 	if (ret)
1448 		return ret;
1449 
1450 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1451 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1452 
1453 	off_start = offset & (PAGE_SIZE - 1);
1454 	off_end = (offset + len) & (PAGE_SIZE - 1);
1455 
1456 	if (pg_start == pg_end) {
1457 		ret = fill_zero(inode, pg_start, off_start,
1458 						off_end - off_start);
1459 		if (ret)
1460 			return ret;
1461 
1462 		new_size = max_t(loff_t, new_size, offset + len);
1463 	} else {
1464 		if (off_start) {
1465 			ret = fill_zero(inode, pg_start++, off_start,
1466 						PAGE_SIZE - off_start);
1467 			if (ret)
1468 				return ret;
1469 
1470 			new_size = max_t(loff_t, new_size,
1471 					(loff_t)pg_start << PAGE_SHIFT);
1472 		}
1473 
1474 		for (index = pg_start; index < pg_end;) {
1475 			struct dnode_of_data dn;
1476 			unsigned int end_offset;
1477 			pgoff_t end;
1478 
1479 			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1480 			down_write(&F2FS_I(inode)->i_mmap_sem);
1481 
1482 			truncate_pagecache_range(inode,
1483 				(loff_t)index << PAGE_SHIFT,
1484 				((loff_t)pg_end << PAGE_SHIFT) - 1);
1485 
1486 			f2fs_lock_op(sbi);
1487 
1488 			set_new_dnode(&dn, inode, NULL, NULL, 0);
1489 			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1490 			if (ret) {
1491 				f2fs_unlock_op(sbi);
1492 				up_write(&F2FS_I(inode)->i_mmap_sem);
1493 				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1494 				goto out;
1495 			}
1496 
1497 			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1498 			end = min(pg_end, end_offset - dn.ofs_in_node + index);
1499 
1500 			ret = f2fs_do_zero_range(&dn, index, end);
1501 			f2fs_put_dnode(&dn);
1502 
1503 			f2fs_unlock_op(sbi);
1504 			up_write(&F2FS_I(inode)->i_mmap_sem);
1505 			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1506 
1507 			f2fs_balance_fs(sbi, dn.node_changed);
1508 
1509 			if (ret)
1510 				goto out;
1511 
1512 			index = end;
1513 			new_size = max_t(loff_t, new_size,
1514 					(loff_t)index << PAGE_SHIFT);
1515 		}
1516 
1517 		if (off_end) {
1518 			ret = fill_zero(inode, pg_end, 0, off_end);
1519 			if (ret)
1520 				goto out;
1521 
1522 			new_size = max_t(loff_t, new_size, offset + len);
1523 		}
1524 	}
1525 
1526 out:
1527 	if (new_size > i_size_read(inode)) {
1528 		if (mode & FALLOC_FL_KEEP_SIZE)
1529 			file_set_keep_isize(inode);
1530 		else
1531 			f2fs_i_size_write(inode, new_size);
1532 	}
1533 	return ret;
1534 }
1535 
1536 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1537 {
1538 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1539 	pgoff_t nr, pg_start, pg_end, delta, idx;
1540 	loff_t new_size;
1541 	int ret = 0;
1542 
1543 	new_size = i_size_read(inode) + len;
1544 	ret = inode_newsize_ok(inode, new_size);
1545 	if (ret)
1546 		return ret;
1547 
1548 	if (offset >= i_size_read(inode))
1549 		return -EINVAL;
1550 
1551 	/* insert range should be aligned to the f2fs block size */
1552 	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1553 		return -EINVAL;
1554 
1555 	ret = f2fs_convert_inline_inode(inode);
1556 	if (ret)
1557 		return ret;
1558 
1559 	f2fs_balance_fs(sbi, true);
1560 
1561 	down_write(&F2FS_I(inode)->i_mmap_sem);
1562 	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1563 	up_write(&F2FS_I(inode)->i_mmap_sem);
1564 	if (ret)
1565 		return ret;
1566 
1567 	/* write out all dirty pages from offset */
1568 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1569 	if (ret)
1570 		return ret;
1571 
1572 	pg_start = offset >> PAGE_SHIFT;
1573 	pg_end = (offset + len) >> PAGE_SHIFT;
1574 	delta = pg_end - pg_start;
1575 	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1576 
1577 	/* avoid gc operation during block exchange */
1578 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1579 	down_write(&F2FS_I(inode)->i_mmap_sem);
1580 	truncate_pagecache(inode, offset);
1581 
1582 	while (!ret && idx > pg_start) {
1583 		nr = idx - pg_start;
1584 		if (nr > delta)
1585 			nr = delta;
1586 		idx -= nr;
1587 
1588 		f2fs_lock_op(sbi);
1589 		f2fs_drop_extent_tree(inode);
1590 
1591 		ret = __exchange_data_block(inode, inode, idx,
1592 					idx + delta, nr, false);
1593 		f2fs_unlock_op(sbi);
1594 	}
1595 	up_write(&F2FS_I(inode)->i_mmap_sem);
1596 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1597 
1598 	/* write out all moved pages, if possible */
1599 	down_write(&F2FS_I(inode)->i_mmap_sem);
1600 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1601 	truncate_pagecache(inode, offset);
1602 	up_write(&F2FS_I(inode)->i_mmap_sem);
1603 
1604 	if (!ret)
1605 		f2fs_i_size_write(inode, new_size);
1606 	return ret;
1607 }
1608 
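/*
 * Preallocate blocks for fallocate().  Pinned files are expanded one
 * pinned cold-data section at a time (running foreground GC first if
 * free sections run short); everything else goes through a single
 * f2fs_map_blocks() call.
 */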
1609 static int expand_inode_data(struct inode *inode, loff_t offset,
1610 					loff_t len, int mode)
1611 {
1612 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1613 	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1614 			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1615 			.m_may_create = true };
1616 	pgoff_t pg_start, pg_end;
1617 	loff_t new_size = i_size_read(inode);
1618 	loff_t off_end;
1619 	block_t expanded = 0;
1620 	int err;
1621 
1622 	err = inode_newsize_ok(inode, (len + offset));
1623 	if (err)
1624 		return err;
1625 
1626 	err = f2fs_convert_inline_inode(inode);
1627 	if (err)
1628 		return err;
1629 
1630 	f2fs_balance_fs(sbi, true);
1631 
1632 	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1633 	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1634 	off_end = (offset + len) & (PAGE_SIZE - 1);
1635 
1636 	map.m_lblk = pg_start;
1637 	map.m_len = pg_end - pg_start;
1638 	if (off_end)
1639 		map.m_len++;
1640 
1641 	if (!map.m_len)
1642 		return 0;
1643 
1644 	if (f2fs_is_pinned_file(inode)) {
1645 		block_t sec_blks = BLKS_PER_SEC(sbi);
1646 		block_t sec_len = roundup(map.m_len, sec_blks);
1647 
1648 		map.m_len = sec_blks;
1649 next_alloc:
1650 		if (has_not_enough_free_secs(sbi, 0,
1651 			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1652 			down_write(&sbi->gc_lock);
1653 			err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
1654 			if (err && err != -ENODATA && err != -EAGAIN)
1655 				goto out_err;
1656 		}
1657 
1658 		down_write(&sbi->pin_sem);
1659 
1660 		f2fs_lock_op(sbi);
1661 		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED);
1662 		f2fs_unlock_op(sbi);
1663 
1664 		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1665 		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1666 
1667 		up_write(&sbi->pin_sem);
1668 
1669 		expanded += map.m_len;
1670 		sec_len -= map.m_len;
1671 		map.m_lblk += map.m_len;
1672 		if (!err && sec_len)
1673 			goto next_alloc;
1674 
1675 		map.m_len = expanded;
1676 	} else {
1677 		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1678 		expanded = map.m_len;
1679 	}
1680 out_err:
1681 	if (err) {
1682 		pgoff_t last_off;
1683 
1684 		if (!expanded)
1685 			return err;
1686 
1687 		last_off = pg_start + expanded - 1;
1688 
1689 		/* update new size to the failed position */
1690 		new_size = (last_off == pg_end) ? offset + len :
1691 					(loff_t)(last_off + 1) << PAGE_SHIFT;
1692 	} else {
1693 		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1694 	}
1695 
1696 	if (new_size > i_size_read(inode)) {
1697 		if (mode & FALLOC_FL_KEEP_SIZE)
1698 			file_set_keep_isize(inode);
1699 		else
1700 			f2fs_i_size_write(inode, new_size);
1701 	}
1702 
1703 	return err;
1704 }
1705 
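/*
 * fallocate() entry point: validate the mode against encryption and
 * compression restrictions, then dispatch to punch_hole(),
 * f2fs_collapse_range(), f2fs_zero_range(), f2fs_insert_range() or
 * expand_inode_data() under inode_lock().
 */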
1706 static long f2fs_fallocate(struct file *file, int mode,
1707 				loff_t offset, loff_t len)
1708 {
1709 	struct inode *inode = file_inode(file);
1710 	long ret = 0;
1711 
1712 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1713 		return -EIO;
1714 	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1715 		return -ENOSPC;
1716 	if (!f2fs_is_compress_backend_ready(inode))
1717 		return -EOPNOTSUPP;
1718 
1719 	/* f2fs only supports ->fallocate for regular files */
1720 	if (!S_ISREG(inode->i_mode))
1721 		return -EINVAL;
1722 
1723 	if (IS_ENCRYPTED(inode) &&
1724 		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1725 		return -EOPNOTSUPP;
1726 
1727 	if (f2fs_compressed_file(inode) &&
1728 		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1729 			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1730 		return -EOPNOTSUPP;
1731 
1732 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1733 			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1734 			FALLOC_FL_INSERT_RANGE))
1735 		return -EOPNOTSUPP;
1736 
1737 	inode_lock(inode);
1738 
1739 	if (mode & FALLOC_FL_PUNCH_HOLE) {
1740 		if (offset >= inode->i_size)
1741 			goto out;
1742 
1743 		ret = punch_hole(inode, offset, len);
1744 	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1745 		ret = f2fs_collapse_range(inode, offset, len);
1746 	} else if (mode & FALLOC_FL_ZERO_RANGE) {
1747 		ret = f2fs_zero_range(inode, offset, len, mode);
1748 	} else if (mode & FALLOC_FL_INSERT_RANGE) {
1749 		ret = f2fs_insert_range(inode, offset, len);
1750 	} else {
1751 		ret = expand_inode_data(inode, offset, len, mode);
1752 	}
1753 
1754 	if (!ret) {
1755 		inode->i_mtime = inode->i_ctime = current_time(inode);
1756 		f2fs_mark_inode_dirty_sync(inode, false);
1757 		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1758 	}
1759 
1760 out:
1761 	inode_unlock(inode);
1762 
1763 	trace_f2fs_fallocate(inode, mode, offset, len, ret);
1764 	return ret;
1765 }
1766 
1767 static int f2fs_release_file(struct inode *inode, struct file *filp)
1768 {
1769 	/*
1770 	 * f2fs_release_file is called at every close call, so we should
1771 	 * not drop any in-memory pages on a close issued by another process.
1772 	 */
1773 	if (!(filp->f_mode & FMODE_WRITE) ||
1774 			atomic_read(&inode->i_writecount) != 1)
1775 		return 0;
1776 
1777 	/* some remaining atomic pages should be discarded */
1778 	if (f2fs_is_atomic_file(inode))
1779 		f2fs_drop_inmem_pages(inode);
1780 	if (f2fs_is_volatile_file(inode)) {
1781 		set_inode_flag(inode, FI_DROP_CACHE);
1782 		filemap_fdatawrite(inode->i_mapping);
1783 		clear_inode_flag(inode, FI_DROP_CACHE);
1784 		clear_inode_flag(inode, FI_VOLATILE_FILE);
1785 		stat_dec_volatile_write(inode);
1786 	}
1787 	return 0;
1788 }
1789 
1790 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1791 {
1792 	struct inode *inode = file_inode(file);
1793 
1794 	/*
1795 	 * If the process doing a transaction crashes, we should roll back.
1796 	 * Otherwise, other readers/writers can see a corrupted database
1797 	 * until all the writers close the file. Since this should be done
1798 	 * before dropping the file lock, it needs to happen in ->flush.
1799 	 */
1800 	if (f2fs_is_atomic_file(inode) &&
1801 			F2FS_I(inode)->inmem_task == current)
1802 		f2fs_drop_inmem_pages(inode);
1803 	return 0;
1804 }
1805 
1806 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1807 {
1808 	struct f2fs_inode_info *fi = F2FS_I(inode);
1809 	u32 masked_flags = fi->i_flags & mask;
1810 
1811 	f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
1812 
1813 	/* Is it quota file? Do not allow user to mess with it */
1814 	if (IS_NOQUOTA(inode))
1815 		return -EPERM;
1816 
1817 	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1818 		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1819 			return -EOPNOTSUPP;
1820 		if (!f2fs_empty_dir(inode))
1821 			return -ENOTEMPTY;
1822 	}
1823 
1824 	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1825 		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1826 			return -EOPNOTSUPP;
1827 		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1828 			return -EINVAL;
1829 	}
1830 
1831 	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1832 		if (masked_flags & F2FS_COMPR_FL) {
1833 			if (!f2fs_disable_compressed_file(inode))
1834 				return -EINVAL;
1835 		}
1836 		if (iflags & F2FS_NOCOMP_FL)
1837 			return -EINVAL;
1838 		if (iflags & F2FS_COMPR_FL) {
1839 			if (!f2fs_may_compress(inode))
1840 				return -EINVAL;
1841 			if (S_ISREG(inode->i_mode) && inode->i_size)
1842 				return -EINVAL;
1843 
1844 			set_compress_context(inode);
1845 		}
1846 	}
1847 	if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
1848 		if (masked_flags & F2FS_COMPR_FL)
1849 			return -EINVAL;
1850 	}
1851 
1852 	fi->i_flags = iflags | (fi->i_flags & ~mask);
1853 	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1854 					(fi->i_flags & F2FS_NOCOMP_FL));
1855 
1856 	if (fi->i_flags & F2FS_PROJINHERIT_FL)
1857 		set_inode_flag(inode, FI_PROJ_INHERIT);
1858 	else
1859 		clear_inode_flag(inode, FI_PROJ_INHERIT);
1860 
1861 	inode->i_ctime = current_time(inode);
1862 	f2fs_set_inode_flags(inode);
1863 	f2fs_mark_inode_dirty_sync(inode, true);
1864 	return 0;
1865 }
1866 
1867 /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
1868 
1869 /*
1870  * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1871  * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1872  * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
1873  * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1874  */
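
/*
 * A minimal sketch of what that looks like in practice; the flag names here
 * are hypothetical placeholders, not real f2fs flags:
 *
 *	{ F2FS_EXAMPLE_FL,	FS_EXAMPLE_FL },	in f2fs_fsflags_map[]
 *	... | FS_EXAMPLE_FL				in F2FS_GETTABLE_FS_FL
 *	... | FS_EXAMPLE_FL				in F2FS_SETTABLE_FS_FL (if settable)
 */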
1875 
1876 static const struct {
1877 	u32 iflag;
1878 	u32 fsflag;
1879 } f2fs_fsflags_map[] = {
1880 	{ F2FS_COMPR_FL,	FS_COMPR_FL },
1881 	{ F2FS_SYNC_FL,		FS_SYNC_FL },
1882 	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
1883 	{ F2FS_APPEND_FL,	FS_APPEND_FL },
1884 	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
1885 	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
1886 	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
1887 	{ F2FS_INDEX_FL,	FS_INDEX_FL },
1888 	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
1889 	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
1890 	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
1891 };
1892 
1893 #define F2FS_GETTABLE_FS_FL (		\
1894 		FS_COMPR_FL |		\
1895 		FS_SYNC_FL |		\
1896 		FS_IMMUTABLE_FL |	\
1897 		FS_APPEND_FL |		\
1898 		FS_NODUMP_FL |		\
1899 		FS_NOATIME_FL |		\
1900 		FS_NOCOMP_FL |		\
1901 		FS_INDEX_FL |		\
1902 		FS_DIRSYNC_FL |		\
1903 		FS_PROJINHERIT_FL |	\
1904 		FS_ENCRYPT_FL |		\
1905 		FS_INLINE_DATA_FL |	\
1906 		FS_NOCOW_FL |		\
1907 		FS_VERITY_FL |		\
1908 		FS_CASEFOLD_FL)
1909 
1910 #define F2FS_SETTABLE_FS_FL (		\
1911 		FS_COMPR_FL |		\
1912 		FS_SYNC_FL |		\
1913 		FS_IMMUTABLE_FL |	\
1914 		FS_APPEND_FL |		\
1915 		FS_NODUMP_FL |		\
1916 		FS_NOATIME_FL |		\
1917 		FS_NOCOMP_FL |		\
1918 		FS_DIRSYNC_FL |		\
1919 		FS_PROJINHERIT_FL |	\
1920 		FS_CASEFOLD_FL)
1921 
1922 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1923 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1924 {
1925 	u32 fsflags = 0;
1926 	int i;
1927 
1928 	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1929 		if (iflags & f2fs_fsflags_map[i].iflag)
1930 			fsflags |= f2fs_fsflags_map[i].fsflag;
1931 
1932 	return fsflags;
1933 }
1934 
1935 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1936 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1937 {
1938 	u32 iflags = 0;
1939 	int i;
1940 
1941 	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1942 		if (fsflags & f2fs_fsflags_map[i].fsflag)
1943 			iflags |= f2fs_fsflags_map[i].iflag;
1944 
1945 	return iflags;
1946 }
1947 
1948 static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1949 {
1950 	struct inode *inode = file_inode(filp);
1951 	struct f2fs_inode_info *fi = F2FS_I(inode);
1952 	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1953 
1954 	if (IS_ENCRYPTED(inode))
1955 		fsflags |= FS_ENCRYPT_FL;
1956 	if (IS_VERITY(inode))
1957 		fsflags |= FS_VERITY_FL;
1958 	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1959 		fsflags |= FS_INLINE_DATA_FL;
1960 	if (is_inode_flag_set(inode, FI_PIN_FILE))
1961 		fsflags |= FS_NOCOW_FL;
1962 
1963 	fsflags &= F2FS_GETTABLE_FS_FL;
1964 
1965 	return put_user(fsflags, (int __user *)arg);
1966 }
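
/*
 * Minimal userspace sketch of the flags ioctls handled here (illustrative
 * only; error handling omitted):
 *
 *	int attr;
 *	ioctl(fd, FS_IOC_GETFLAGS, &attr);	fetch the current flags
 *	attr |= FS_NOATIME_FL;			toggle one settable flag
 *	ioctl(fd, FS_IOC_SETFLAGS, &attr);	write them back
 */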
1967 
1968 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1969 {
1970 	struct inode *inode = file_inode(filp);
1971 	struct f2fs_inode_info *fi = F2FS_I(inode);
1972 	u32 fsflags, old_fsflags;
1973 	u32 iflags;
1974 	int ret;
1975 
1976 	if (!inode_owner_or_capable(inode))
1977 		return -EACCES;
1978 
1979 	if (get_user(fsflags, (int __user *)arg))
1980 		return -EFAULT;
1981 
1982 	if (fsflags & ~F2FS_GETTABLE_FS_FL)
1983 		return -EOPNOTSUPP;
1984 	fsflags &= F2FS_SETTABLE_FS_FL;
1985 
1986 	iflags = f2fs_fsflags_to_iflags(fsflags);
1987 	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
1988 		return -EOPNOTSUPP;
1989 
1990 	ret = mnt_want_write_file(filp);
1991 	if (ret)
1992 		return ret;
1993 
1994 	inode_lock(inode);
1995 
1996 	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1997 	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
1998 	if (ret)
1999 		goto out;
2000 
2001 	ret = f2fs_setflags_common(inode, iflags,
2002 			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
2003 out:
2004 	inode_unlock(inode);
2005 	mnt_drop_write_file(filp);
2006 	return ret;
2007 }
2008 
2009 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2010 {
2011 	struct inode *inode = file_inode(filp);
2012 
2013 	return put_user(inode->i_generation, (int __user *)arg);
2014 }
2015 
2016 static int f2fs_ioc_start_atomic_write(struct file *filp)
2017 {
2018 	struct inode *inode = file_inode(filp);
2019 	struct f2fs_inode_info *fi = F2FS_I(inode);
2020 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2021 	int ret;
2022 
2023 	if (!inode_owner_or_capable(inode))
2024 		return -EACCES;
2025 
2026 	if (!S_ISREG(inode->i_mode))
2027 		return -EINVAL;
2028 
2029 	if (filp->f_flags & O_DIRECT)
2030 		return -EINVAL;
2031 
2032 	ret = mnt_want_write_file(filp);
2033 	if (ret)
2034 		return ret;
2035 
2036 	inode_lock(inode);
2037 
2038 	f2fs_disable_compressed_file(inode);
2039 
2040 	if (f2fs_is_atomic_file(inode)) {
2041 		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
2042 			ret = -EINVAL;
2043 		goto out;
2044 	}
2045 
2046 	ret = f2fs_convert_inline_inode(inode);
2047 	if (ret)
2048 		goto out;
2049 
2050 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2051 
2052 	/*
2053 	 * We should wait for end_io so that F2FS_WB_CP_DATA is counted
2054 	 * correctly by f2fs_is_atomic_file().
2055 	 */
2056 	if (get_dirty_pages(inode))
2057 		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2058 			  inode->i_ino, get_dirty_pages(inode));
2059 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2060 	if (ret) {
2061 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2062 		goto out;
2063 	}
2064 
2065 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2066 	if (list_empty(&fi->inmem_ilist))
2067 		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2068 	sbi->atomic_files++;
2069 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2070 
2071 	/* add the inode to inmem_ilist first, then set FI_ATOMIC_FILE */
2072 	set_inode_flag(inode, FI_ATOMIC_FILE);
2073 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2074 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2075 
2076 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2077 	F2FS_I(inode)->inmem_task = current;
2078 	stat_update_max_atomic_write(inode);
2079 out:
2080 	inode_unlock(inode);
2081 	mnt_drop_write_file(filp);
2082 	return ret;
2083 }
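
/*
 * Sketch of the expected userspace sequence for atomic writes (illustrative
 * only; this is roughly how SQLite's f2fs path on Android drives it):
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);			any number of buffered writes
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 *
 * F2FS_IOC_ABORT_VOLATILE_WRITE rolls the transaction back instead of
 * committing it.
 */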
2084 
2085 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2086 {
2087 	struct inode *inode = file_inode(filp);
2088 	int ret;
2089 
2090 	if (!inode_owner_or_capable(inode))
2091 		return -EACCES;
2092 
2093 	ret = mnt_want_write_file(filp);
2094 	if (ret)
2095 		return ret;
2096 
2097 	f2fs_balance_fs(F2FS_I_SB(inode), true);
2098 
2099 	inode_lock(inode);
2100 
2101 	if (f2fs_is_volatile_file(inode)) {
2102 		ret = -EINVAL;
2103 		goto err_out;
2104 	}
2105 
2106 	if (f2fs_is_atomic_file(inode)) {
2107 		ret = f2fs_commit_inmem_pages(inode);
2108 		if (ret)
2109 			goto err_out;
2110 
2111 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2112 		if (!ret)
2113 			f2fs_drop_inmem_pages(inode);
2114 	} else {
2115 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2116 	}
2117 err_out:
2118 	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2119 		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2120 		ret = -EINVAL;
2121 	}
2122 	inode_unlock(inode);
2123 	mnt_drop_write_file(filp);
2124 	return ret;
2125 }
2126 
2127 static int f2fs_ioc_start_volatile_write(struct file *filp)
2128 {
2129 	struct inode *inode = file_inode(filp);
2130 	int ret;
2131 
2132 	if (!inode_owner_or_capable(inode))
2133 		return -EACCES;
2134 
2135 	if (!S_ISREG(inode->i_mode))
2136 		return -EINVAL;
2137 
2138 	ret = mnt_want_write_file(filp);
2139 	if (ret)
2140 		return ret;
2141 
2142 	inode_lock(inode);
2143 
2144 	if (f2fs_is_volatile_file(inode))
2145 		goto out;
2146 
2147 	ret = f2fs_convert_inline_inode(inode);
2148 	if (ret)
2149 		goto out;
2150 
2151 	stat_inc_volatile_write(inode);
2152 	stat_update_max_volatile_write(inode);
2153 
2154 	set_inode_flag(inode, FI_VOLATILE_FILE);
2155 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2156 out:
2157 	inode_unlock(inode);
2158 	mnt_drop_write_file(filp);
2159 	return ret;
2160 }
2161 
2162 static int f2fs_ioc_release_volatile_write(struct file *filp)
2163 {
2164 	struct inode *inode = file_inode(filp);
2165 	int ret;
2166 
2167 	if (!inode_owner_or_capable(inode))
2168 		return -EACCES;
2169 
2170 	ret = mnt_want_write_file(filp);
2171 	if (ret)
2172 		return ret;
2173 
2174 	inode_lock(inode);
2175 
2176 	if (!f2fs_is_volatile_file(inode))
2177 		goto out;
2178 
2179 	if (!f2fs_is_first_block_written(inode)) {
2180 		ret = truncate_partial_data_page(inode, 0, true);
2181 		goto out;
2182 	}
2183 
2184 	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2185 out:
2186 	inode_unlock(inode);
2187 	mnt_drop_write_file(filp);
2188 	return ret;
2189 }
2190 
2191 static int f2fs_ioc_abort_volatile_write(struct file *filp)
2192 {
2193 	struct inode *inode = file_inode(filp);
2194 	int ret;
2195 
2196 	if (!inode_owner_or_capable(inode))
2197 		return -EACCES;
2198 
2199 	ret = mnt_want_write_file(filp);
2200 	if (ret)
2201 		return ret;
2202 
2203 	inode_lock(inode);
2204 
2205 	if (f2fs_is_atomic_file(inode))
2206 		f2fs_drop_inmem_pages(inode);
2207 	if (f2fs_is_volatile_file(inode)) {
2208 		clear_inode_flag(inode, FI_VOLATILE_FILE);
2209 		stat_dec_volatile_write(inode);
2210 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2211 	}
2212 
2213 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2214 
2215 	inode_unlock(inode);
2216 
2217 	mnt_drop_write_file(filp);
2218 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2219 	return ret;
2220 }
2221 
2222 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2223 {
2224 	struct inode *inode = file_inode(filp);
2225 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2226 	struct super_block *sb = sbi->sb;
2227 	__u32 in;
2228 	int ret = 0;
2229 
2230 	if (!capable(CAP_SYS_ADMIN))
2231 		return -EPERM;
2232 
2233 	if (get_user(in, (__u32 __user *)arg))
2234 		return -EFAULT;
2235 
2236 	if (in != F2FS_GOING_DOWN_FULLSYNC) {
2237 		ret = mnt_want_write_file(filp);
2238 		if (ret) {
2239 			if (ret == -EROFS) {
2240 				ret = 0;
2241 				f2fs_stop_checkpoint(sbi, false);
2242 				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2243 				trace_f2fs_shutdown(sbi, in, ret);
2244 			}
2245 			return ret;
2246 		}
2247 	}
2248 
2249 	switch (in) {
2250 	case F2FS_GOING_DOWN_FULLSYNC:
2251 		sb = freeze_bdev(sb->s_bdev);
2252 		if (IS_ERR(sb)) {
2253 			ret = PTR_ERR(sb);
2254 			goto out;
2255 		}
2256 		if (sb) {
2257 			f2fs_stop_checkpoint(sbi, false);
2258 			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2259 			thaw_bdev(sb->s_bdev, sb);
2260 		}
2261 		break;
2262 	case F2FS_GOING_DOWN_METASYNC:
2263 		/* do checkpoint only */
2264 		ret = f2fs_sync_fs(sb, 1);
2265 		if (ret)
2266 			goto out;
2267 		f2fs_stop_checkpoint(sbi, false);
2268 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2269 		break;
2270 	case F2FS_GOING_DOWN_NOSYNC:
2271 		f2fs_stop_checkpoint(sbi, false);
2272 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2273 		break;
2274 	case F2FS_GOING_DOWN_METAFLUSH:
2275 		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2276 		f2fs_stop_checkpoint(sbi, false);
2277 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2278 		break;
2279 	case F2FS_GOING_DOWN_NEED_FSCK:
2280 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2281 		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2282 		set_sbi_flag(sbi, SBI_IS_DIRTY);
2283 		/* do checkpoint only */
2284 		ret = f2fs_sync_fs(sb, 1);
2285 		goto out;
2286 	default:
2287 		ret = -EINVAL;
2288 		goto out;
2289 	}
2290 
2291 	f2fs_stop_gc_thread(sbi);
2292 	f2fs_stop_discard_thread(sbi);
2293 
2294 	f2fs_drop_discard_cmd(sbi);
2295 	clear_opt(sbi, DISCARD);
2296 
2297 	f2fs_update_time(sbi, REQ_TIME);
2298 out:
2299 	if (in != F2FS_GOING_DOWN_FULLSYNC)
2300 		mnt_drop_write_file(filp);
2301 
2302 	trace_f2fs_shutdown(sbi, in, ret);
2303 
2304 	return ret;
2305 }
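
/*
 * Userspace sketch for the shutdown ioctl above (illustrative only):
 *
 *	__u32 mode = F2FS_GOING_DOWN_METASYNC;	checkpoint, then stop
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &mode);
 *
 * FULLSYNC freezes the block device first, while NOSYNC stops checkpointing
 * without writing anything back.
 */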
2306 
2307 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2308 {
2309 	struct inode *inode = file_inode(filp);
2310 	struct super_block *sb = inode->i_sb;
2311 	struct request_queue *q = bdev_get_queue(sb->s_bdev);
2312 	struct fstrim_range range;
2313 	int ret;
2314 
2315 	if (!capable(CAP_SYS_ADMIN))
2316 		return -EPERM;
2317 
2318 	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2319 		return -EOPNOTSUPP;
2320 
2321 	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2322 				sizeof(range)))
2323 		return -EFAULT;
2324 
2325 	ret = mnt_want_write_file(filp);
2326 	if (ret)
2327 		return ret;
2328 
2329 	range.minlen = max((unsigned int)range.minlen,
2330 				q->limits.discard_granularity);
2331 	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2332 	mnt_drop_write_file(filp);
2333 	if (ret < 0)
2334 		return ret;
2335 
2336 	if (copy_to_user((struct fstrim_range __user *)arg, &range,
2337 				sizeof(range)))
2338 		return -EFAULT;
2339 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2340 	return 0;
2341 }
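
/*
 * FITRIM takes a byte range plus a minimum extent length and returns the
 * number of bytes trimmed in range.len; a minimal userspace sketch
 * (illustrative only):
 *
 *	struct fstrim_range r = { .start = 0, .len = ULLONG_MAX, .minlen = 0 };
 *	ioctl(fd, FITRIM, &r);
 */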
2342 
2343 static bool uuid_is_nonzero(__u8 u[16])
2344 {
2345 	int i;
2346 
2347 	for (i = 0; i < 16; i++)
2348 		if (u[i])
2349 			return true;
2350 	return false;
2351 }
2352 
2353 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2354 {
2355 	struct inode *inode = file_inode(filp);
2356 
2357 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2358 		return -EOPNOTSUPP;
2359 
2360 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2361 
2362 	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2363 }
2364 
2365 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2366 {
2367 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2368 		return -EOPNOTSUPP;
2369 	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2370 }
2371 
2372 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2373 {
2374 	struct inode *inode = file_inode(filp);
2375 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2376 	int err;
2377 
2378 	if (!f2fs_sb_has_encrypt(sbi))
2379 		return -EOPNOTSUPP;
2380 
2381 	err = mnt_want_write_file(filp);
2382 	if (err)
2383 		return err;
2384 
2385 	down_write(&sbi->sb_lock);
2386 
2387 	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2388 		goto got_it;
2389 
2390 	/* update superblock with uuid */
2391 	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2392 
2393 	err = f2fs_commit_super(sbi, false);
2394 	if (err) {
2395 		/* undo new data */
2396 		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2397 		goto out_err;
2398 	}
2399 got_it:
2400 	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2401 									16))
2402 		err = -EFAULT;
2403 out_err:
2404 	up_write(&sbi->sb_lock);
2405 	mnt_drop_write_file(filp);
2406 	return err;
2407 }
2408 
2409 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2410 					     unsigned long arg)
2411 {
2412 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2413 		return -EOPNOTSUPP;
2414 
2415 	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2416 }
2417 
2418 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2419 {
2420 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2421 		return -EOPNOTSUPP;
2422 
2423 	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2424 }
2425 
2426 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2427 {
2428 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2429 		return -EOPNOTSUPP;
2430 
2431 	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2432 }
2433 
2434 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2435 						    unsigned long arg)
2436 {
2437 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2438 		return -EOPNOTSUPP;
2439 
2440 	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2441 }
2442 
2443 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2444 					      unsigned long arg)
2445 {
2446 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2447 		return -EOPNOTSUPP;
2448 
2449 	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2450 }
2451 
2452 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2453 {
2454 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2455 		return -EOPNOTSUPP;
2456 
2457 	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2458 }
2459 
2460 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2461 {
2462 	struct inode *inode = file_inode(filp);
2463 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2464 	__u32 sync;
2465 	int ret;
2466 
2467 	if (!capable(CAP_SYS_ADMIN))
2468 		return -EPERM;
2469 
2470 	if (get_user(sync, (__u32 __user *)arg))
2471 		return -EFAULT;
2472 
2473 	if (f2fs_readonly(sbi->sb))
2474 		return -EROFS;
2475 
2476 	ret = mnt_want_write_file(filp);
2477 	if (ret)
2478 		return ret;
2479 
2480 	if (!sync) {
2481 		if (!down_write_trylock(&sbi->gc_lock)) {
2482 			ret = -EBUSY;
2483 			goto out;
2484 		}
2485 	} else {
2486 		down_write(&sbi->gc_lock);
2487 	}
2488 
2489 	ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
2490 out:
2491 	mnt_drop_write_file(filp);
2492 	return ret;
2493 }
2494 
2495 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2496 {
2497 	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2498 	u64 end;
2499 	int ret;
2500 
2501 	if (!capable(CAP_SYS_ADMIN))
2502 		return -EPERM;
2503 	if (f2fs_readonly(sbi->sb))
2504 		return -EROFS;
2505 
2506 	end = range->start + range->len;
2507 	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2508 					end >= MAX_BLKADDR(sbi))
2509 		return -EINVAL;
2510 
2511 	ret = mnt_want_write_file(filp);
2512 	if (ret)
2513 		return ret;
2514 
2515 do_more:
2516 	if (!range->sync) {
2517 		if (!down_write_trylock(&sbi->gc_lock)) {
2518 			ret = -EBUSY;
2519 			goto out;
2520 		}
2521 	} else {
2522 		down_write(&sbi->gc_lock);
2523 	}
2524 
2525 	ret = f2fs_gc(sbi, range->sync, true, false,
2526 				GET_SEGNO(sbi, range->start));
2527 	if (ret) {
2528 		if (ret == -EBUSY)
2529 			ret = -EAGAIN;
2530 		goto out;
2531 	}
2532 	range->start += BLKS_PER_SEC(sbi);
2533 	if (range->start <= end)
2534 		goto do_more;
2535 out:
2536 	mnt_drop_write_file(filp);
2537 	return ret;
2538 }
2539 
2540 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2541 {
2542 	struct f2fs_gc_range range;
2543 
2544 	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2545 							sizeof(range)))
2546 		return -EFAULT;
2547 	return __f2fs_ioc_gc_range(filp, &range);
2548 }
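
/*
 * Userspace sketch for the two GC ioctls above (illustrative only; blkaddr
 * and nr_blks are placeholders for a range inside the main area):
 *
 *	__u32 sync = 1;
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
 *
 *	struct f2fs_gc_range gr = { .sync = 1, .start = blkaddr, .len = nr_blks };
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT_RANGE, &gr);
 */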
2549 
2550 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2551 {
2552 	struct inode *inode = file_inode(filp);
2553 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2554 	int ret;
2555 
2556 	if (!capable(CAP_SYS_ADMIN))
2557 		return -EPERM;
2558 
2559 	if (f2fs_readonly(sbi->sb))
2560 		return -EROFS;
2561 
2562 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2563 		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2564 		return -EINVAL;
2565 	}
2566 
2567 	ret = mnt_want_write_file(filp);
2568 	if (ret)
2569 		return ret;
2570 
2571 	ret = f2fs_sync_fs(sbi->sb, 1);
2572 
2573 	mnt_drop_write_file(filp);
2574 	return ret;
2575 }
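
/*
 * Userspace sketch for the checkpoint ioctl above (illustrative only); it
 * takes no argument and simply forces a checkpoint:
 *
 *	ioctl(fd, F2FS_IOC_WRITE_CHECKPOINT);
 */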
2576 
2577 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2578 					struct file *filp,
2579 					struct f2fs_defragment *range)
2580 {
2581 	struct inode *inode = file_inode(filp);
2582 	struct f2fs_map_blocks map = { .m_next_extent = NULL,
2583 					.m_seg_type = NO_CHECK_TYPE,
2584 					.m_may_create = false };
2585 	struct extent_info ei = {0, 0, 0};
2586 	pgoff_t pg_start, pg_end, next_pgofs;
2587 	unsigned int blk_per_seg = sbi->blocks_per_seg;
2588 	unsigned int total = 0, sec_num;
2589 	block_t blk_end = 0;
2590 	bool fragmented = false;
2591 	int err;
2592 
2593 	/* if in-place-update policy is enabled, don't waste time here */
2594 	if (f2fs_should_update_inplace(inode, NULL))
2595 		return -EINVAL;
2596 
2597 	pg_start = range->start >> PAGE_SHIFT;
2598 	pg_end = (range->start + range->len) >> PAGE_SHIFT;
2599 
2600 	f2fs_balance_fs(sbi, true);
2601 
2602 	inode_lock(inode);
2603 
2604 	/* writeback all dirty pages in the range */
2605 	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2606 						range->start + range->len - 1);
2607 	if (err)
2608 		goto out;
2609 
2610 	/*
2611 	 * Look up mapping info in the extent cache; skip defragmenting if the
2612 	 * physical block addresses are contiguous.
2613 	 */
2614 	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2615 		if (ei.fofs + ei.len >= pg_end)
2616 			goto out;
2617 	}
2618 
2619 	map.m_lblk = pg_start;
2620 	map.m_next_pgofs = &next_pgofs;
2621 
2622 	/*
2623 	 * Look up mapping info in the dnode page cache; skip defragmenting if
2624 	 * all physical block addresses are contiguous, even if there are holes
2625 	 * in the logical blocks.
2626 	 */
2627 	while (map.m_lblk < pg_end) {
2628 		map.m_len = pg_end - map.m_lblk;
2629 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2630 		if (err)
2631 			goto out;
2632 
2633 		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2634 			map.m_lblk = next_pgofs;
2635 			continue;
2636 		}
2637 
2638 		if (blk_end && blk_end != map.m_pblk)
2639 			fragmented = true;
2640 
2641 		/* record the total count of blocks that we're going to move */
2642 		total += map.m_len;
2643 
2644 		blk_end = map.m_pblk + map.m_len;
2645 
2646 		map.m_lblk += map.m_len;
2647 	}
2648 
2649 	if (!fragmented) {
2650 		total = 0;
2651 		goto out;
2652 	}
2653 
2654 	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2655 
2656 	/*
2657 	 * make sure there are enough free sections for LFS allocation; this
2658 	 * avoids running defragmentation in SSR mode when free sections are
2659 	 * being allocated intensively
2660 	 */
2661 	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2662 		err = -EAGAIN;
2663 		goto out;
2664 	}
2665 
2666 	map.m_lblk = pg_start;
2667 	map.m_len = pg_end - pg_start;
2668 	total = 0;
2669 
2670 	while (map.m_lblk < pg_end) {
2671 		pgoff_t idx;
2672 		int cnt = 0;
2673 
2674 do_map:
2675 		map.m_len = pg_end - map.m_lblk;
2676 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2677 		if (err)
2678 			goto clear_out;
2679 
2680 		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2681 			map.m_lblk = next_pgofs;
2682 			goto check;
2683 		}
2684 
2685 		set_inode_flag(inode, FI_DO_DEFRAG);
2686 
2687 		idx = map.m_lblk;
2688 		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2689 			struct page *page;
2690 
2691 			page = f2fs_get_lock_data_page(inode, idx, true);
2692 			if (IS_ERR(page)) {
2693 				err = PTR_ERR(page);
2694 				goto clear_out;
2695 			}
2696 
2697 			set_page_dirty(page);
2698 			f2fs_put_page(page, 1);
2699 
2700 			idx++;
2701 			cnt++;
2702 			total++;
2703 		}
2704 
2705 		map.m_lblk = idx;
2706 check:
2707 		if (map.m_lblk < pg_end && cnt < blk_per_seg)
2708 			goto do_map;
2709 
2710 		clear_inode_flag(inode, FI_DO_DEFRAG);
2711 
2712 		err = filemap_fdatawrite(inode->i_mapping);
2713 		if (err)
2714 			goto out;
2715 	}
2716 clear_out:
2717 	clear_inode_flag(inode, FI_DO_DEFRAG);
2718 out:
2719 	inode_unlock(inode);
2720 	if (!err)
2721 		range->len = (u64)total << PAGE_SHIFT;
2722 	return err;
2723 }
2724 
2725 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2726 {
2727 	struct inode *inode = file_inode(filp);
2728 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2729 	struct f2fs_defragment range;
2730 	int err;
2731 
2732 	if (!capable(CAP_SYS_ADMIN))
2733 		return -EPERM;
2734 
2735 	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2736 		return -EINVAL;
2737 
2738 	if (f2fs_readonly(sbi->sb))
2739 		return -EROFS;
2740 
2741 	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2742 							sizeof(range)))
2743 		return -EFAULT;
2744 
2745 	/* verify alignment of offset & size */
2746 	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2747 		return -EINVAL;
2748 
2749 	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2750 					sbi->max_file_blocks))
2751 		return -EINVAL;
2752 
2753 	err = mnt_want_write_file(filp);
2754 	if (err)
2755 		return err;
2756 
2757 	err = f2fs_defragment_range(sbi, filp, &range);
2758 	mnt_drop_write_file(filp);
2759 
2760 	f2fs_update_time(sbi, REQ_TIME);
2761 	if (err < 0)
2762 		return err;
2763 
2764 	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2765 							sizeof(range)))
2766 		return -EFAULT;
2767 
2768 	return 0;
2769 }
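
/*
 * Userspace sketch for F2FS_IOC_DEFRAGMENT (illustrative only): start and len
 * are byte offsets, must be block aligned, and len is updated on return with
 * the number of bytes queued for relocation; nbytes is a placeholder:
 *
 *	struct f2fs_defragment d = { .start = 0, .len = nbytes };
 *	ioctl(fd, F2FS_IOC_DEFRAGMENT, &d);
 */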
2770 
2771 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2772 			struct file *file_out, loff_t pos_out, size_t len)
2773 {
2774 	struct inode *src = file_inode(file_in);
2775 	struct inode *dst = file_inode(file_out);
2776 	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2777 	size_t olen = len, dst_max_i_size = 0;
2778 	size_t dst_osize;
2779 	int ret;
2780 
2781 	if (file_in->f_path.mnt != file_out->f_path.mnt ||
2782 				src->i_sb != dst->i_sb)
2783 		return -EXDEV;
2784 
2785 	if (unlikely(f2fs_readonly(src->i_sb)))
2786 		return -EROFS;
2787 
2788 	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2789 		return -EINVAL;
2790 
2791 	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2792 		return -EOPNOTSUPP;
2793 
2794 	if (pos_out < 0 || pos_in < 0)
2795 		return -EINVAL;
2796 
2797 	if (src == dst) {
2798 		if (pos_in == pos_out)
2799 			return 0;
2800 		if (pos_out > pos_in && pos_out < pos_in + len)
2801 			return -EINVAL;
2802 	}
2803 
2804 	inode_lock(src);
2805 	if (src != dst) {
2806 		ret = -EBUSY;
2807 		if (!inode_trylock(dst))
2808 			goto out;
2809 	}
2810 
2811 	ret = -EINVAL;
2812 	if (pos_in + len > src->i_size || pos_in + len < pos_in)
2813 		goto out_unlock;
2814 	if (len == 0)
2815 		olen = len = src->i_size - pos_in;
2816 	if (pos_in + len == src->i_size)
2817 		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2818 	if (len == 0) {
2819 		ret = 0;
2820 		goto out_unlock;
2821 	}
2822 
2823 	dst_osize = dst->i_size;
2824 	if (pos_out + olen > dst->i_size)
2825 		dst_max_i_size = pos_out + olen;
2826 
2827 	/* verify the end result is block aligned */
2828 	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2829 			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2830 			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2831 		goto out_unlock;
2832 
2833 	ret = f2fs_convert_inline_inode(src);
2834 	if (ret)
2835 		goto out_unlock;
2836 
2837 	ret = f2fs_convert_inline_inode(dst);
2838 	if (ret)
2839 		goto out_unlock;
2840 
2841 	/* write out all dirty pages from offset */
2842 	ret = filemap_write_and_wait_range(src->i_mapping,
2843 					pos_in, pos_in + len);
2844 	if (ret)
2845 		goto out_unlock;
2846 
2847 	ret = filemap_write_and_wait_range(dst->i_mapping,
2848 					pos_out, pos_out + len);
2849 	if (ret)
2850 		goto out_unlock;
2851 
2852 	f2fs_balance_fs(sbi, true);
2853 
2854 	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2855 	if (src != dst) {
2856 		ret = -EBUSY;
2857 		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2858 			goto out_src;
2859 	}
2860 
2861 	f2fs_lock_op(sbi);
2862 	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2863 				pos_out >> F2FS_BLKSIZE_BITS,
2864 				len >> F2FS_BLKSIZE_BITS, false);
2865 
2866 	if (!ret) {
2867 		if (dst_max_i_size)
2868 			f2fs_i_size_write(dst, dst_max_i_size);
2869 		else if (dst_osize != dst->i_size)
2870 			f2fs_i_size_write(dst, dst_osize);
2871 	}
2872 	f2fs_unlock_op(sbi);
2873 
2874 	if (src != dst)
2875 		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2876 out_src:
2877 	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2878 out_unlock:
2879 	if (src != dst)
2880 		inode_unlock(dst);
2881 out:
2882 	inode_unlock(src);
2883 	return ret;
2884 }
2885 
2886 static int __f2fs_ioc_move_range(struct file *filp,
2887 				struct f2fs_move_range *range)
2888 {
2889 	struct fd dst;
2890 	int err;
2891 
2892 	if (!(filp->f_mode & FMODE_READ) ||
2893 			!(filp->f_mode & FMODE_WRITE))
2894 		return -EBADF;
2895 
2896 	dst = fdget(range->dst_fd);
2897 	if (!dst.file)
2898 		return -EBADF;
2899 
2900 	if (!(dst.file->f_mode & FMODE_WRITE)) {
2901 		err = -EBADF;
2902 		goto err_out;
2903 	}
2904 
2905 	err = mnt_want_write_file(filp);
2906 	if (err)
2907 		goto err_out;
2908 
2909 	err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2910 					range->pos_out, range->len);
2911 
2912 	mnt_drop_write_file(filp);
2913 err_out:
2914 	fdput(dst);
2915 	return err;
2916 }
2917 
2918 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2919 {
2920 	struct f2fs_move_range range;
2921 
2922 	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2923 							sizeof(range)))
2924 		return -EFAULT;
2925 	return __f2fs_ioc_move_range(filp, &range);
2926 }
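
/*
 * Userspace sketch for F2FS_IOC_MOVE_RANGE (illustrative only): both
 * descriptors must be on the same f2fs mount and the offsets/length must be
 * block aligned; dst_fd and nbytes are placeholders:
 *
 *	struct f2fs_move_range mr = {
 *		.dst_fd  = dst_fd,
 *		.pos_in  = 0,
 *		.pos_out = 0,
 *		.len     = nbytes,
 *	};
 *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
 */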
2927 
2928 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2929 {
2930 	struct inode *inode = file_inode(filp);
2931 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2932 	struct sit_info *sm = SIT_I(sbi);
2933 	unsigned int start_segno = 0, end_segno = 0;
2934 	unsigned int dev_start_segno = 0, dev_end_segno = 0;
2935 	struct f2fs_flush_device range;
2936 	int ret;
2937 
2938 	if (!capable(CAP_SYS_ADMIN))
2939 		return -EPERM;
2940 
2941 	if (f2fs_readonly(sbi->sb))
2942 		return -EROFS;
2943 
2944 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2945 		return -EINVAL;
2946 
2947 	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2948 							sizeof(range)))
2949 		return -EFAULT;
2950 
2951 	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2952 			__is_large_section(sbi)) {
2953 		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2954 			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2955 		return -EINVAL;
2956 	}
2957 
2958 	ret = mnt_want_write_file(filp);
2959 	if (ret)
2960 		return ret;
2961 
2962 	if (range.dev_num != 0)
2963 		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
2964 	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
2965 
2966 	start_segno = sm->last_victim[FLUSH_DEVICE];
2967 	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
2968 		start_segno = dev_start_segno;
2969 	end_segno = min(start_segno + range.segments, dev_end_segno);
2970 
2971 	while (start_segno < end_segno) {
2972 		if (!down_write_trylock(&sbi->gc_lock)) {
2973 			ret = -EBUSY;
2974 			goto out;
2975 		}
2976 		sm->last_victim[GC_CB] = end_segno + 1;
2977 		sm->last_victim[GC_GREEDY] = end_segno + 1;
2978 		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
2979 		ret = f2fs_gc(sbi, true, true, true, start_segno);
2980 		if (ret == -EAGAIN)
2981 			ret = 0;
2982 		else if (ret < 0)
2983 			break;
2984 		start_segno++;
2985 	}
2986 out:
2987 	mnt_drop_write_file(filp);
2988 	return ret;
2989 }
2990 
2991 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
2992 {
2993 	struct inode *inode = file_inode(filp);
2994 	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
2995 
2996 	/* Always advertise atomic write support, to match SQLite behavior in Android. */
2997 	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
2998 
2999 	return put_user(sb_feature, (u32 __user *)arg);
3000 }
3001 
3002 #ifdef CONFIG_QUOTA
3003 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3004 {
3005 	struct dquot *transfer_to[MAXQUOTAS] = {};
3006 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3007 	struct super_block *sb = sbi->sb;
3008 	int err = 0;
3009 
3010 	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3011 	if (!IS_ERR(transfer_to[PRJQUOTA])) {
3012 		err = __dquot_transfer(inode, transfer_to);
3013 		if (err)
3014 			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3015 		dqput(transfer_to[PRJQUOTA]);
3016 	}
3017 	return err;
3018 }
3019 
3020 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3021 {
3022 	struct inode *inode = file_inode(filp);
3023 	struct f2fs_inode_info *fi = F2FS_I(inode);
3024 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3025 	struct page *ipage;
3026 	kprojid_t kprojid;
3027 	int err;
3028 
3029 	if (!f2fs_sb_has_project_quota(sbi)) {
3030 		if (projid != F2FS_DEF_PROJID)
3031 			return -EOPNOTSUPP;
3032 		else
3033 			return 0;
3034 	}
3035 
3036 	if (!f2fs_has_extra_attr(inode))
3037 		return -EOPNOTSUPP;
3038 
3039 	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3040 
3041 	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3042 		return 0;
3043 
3044 	err = -EPERM;
3045 	/* Is it quota file? Do not allow user to mess with it */
3046 	if (IS_NOQUOTA(inode))
3047 		return err;
3048 
3049 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
3050 	if (IS_ERR(ipage))
3051 		return PTR_ERR(ipage);
3052 
3053 	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3054 								i_projid)) {
3055 		err = -EOVERFLOW;
3056 		f2fs_put_page(ipage, 1);
3057 		return err;
3058 	}
3059 	f2fs_put_page(ipage, 1);
3060 
3061 	err = dquot_initialize(inode);
3062 	if (err)
3063 		return err;
3064 
3065 	f2fs_lock_op(sbi);
3066 	err = f2fs_transfer_project_quota(inode, kprojid);
3067 	if (err)
3068 		goto out_unlock;
3069 
3070 	F2FS_I(inode)->i_projid = kprojid;
3071 	inode->i_ctime = current_time(inode);
3072 	f2fs_mark_inode_dirty_sync(inode, true);
3073 out_unlock:
3074 	f2fs_unlock_op(sbi);
3075 	return err;
3076 }
3077 #else
3078 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3079 {
3080 	return 0;
3081 }
3082 
3083 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3084 {
3085 	if (projid != F2FS_DEF_PROJID)
3086 		return -EOPNOTSUPP;
3087 	return 0;
3088 }
3089 #endif
3090 
3091 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
3092 
3093 /*
3094  * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
3095  * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
3096  * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
3097  */
3098 
3099 static const struct {
3100 	u32 iflag;
3101 	u32 xflag;
3102 } f2fs_xflags_map[] = {
3103 	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
3104 	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
3105 	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
3106 	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
3107 	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
3108 	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
3109 };
3110 
3111 #define F2FS_SUPPORTED_XFLAGS (		\
3112 		FS_XFLAG_SYNC |		\
3113 		FS_XFLAG_IMMUTABLE |	\
3114 		FS_XFLAG_APPEND |	\
3115 		FS_XFLAG_NODUMP |	\
3116 		FS_XFLAG_NOATIME |	\
3117 		FS_XFLAG_PROJINHERIT)
3118 
3119 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
3120 static inline u32 f2fs_iflags_to_xflags(u32 iflags)
3121 {
3122 	u32 xflags = 0;
3123 	int i;
3124 
3125 	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3126 		if (iflags & f2fs_xflags_map[i].iflag)
3127 			xflags |= f2fs_xflags_map[i].xflag;
3128 
3129 	return xflags;
3130 }
3131 
3132 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
3133 static inline u32 f2fs_xflags_to_iflags(u32 xflags)
3134 {
3135 	u32 iflags = 0;
3136 	int i;
3137 
3138 	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3139 		if (xflags & f2fs_xflags_map[i].xflag)
3140 			iflags |= f2fs_xflags_map[i].iflag;
3141 
3142 	return iflags;
3143 }
3144 
3145 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
3146 {
3147 	struct f2fs_inode_info *fi = F2FS_I(inode);
3148 
3149 	simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
3150 
3151 	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3152 		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3153 }
3154 
3155 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
3156 {
3157 	struct inode *inode = file_inode(filp);
3158 	struct fsxattr fa;
3159 
3160 	f2fs_fill_fsxattr(inode, &fa);
3161 
3162 	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
3163 		return -EFAULT;
3164 	return 0;
3165 }
3166 
3167 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
3168 {
3169 	struct inode *inode = file_inode(filp);
3170 	struct fsxattr fa, old_fa;
3171 	u32 iflags;
3172 	int err;
3173 
3174 	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
3175 		return -EFAULT;
3176 
3177 	/* Make sure caller has proper permission */
3178 	if (!inode_owner_or_capable(inode))
3179 		return -EACCES;
3180 
3181 	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
3182 		return -EOPNOTSUPP;
3183 
3184 	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
3185 	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3186 		return -EOPNOTSUPP;
3187 
3188 	err = mnt_want_write_file(filp);
3189 	if (err)
3190 		return err;
3191 
3192 	inode_lock(inode);
3193 
3194 	f2fs_fill_fsxattr(inode, &old_fa);
3195 	err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
3196 	if (err)
3197 		goto out;
3198 
3199 	err = f2fs_setflags_common(inode, iflags,
3200 			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
3201 	if (err)
3202 		goto out;
3203 
3204 	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
3205 out:
3206 	inode_unlock(inode);
3207 	mnt_drop_write_file(filp);
3208 	return err;
3209 }
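
/*
 * Userspace sketch for the FSGETXATTR/FSSETXATTR pair above, e.g. to attach
 * a file to a project for project quota (illustrative only; 1000 is just an
 * example project id):
 *
 *	struct fsxattr fx;
 *	ioctl(fd, FS_IOC_FSGETXATTR, &fx);
 *	fx.fsx_projid = 1000;
 *	fx.fsx_xflags |= FS_XFLAG_PROJINHERIT;	typically set on directories
 *	ioctl(fd, FS_IOC_FSSETXATTR, &fx);
 */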
3210 
3211 int f2fs_pin_file_control(struct inode *inode, bool inc)
3212 {
3213 	struct f2fs_inode_info *fi = F2FS_I(inode);
3214 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3215 
3216 	/* Use i_gc_failures for normal file as a risk signal. */
3217 	if (inc)
3218 		f2fs_i_gc_failures_write(inode,
3219 				fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3220 
3221 	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3222 		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3223 			  __func__, inode->i_ino,
3224 			  fi->i_gc_failures[GC_FAILURE_PIN]);
3225 		clear_inode_flag(inode, FI_PIN_FILE);
3226 		return -EAGAIN;
3227 	}
3228 	return 0;
3229 }
3230 
3231 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3232 {
3233 	struct inode *inode = file_inode(filp);
3234 	__u32 pin;
3235 	int ret = 0;
3236 
3237 	if (get_user(pin, (__u32 __user *)arg))
3238 		return -EFAULT;
3239 
3240 	if (!S_ISREG(inode->i_mode))
3241 		return -EINVAL;
3242 
3243 	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3244 		return -EROFS;
3245 
3246 	ret = mnt_want_write_file(filp);
3247 	if (ret)
3248 		return ret;
3249 
3250 	inode_lock(inode);
3251 
3252 	if (f2fs_should_update_outplace(inode, NULL)) {
3253 		ret = -EINVAL;
3254 		goto out;
3255 	}
3256 
3257 	if (!pin) {
3258 		clear_inode_flag(inode, FI_PIN_FILE);
3259 		f2fs_i_gc_failures_write(inode, 0);
3260 		goto done;
3261 	}
3262 
3263 	if (f2fs_pin_file_control(inode, false)) {
3264 		ret = -EAGAIN;
3265 		goto out;
3266 	}
3267 
3268 	ret = f2fs_convert_inline_inode(inode);
3269 	if (ret)
3270 		goto out;
3271 
3272 	if (!f2fs_disable_compressed_file(inode)) {
3273 		ret = -EOPNOTSUPP;
3274 		goto out;
3275 	}
3276 
3277 	set_inode_flag(inode, FI_PIN_FILE);
3278 	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3279 done:
3280 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3281 out:
3282 	inode_unlock(inode);
3283 	mnt_drop_write_file(filp);
3284 	return ret;
3285 }
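
/*
 * Userspace sketch for file pinning (illustrative only): pinning keeps the
 * file's blocks from being migrated by GC until too many GC trials fail:
 *
 *	__u32 pin = 1;
 *	ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin);
 *	ioctl(fd, F2FS_IOC_GET_PIN_FILE, &pin);	reads back the GC-failure count
 */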
3286 
3287 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3288 {
3289 	struct inode *inode = file_inode(filp);
3290 	__u32 pin = 0;
3291 
3292 	if (is_inode_flag_set(inode, FI_PIN_FILE))
3293 		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3294 	return put_user(pin, (u32 __user *)arg);
3295 }
3296 
3297 int f2fs_precache_extents(struct inode *inode)
3298 {
3299 	struct f2fs_inode_info *fi = F2FS_I(inode);
3300 	struct f2fs_map_blocks map;
3301 	pgoff_t m_next_extent;
3302 	loff_t end;
3303 	int err;
3304 
3305 	if (is_inode_flag_set(inode, FI_NO_EXTENT))
3306 		return -EOPNOTSUPP;
3307 
3308 	map.m_lblk = 0;
3309 	map.m_next_pgofs = NULL;
3310 	map.m_next_extent = &m_next_extent;
3311 	map.m_seg_type = NO_CHECK_TYPE;
3312 	map.m_may_create = false;
3313 	end = F2FS_I_SB(inode)->max_file_blocks;
3314 
3315 	while (map.m_lblk < end) {
3316 		map.m_len = end - map.m_lblk;
3317 
3318 		down_write(&fi->i_gc_rwsem[WRITE]);
3319 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3320 		up_write(&fi->i_gc_rwsem[WRITE]);
3321 		if (err)
3322 			return err;
3323 
3324 		map.m_lblk = m_next_extent;
3325 	}
3326 
3327 	return err;
3328 }
3329 
3330 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3331 {
3332 	return f2fs_precache_extents(file_inode(filp));
3333 }
3334 
3335 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3336 {
3337 	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3338 	__u64 block_count;
3339 
3340 	if (!capable(CAP_SYS_ADMIN))
3341 		return -EPERM;
3342 
3343 	if (f2fs_readonly(sbi->sb))
3344 		return -EROFS;
3345 
3346 	if (copy_from_user(&block_count, (void __user *)arg,
3347 			   sizeof(block_count)))
3348 		return -EFAULT;
3349 
3350 	return f2fs_resize_fs(sbi, block_count);
3351 }
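
/*
 * Userspace sketch for F2FS_IOC_RESIZE_FS (illustrative only); the argument
 * is the new total block count and new_block_count is a placeholder:
 *
 *	__u64 blocks = new_block_count;
 *	ioctl(fd, F2FS_IOC_RESIZE_FS, &blocks);
 */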
3352 
3353 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3354 {
3355 	struct inode *inode = file_inode(filp);
3356 
3357 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3358 
3359 	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3360 		f2fs_warn(F2FS_I_SB(inode),
3361 			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
3362 			  inode->i_ino);
3363 		return -EOPNOTSUPP;
3364 	}
3365 
3366 	return fsverity_ioctl_enable(filp, (const void __user *)arg);
3367 }
3368 
3369 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3370 {
3371 	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3372 		return -EOPNOTSUPP;
3373 
3374 	return fsverity_ioctl_measure(filp, (void __user *)arg);
3375 }
3376 
3377 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3378 {
3379 	struct inode *inode = file_inode(filp);
3380 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3381 	char *vbuf;
3382 	int count;
3383 	int err = 0;
3384 
3385 	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3386 	if (!vbuf)
3387 		return -ENOMEM;
3388 
3389 	down_read(&sbi->sb_lock);
3390 	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3391 			ARRAY_SIZE(sbi->raw_super->volume_name),
3392 			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3393 	up_read(&sbi->sb_lock);
3394 
3395 	if (copy_to_user((char __user *)arg, vbuf,
3396 				min(FSLABEL_MAX, count)))
3397 		err = -EFAULT;
3398 
3399 	kfree(vbuf);
3400 	return err;
3401 }
3402 
3403 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3404 {
3405 	struct inode *inode = file_inode(filp);
3406 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3407 	char *vbuf;
3408 	int err = 0;
3409 
3410 	if (!capable(CAP_SYS_ADMIN))
3411 		return -EPERM;
3412 
3413 	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3414 	if (IS_ERR(vbuf))
3415 		return PTR_ERR(vbuf);
3416 
3417 	err = mnt_want_write_file(filp);
3418 	if (err)
3419 		goto out;
3420 
3421 	down_write(&sbi->sb_lock);
3422 
3423 	memset(sbi->raw_super->volume_name, 0,
3424 			sizeof(sbi->raw_super->volume_name));
3425 	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3426 			sbi->raw_super->volume_name,
3427 			ARRAY_SIZE(sbi->raw_super->volume_name));
3428 
3429 	err = f2fs_commit_super(sbi, false);
3430 
3431 	up_write(&sbi->sb_lock);
3432 
3433 	mnt_drop_write_file(filp);
3434 out:
3435 	kfree(vbuf);
3436 	return err;
3437 }
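
/*
 * Userspace sketch for the volume label ioctls above (illustrative only;
 * setting the label needs CAP_SYS_ADMIN):
 *
 *	char label[FSLABEL_MAX];
 *	ioctl(fd, FS_IOC_GETFSLABEL, label);
 *	ioctl(fd, FS_IOC_SETFSLABEL, "data");
 */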
3438 
3439 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3440 {
3441 	struct inode *inode = file_inode(filp);
3442 	__u64 blocks;
3443 
3444 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3445 		return -EOPNOTSUPP;
3446 
3447 	if (!f2fs_compressed_file(inode))
3448 		return -EINVAL;
3449 
3450 	blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3451 	return put_user(blocks, (u64 __user *)arg);
3452 }
3453 
3454 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3455 {
3456 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3457 	unsigned int released_blocks = 0;
3458 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3459 	block_t blkaddr;
3460 	int i;
3461 
3462 	for (i = 0; i < count; i++) {
3463 		blkaddr = data_blkaddr(dn->inode, dn->node_page,
3464 						dn->ofs_in_node + i);
3465 
3466 		if (!__is_valid_data_blkaddr(blkaddr))
3467 			continue;
3468 		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3469 					DATA_GENERIC_ENHANCE)))
3470 			return -EFSCORRUPTED;
3471 	}
3472 
3473 	while (count) {
3474 		int compr_blocks = 0;
3475 
3476 		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3477 			blkaddr = f2fs_data_blkaddr(dn);
3478 
3479 			if (i == 0) {
3480 				if (blkaddr == COMPRESS_ADDR)
3481 					continue;
3482 				dn->ofs_in_node += cluster_size;
3483 				goto next;
3484 			}
3485 
3486 			if (__is_valid_data_blkaddr(blkaddr))
3487 				compr_blocks++;
3488 
3489 			if (blkaddr != NEW_ADDR)
3490 				continue;
3491 
3492 			dn->data_blkaddr = NULL_ADDR;
3493 			f2fs_set_data_blkaddr(dn);
3494 		}
3495 
3496 		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3497 		dec_valid_block_count(sbi, dn->inode,
3498 					cluster_size - compr_blocks);
3499 
3500 		released_blocks += cluster_size - compr_blocks;
3501 next:
3502 		count -= cluster_size;
3503 	}
3504 
3505 	return released_blocks;
3506 }
3507 
3508 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3509 {
3510 	struct inode *inode = file_inode(filp);
3511 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3512 	pgoff_t page_idx = 0, last_idx;
3513 	unsigned int released_blocks = 0;
3514 	int ret;
3515 	int writecount;
3516 
3517 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3518 		return -EOPNOTSUPP;
3519 
3520 	if (!f2fs_compressed_file(inode))
3521 		return -EINVAL;
3522 
3523 	if (f2fs_readonly(sbi->sb))
3524 		return -EROFS;
3525 
3526 	ret = mnt_want_write_file(filp);
3527 	if (ret)
3528 		return ret;
3529 
3530 	f2fs_balance_fs(F2FS_I_SB(inode), true);
3531 
3532 	inode_lock(inode);
3533 
3534 	writecount = atomic_read(&inode->i_writecount);
3535 	if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3536 			(!(filp->f_mode & FMODE_WRITE) && writecount)) {
3537 		ret = -EBUSY;
3538 		goto out;
3539 	}
3540 
3541 	if (IS_IMMUTABLE(inode)) {
3542 		ret = -EINVAL;
3543 		goto out;
3544 	}
3545 
3546 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3547 	if (ret)
3548 		goto out;
3549 
3550 	F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
3551 	f2fs_set_inode_flags(inode);
3552 	inode->i_ctime = current_time(inode);
3553 	f2fs_mark_inode_dirty_sync(inode, true);
3554 
3555 	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3556 		goto out;
3557 
3558 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3559 	down_write(&F2FS_I(inode)->i_mmap_sem);
3560 
3561 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3562 
3563 	while (page_idx < last_idx) {
3564 		struct dnode_of_data dn;
3565 		pgoff_t end_offset, count;
3566 
3567 		set_new_dnode(&dn, inode, NULL, NULL, 0);
3568 		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3569 		if (ret) {
3570 			if (ret == -ENOENT) {
3571 				page_idx = f2fs_get_next_page_offset(&dn,
3572 								page_idx);
3573 				ret = 0;
3574 				continue;
3575 			}
3576 			break;
3577 		}
3578 
3579 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3580 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3581 		count = round_up(count, F2FS_I(inode)->i_cluster_size);
3582 
3583 		ret = release_compress_blocks(&dn, count);
3584 
3585 		f2fs_put_dnode(&dn);
3586 
3587 		if (ret < 0)
3588 			break;
3589 
3590 		page_idx += count;
3591 		released_blocks += ret;
3592 	}
3593 
3594 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3595 	up_write(&F2FS_I(inode)->i_mmap_sem);
3596 out:
3597 	inode_unlock(inode);
3598 
3599 	mnt_drop_write_file(filp);
3600 
3601 	if (ret >= 0) {
3602 		ret = put_user(released_blocks, (u64 __user *)arg);
3603 	} else if (released_blocks &&
3604 			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3605 		set_sbi_flag(sbi, SBI_NEED_FSCK);
3606 		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3607 			"iblocks=%llu, released=%u, compr_blocks=%u, "
3608 			"run fsck to fix.",
3609 			__func__, inode->i_ino, inode->i_blocks,
3610 			released_blocks,
3611 			atomic_read(&F2FS_I(inode)->i_compr_blocks));
3612 	}
3613 
3614 	return ret;
3615 }
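
/*
 * Userspace sketch for the compression block ioctls (illustrative only):
 * releasing returns the blocks saved by compression to free space and makes
 * the file immutable, while reserving gives them back and clears the
 * immutable flag:
 *
 *	__u64 blks;
 *	ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &blks);
 *	ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &blks);
 *	ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &blks);
 */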
3616 
3617 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3618 {
3619 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3620 	unsigned int reserved_blocks = 0;
3621 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3622 	block_t blkaddr;
3623 	int i;
3624 
3625 	for (i = 0; i < count; i++) {
3626 		blkaddr = data_blkaddr(dn->inode, dn->node_page,
3627 						dn->ofs_in_node + i);
3628 
3629 		if (!__is_valid_data_blkaddr(blkaddr))
3630 			continue;
3631 		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3632 					DATA_GENERIC_ENHANCE)))
3633 			return -EFSCORRUPTED;
3634 	}
3635 
3636 	while (count) {
3637 		int compr_blocks = 0;
3638 		blkcnt_t reserved;
3639 		int ret;
3640 
3641 		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3642 			blkaddr = f2fs_data_blkaddr(dn);
3643 
3644 			if (i == 0) {
3645 				if (blkaddr == COMPRESS_ADDR)
3646 					continue;
3647 				dn->ofs_in_node += cluster_size;
3648 				goto next;
3649 			}
3650 
3651 			if (__is_valid_data_blkaddr(blkaddr)) {
3652 				compr_blocks++;
3653 				continue;
3654 			}
3655 
3656 			dn->data_blkaddr = NEW_ADDR;
3657 			f2fs_set_data_blkaddr(dn);
3658 		}
3659 
3660 		reserved = cluster_size - compr_blocks;
3661 		ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3662 		if (ret)
3663 			return ret;
3664 
3665 		if (reserved != cluster_size - compr_blocks)
3666 			return -ENOSPC;
3667 
3668 		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3669 
3670 		reserved_blocks += reserved;
3671 next:
3672 		count -= cluster_size;
3673 	}
3674 
3675 	return reserved_blocks;
3676 }
3677 
3678 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3679 {
3680 	struct inode *inode = file_inode(filp);
3681 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3682 	pgoff_t page_idx = 0, last_idx;
3683 	unsigned int reserved_blocks = 0;
3684 	int ret;
3685 
3686 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3687 		return -EOPNOTSUPP;
3688 
3689 	if (!f2fs_compressed_file(inode))
3690 		return -EINVAL;
3691 
3692 	if (f2fs_readonly(sbi->sb))
3693 		return -EROFS;
3694 
3695 	ret = mnt_want_write_file(filp);
3696 	if (ret)
3697 		return ret;
3698 
3699 	if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3700 		goto out;
3701 
3702 	f2fs_balance_fs(F2FS_I_SB(inode), true);
3703 
3704 	inode_lock(inode);
3705 
3706 	if (!IS_IMMUTABLE(inode)) {
3707 		ret = -EINVAL;
3708 		goto unlock_inode;
3709 	}
3710 
3711 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3712 	down_write(&F2FS_I(inode)->i_mmap_sem);
3713 
3714 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3715 
3716 	while (page_idx < last_idx) {
3717 		struct dnode_of_data dn;
3718 		pgoff_t end_offset, count;
3719 
3720 		set_new_dnode(&dn, inode, NULL, NULL, 0);
3721 		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3722 		if (ret) {
3723 			if (ret == -ENOENT) {
3724 				page_idx = f2fs_get_next_page_offset(&dn,
3725 								page_idx);
3726 				ret = 0;
3727 				continue;
3728 			}
3729 			break;
3730 		}
3731 
3732 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3733 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3734 		count = round_up(count, F2FS_I(inode)->i_cluster_size);
3735 
3736 		ret = reserve_compress_blocks(&dn, count);
3737 
3738 		f2fs_put_dnode(&dn);
3739 
3740 		if (ret < 0)
3741 			break;
3742 
3743 		page_idx += count;
3744 		reserved_blocks += ret;
3745 	}
3746 
3747 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3748 	up_write(&F2FS_I(inode)->i_mmap_sem);
3749 
3750 	if (ret >= 0) {
3751 		F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
3752 		f2fs_set_inode_flags(inode);
3753 		inode->i_ctime = current_time(inode);
3754 		f2fs_mark_inode_dirty_sync(inode, true);
3755 	}
3756 unlock_inode:
3757 	inode_unlock(inode);
3758 out:
3759 	mnt_drop_write_file(filp);
3760 
3761 	if (ret >= 0) {
3762 		ret = put_user(reserved_blocks, (u64 __user *)arg);
3763 	} else if (reserved_blocks &&
3764 			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3765 		set_sbi_flag(sbi, SBI_NEED_FSCK);
3766 		f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3767 			"iblocks=%llu, reserved=%u, compr_blocks=%u, "
3768 			"run fsck to fix.",
3769 			__func__, inode->i_ino, inode->i_blocks,
3770 			reserved_blocks,
3771 			atomic_read(&F2FS_I(inode)->i_compr_blocks));
3772 	}
3773 
3774 	return ret;
3775 }
3776 
3777 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3778 		pgoff_t off, block_t block, block_t len, u32 flags)
3779 {
3780 	struct request_queue *q = bdev_get_queue(bdev);
3781 	sector_t sector = SECTOR_FROM_BLOCK(block);
3782 	sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3783 	int ret = 0;
3784 
3785 	if (!q)
3786 		return -ENXIO;
3787 
3788 	if (flags & F2FS_TRIM_FILE_DISCARD)
3789 		ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3790 						blk_queue_secure_erase(q) ?
3791 						BLKDEV_DISCARD_SECURE : 0);
3792 
3793 	if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3794 		if (IS_ENCRYPTED(inode))
3795 			ret = fscrypt_zeroout_range(inode, off, block, len);
3796 		else
3797 			ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3798 					GFP_NOFS, 0);
3799 	}
3800 
3801 	return ret;
3802 }
3803 
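/*
 * F2FS_IOC_SEC_TRIM_FILE: securely erase a block-aligned range of a regular,
 * non-atomic, non-compressed file.  Dirty pagecache is flushed and truncated
 * first, then the mapped blocks are walked and batched into as few
 * discard/zeroout requests as possible.  A range.len of (u64)-1 means
 * "to the end of the file".
 *
 * Illustrative userspace sketch (not part of this file); assumes the
 * <linux/f2fs.h> uapi header:
 *
 *	struct f2fs_sectrim_range range = {
 *		.start = 0,
 *		.len = (__u64)-1,
 *		.flags = F2FS_TRIM_FILE_DISCARD | F2FS_TRIM_FILE_ZEROOUT,
 *	};
 *	int fd = open("secret.dat", O_WRONLY);
 *
 *	if (fd < 0 || ioctl(fd, F2FS_IOC_SEC_TRIM_FILE, &range))
 *		perror("F2FS_IOC_SEC_TRIM_FILE");
 */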
static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct block_device *prev_bdev = NULL;
	struct f2fs_sectrim_range range;
	pgoff_t index, pg_end, prev_index = 0;
	block_t prev_block = 0, len = 0;
	loff_t end_addr;
	bool to_end = false;
	int ret = 0;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
			!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
			!f2fs_hw_support_discard(sbi)) ||
			((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
			 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
		return -EOPNOTSUPP;

	file_start_write(filp);
	inode_lock(inode);

	if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
			range.start >= inode->i_size) {
		ret = -EINVAL;
		goto err;
	}

	if (range.len == 0)
		goto err;

	if (inode->i_size - range.start > range.len) {
		end_addr = range.start + range.len;
	} else {
		end_addr = range.len == (u64)-1 ?
			sbi->sb->s_maxbytes : inode->i_size;
		to_end = true;
	}

	if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
			(!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
		ret = -EINVAL;
		goto err;
	}

	index = F2FS_BYTES_TO_BLK(range.start);
	pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto err;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	ret = filemap_write_and_wait_range(mapping, range.start,
			to_end ? LLONG_MAX : end_addr - 1);
	if (ret)
		goto out;

	truncate_inode_pages_range(mapping, range.start,
			to_end ? -1 : end_addr - 1);

	while (index < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;
		int i;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				index = f2fs_get_next_page_offset(&dn, index);
				continue;
			}
			goto out;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - index);
		for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
			struct block_device *cur_bdev;
			block_t blkaddr = f2fs_data_blkaddr(&dn);

			if (!__is_valid_data_blkaddr(blkaddr))
				continue;

			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
						DATA_GENERIC_ENHANCE)) {
				ret = -EFSCORRUPTED;
				f2fs_put_dnode(&dn);
				goto out;
			}

			cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
			if (f2fs_is_multi_device(sbi)) {
				int di = f2fs_target_device_index(sbi, blkaddr);

				blkaddr -= FDEV(di).start_blk;
			}

			/*
			 * Extend the pending extent if this block is
			 * physically contiguous on the same device;
			 * otherwise erase the pending extent and start
			 * a new one.
			 */
			if (len) {
				if (prev_bdev == cur_bdev &&
						index == prev_index + len &&
						blkaddr == prev_block + len) {
					len++;
				} else {
					ret = f2fs_secure_erase(prev_bdev,
						inode, prev_index, prev_block,
						len, range.flags);
					if (ret) {
						f2fs_put_dnode(&dn);
						goto out;
					}

					len = 0;
				}
			}

			if (!len) {
				prev_bdev = cur_bdev;
				prev_index = index;
				prev_block = blkaddr;
				len = 1;
			}
		}

		f2fs_put_dnode(&dn);

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();
	}

	/* erase the last pending extent, if any */
	if (len)
		ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
				prev_block, len, range.flags);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
err:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}

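/*
 * Common dispatch table for every f2fs ioctl, shared by the native entry
 * point below and, under CONFIG_COMPAT, by f2fs_compat_ioctl().
 */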
static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case FS_IOC_GET_ENCRYPTION_NONCE:
		return f2fs_ioc_get_encryption_nonce(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	case FS_IOC_GETFSLABEL:
		return f2fs_ioc_getfslabel(filp, arg);
	case FS_IOC_SETFSLABEL:
		return f2fs_ioc_setfslabel(filp, arg);
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
		return f2fs_get_compress_blocks(filp, arg);
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
		return f2fs_release_compress_blocks(filp, arg);
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
		return f2fs_reserve_compress_blocks(filp, arg);
	case F2FS_IOC_SEC_TRIM_FILE:
		return f2fs_sec_trim_file(filp, arg);
	default:
		return -ENOTTY;
	}
}

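/*
 * Native ioctl entry point: fail fast once a checkpoint error has been
 * detected or while the filesystem is not checkpoint-ready, then hand off
 * to the common dispatcher.
 */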
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
		return -ENOSPC;

	return __f2fs_ioctl(filp, cmd, arg);
}

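/*
 * Reads take the generic path; they are refused with -EOPNOTSUPP when the
 * inode is compressed but the matching compression backend is missing, and
 * successfully read bytes are accounted to iostat.
 */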
static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	int ret;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	ret = generic_file_read_iter(iocb, iter);

	if (ret > 0)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);

	return ret;
}

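/*
 * Buffered and direct writes.  Unless preallocation is skipped (NOWAIT
 * overwrites, unfaultable user buffers, out-of-place direct I/O), blocks for
 * the whole write are preallocated up front so the generic write loop cannot
 * run out of space halfway; blocks left dangling by a short write are
 * truncated away afterwards.
 */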
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode)) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		inode_lock(inode);
	}

	if (unlikely(IS_IMMUTABLE(inode))) {
		ret = -EPERM;
		goto unlock;
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		if ((iocb->ki_flags & IOCB_NOWAIT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = -EAGAIN;
				goto out;
			}
			goto write;
		}

		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
			goto write;

		if (iocb->ki_flags & IOCB_DIRECT) {
			/*
			 * Convert inline data for Direct I/O before entering
			 * f2fs_direct_IO().
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				goto out_err;
			/*
			 * If f2fs_force_buffered_io() is true, we have to
			 * allocate blocks all the time, since f2fs_direct_IO
			 * will fall back to buffered I/O.
			 */
4147 			if (!f2fs_force_buffered_io(inode, iocb, from) &&
4148 					allow_outplace_dio(inode, iocb, from))
4149 				goto write;
4150 		}
4151 		preallocated = true;
4152 		target_size = iocb->ki_pos + iov_iter_count(from);
4153 
4154 		err = f2fs_preallocate_blocks(iocb, from);
4155 		if (err) {
4156 out_err:
4157 			clear_inode_flag(inode, FI_NO_PREALLOC);
4158 			inode_unlock(inode);
4159 			ret = err;
4160 			goto out;
4161 		}
4162 write:
4163 		ret = __generic_file_write_iter(iocb, from);
4164 		clear_inode_flag(inode, FI_NO_PREALLOC);
4165 
4166 		/* if we couldn't write data, we should deallocate blocks. */
4167 		if (preallocated && i_size_read(inode) < target_size)
4168 			f2fs_truncate(inode);
4169 
4170 		if (ret > 0)
4171 			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4172 	}
4173 unlock:
4174 	inode_unlock(inode);
4175 out:
4176 	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
4177 					iov_iter_count(from), ret);
4178 	if (ret > 0)
4179 		ret = generic_write_sync(iocb, ret);
4180 	return ret;
4181 }
4182 
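/*
 * 32-bit compat handling.  struct f2fs_gc_range and struct f2fs_move_range
 * embed u64 members whose alignment (and hence struct layout) differs
 * between 32-bit and 64-bit ABIs, so those two ioctls need explicit
 * translation thunks; all other commands share one binary layout and are
 * forwarded to __f2fs_ioctl() unchanged.
 */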
#ifdef CONFIG_COMPAT
struct compat_f2fs_gc_range {
	u32 sync;
	compat_u64 start;
	compat_u64 len;
};
#define F2FS_IOC32_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,\
						struct compat_f2fs_gc_range)

static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_gc_range __user *urange;
	struct f2fs_gc_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.sync, &urange->sync);
	err |= get_user(range.start, &urange->start);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_gc_range(file, &range);
}

struct compat_f2fs_move_range {
	u32 dst_fd;
	compat_u64 pos_in;
	compat_u64 pos_out;
	compat_u64 len;
};
#define F2FS_IOC32_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
					struct compat_f2fs_move_range)

static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
{
	struct compat_f2fs_move_range __user *urange;
	struct f2fs_move_range range;
	int err;

	urange = compat_ptr(arg);
	err = get_user(range.dst_fd, &urange->dst_fd);
	err |= get_user(range.pos_in, &urange->pos_in);
	err |= get_user(range.pos_out, &urange->pos_out);
	err |= get_user(range.len, &urange->len);
	if (err)
		return -EFAULT;

	return __f2fs_ioc_move_range(file, &range);
}

long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
		return -ENOSPC;

	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
		return f2fs_compat_ioc_gc_range(file, arg);
	case F2FS_IOC32_MOVE_RANGE:
		return f2fs_compat_ioc_move_range(file, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case FITRIM:
	case FS_IOC_SET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_PWSALT:
	case FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case FS_IOC_GET_ENCRYPTION_NONCE:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case FS_IOC_FSGETXATTR:
	case FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case FS_IOC_GETFSLABEL:
	case FS_IOC_SETFSLABEL:
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
	case F2FS_IOC_SEC_TRIM_FILE:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

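/* Regular-file operations; splice reuses the ->read_iter/->write_iter paths. */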
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= f2fs_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};