1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * fs/f2fs/file.c
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/stat.h>
11 #include <linux/buffer_head.h>
12 #include <linux/writeback.h>
13 #include <linux/blkdev.h>
14 #include <linux/falloc.h>
15 #include <linux/types.h>
16 #include <linux/compat.h>
17 #include <linux/uaccess.h>
18 #include <linux/mount.h>
19 #include <linux/pagevec.h>
20 #include <linux/uio.h>
21 #include <linux/uuid.h>
22 #include <linux/file.h>
23 #include <linux/nls.h>
24 #include <linux/sched/signal.h>
25 
26 #include "f2fs.h"
27 #include "node.h"
28 #include "segment.h"
29 #include "xattr.h"
30 #include "acl.h"
31 #include "gc.h"
32 #include "trace.h"
33 #include <trace/events/f2fs.h>
34 #include <uapi/linux/f2fs.h>
35 
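/*
 * Read fault handler for mmapped f2fs files: take i_mmap_sem shared around
 * filemap_fault(), and account one block of mapped read I/O towards iostat
 * when the fault returned a locked page.
 */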
36 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
37 {
38 	struct inode *inode = file_inode(vmf->vma->vm_file);
39 	vm_fault_t ret;
40 
41 	down_read(&F2FS_I(inode)->i_mmap_sem);
42 	ret = filemap_fault(vmf);
43 	up_read(&F2FS_I(inode)->i_mmap_sem);
44 
45 	if (ret & VM_FAULT_LOCKED)
46 		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
47 							F2FS_BLKSIZE);
48 
49 	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
50 
51 	return ret;
52 }
53 
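/*
 * Make an mmapped page writable: bail out on immutable inodes and checkpoint
 * errors, allocate the backing block if needed (under the PRE_AIO map lock),
 * wait for in-flight writeback, zero the part of the page beyond EOF and
 * finally dirty the page, all under i_mmap_sem and sb_start_pagefault().
 */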
54 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
55 {
56 	struct page *page = vmf->page;
57 	struct inode *inode = file_inode(vmf->vma->vm_file);
58 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
59 	struct dnode_of_data dn;
60 	bool need_alloc = true;
61 	int err = 0;
62 
63 	if (unlikely(IS_IMMUTABLE(inode)))
64 		return VM_FAULT_SIGBUS;
65 
66 	if (unlikely(f2fs_cp_error(sbi))) {
67 		err = -EIO;
68 		goto err;
69 	}
70 
71 	if (!f2fs_is_checkpoint_ready(sbi)) {
72 		err = -ENOSPC;
73 		goto err;
74 	}
75 
76 #ifdef CONFIG_F2FS_FS_COMPRESSION
77 	if (f2fs_compressed_file(inode)) {
78 		int ret = f2fs_is_compressed_cluster(inode, page->index);
79 
80 		if (ret < 0) {
81 			err = ret;
82 			goto err;
83 		} else if (ret) {
84 			if (ret < F2FS_I(inode)->i_cluster_size) {
85 				err = -EAGAIN;
86 				goto err;
87 			}
88 			need_alloc = false;
89 		}
90 	}
91 #endif
92 	/* should be done outside of any locked page */
93 	if (need_alloc)
94 		f2fs_balance_fs(sbi, true);
95 
96 	sb_start_pagefault(inode->i_sb);
97 
98 	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
99 
100 	file_update_time(vmf->vma->vm_file);
101 	down_read(&F2FS_I(inode)->i_mmap_sem);
102 	lock_page(page);
103 	if (unlikely(page->mapping != inode->i_mapping ||
104 			page_offset(page) > i_size_read(inode) ||
105 			!PageUptodate(page))) {
106 		unlock_page(page);
107 		err = -EFAULT;
108 		goto out_sem;
109 	}
110 
111 	if (need_alloc) {
112 		/* block allocation */
113 		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
114 		set_new_dnode(&dn, inode, NULL, NULL, 0);
115 		err = f2fs_get_block(&dn, page->index);
116 		f2fs_put_dnode(&dn);
117 		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
118 	}
119 
120 #ifdef CONFIG_F2FS_FS_COMPRESSION
121 	if (!need_alloc) {
122 		set_new_dnode(&dn, inode, NULL, NULL, 0);
123 		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
124 		f2fs_put_dnode(&dn);
125 	}
126 #endif
127 	if (err) {
128 		unlock_page(page);
129 		goto out_sem;
130 	}
131 
132 	f2fs_wait_on_page_writeback(page, DATA, false, true);
133 
134 	/* wait for GCed page writeback via META_MAPPING */
135 	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
136 
137 	/*
138 	 * check to see if the page is mapped already (no holes)
139 	 */
140 	if (PageMappedToDisk(page))
141 		goto out_sem;
142 
143 	/* page is wholly or partially inside EOF */
144 	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
145 						i_size_read(inode)) {
146 		loff_t offset;
147 
148 		offset = i_size_read(inode) & ~PAGE_MASK;
149 		zero_user_segment(page, offset, PAGE_SIZE);
150 	}
151 	set_page_dirty(page);
152 	if (!PageUptodate(page))
153 		SetPageUptodate(page);
154 
155 	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
156 	f2fs_update_time(sbi, REQ_TIME);
157 
158 	trace_f2fs_vm_page_mkwrite(page, DATA);
159 out_sem:
160 	up_read(&F2FS_I(inode)->i_mmap_sem);
161 
162 	sb_end_pagefault(inode->i_sb);
163 err:
164 	return block_page_mkwrite_return(err);
165 }
166 
167 static const struct vm_operations_struct f2fs_file_vm_ops = {
168 	.fault		= f2fs_filemap_fault,
169 	.map_pages	= filemap_map_pages,
170 	.page_mkwrite	= f2fs_vm_page_mkwrite,
171 };
172 
173 static int get_parent_ino(struct inode *inode, nid_t *pino)
174 {
175 	struct dentry *dentry;
176 
177 	/*
178 	 * Make sure to get the non-deleted alias.  The alias associated with
179 	 * the open file descriptor being fsync()'ed may be deleted already.
180 	 */
181 	dentry = d_find_alias(inode);
182 	if (!dentry)
183 		return 0;
184 
185 	*pino = parent_ino(dentry);
186 	dput(dentry);
187 	return 1;
188 }
189 
190 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
191 {
192 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
193 	enum cp_reason_type cp_reason = CP_NO_NEEDED;
194 
195 	if (!S_ISREG(inode->i_mode))
196 		cp_reason = CP_NON_REGULAR;
197 	else if (f2fs_compressed_file(inode))
198 		cp_reason = CP_COMPRESSED;
199 	else if (inode->i_nlink != 1)
200 		cp_reason = CP_HARDLINK;
201 	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
202 		cp_reason = CP_SB_NEED_CP;
203 	else if (file_wrong_pino(inode))
204 		cp_reason = CP_WRONG_PINO;
205 	else if (!f2fs_space_for_roll_forward(sbi))
206 		cp_reason = CP_NO_SPC_ROLL;
207 	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
208 		cp_reason = CP_NODE_NEED_CP;
209 	else if (test_opt(sbi, FASTBOOT))
210 		cp_reason = CP_FASTBOOT_MODE;
211 	else if (F2FS_OPTION(sbi).active_logs == 2)
212 		cp_reason = CP_SPEC_LOG_NUM;
213 	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
214 		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
215 		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
216 							TRANS_DIR_INO))
217 		cp_reason = CP_RECOVER_DIR;
218 
219 	return cp_reason;
220 }
221 
222 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
223 {
224 	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
225 	bool ret = false;
226 	/* but we still need to check for pending inode updates */
227 	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
228 		ret = true;
229 	f2fs_put_page(i, 0);
230 	return ret;
231 }
232 
233 static void try_to_fix_pino(struct inode *inode)
234 {
235 	struct f2fs_inode_info *fi = F2FS_I(inode);
236 	nid_t pino;
237 
238 	down_write(&fi->i_sem);
239 	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
240 			get_parent_ino(inode, &pino)) {
241 		f2fs_i_pino_write(inode, pino);
242 		file_got_pino(inode);
243 	}
244 	up_write(&fi->i_sem);
245 }
246 
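/*
 * Core of fsync()/fdatasync(): write back the data pages, then either issue
 * a full checkpoint (when need_do_checkpoint() finds a reason) or persist the
 * fsync-marked node chain so roll-forward recovery can replay this inode,
 * and finish with a device flush unless fsync_mode is "nobarrier".
 */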
247 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
248 						int datasync, bool atomic)
249 {
250 	struct inode *inode = file->f_mapping->host;
251 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
252 	nid_t ino = inode->i_ino;
253 	int ret = 0;
254 	enum cp_reason_type cp_reason = 0;
255 	struct writeback_control wbc = {
256 		.sync_mode = WB_SYNC_ALL,
257 		.nr_to_write = LONG_MAX,
258 		.for_reclaim = 0,
259 	};
260 	unsigned int seq_id = 0;
261 
262 	if (unlikely(f2fs_readonly(inode->i_sb)))
263 		return 0;
264 
265 	trace_f2fs_sync_file_enter(inode);
266 
267 	if (S_ISDIR(inode->i_mode))
268 		goto go_write;
269 
270 	/* if fdatasync is triggered, let's do in-place-update */
271 	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
272 		set_inode_flag(inode, FI_NEED_IPU);
273 	ret = file_write_and_wait_range(file, start, end);
274 	clear_inode_flag(inode, FI_NEED_IPU);
275 
276 	if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
277 		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
278 		return ret;
279 	}
280 
281 	/* if the inode is dirty, let's recover all the time */
282 	if (!f2fs_skip_inode_update(inode, datasync)) {
283 		f2fs_write_inode(inode, NULL);
284 		goto go_write;
285 	}
286 
287 	/*
288 	 * if there is no written data, don't waste time writing recovery info.
289 	 */
290 	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
291 			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
292 
293 		/* it may call write_inode just prior to fsync */
294 		if (need_inode_page_update(sbi, ino))
295 			goto go_write;
296 
297 		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
298 				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
299 			goto flush_out;
300 		goto out;
301 	}
302 go_write:
303 	/*
304 	 * Both fdatasync() and fsync() can be recovered from a
305 	 * sudden power-off.
306 	 */
307 	down_read(&F2FS_I(inode)->i_sem);
308 	cp_reason = need_do_checkpoint(inode);
309 	up_read(&F2FS_I(inode)->i_sem);
310 
311 	if (cp_reason) {
312 		/* all the dirty node pages should be flushed for POR */
313 		ret = f2fs_sync_fs(inode->i_sb, 1);
314 
315 		/*
316 		 * We've secured consistency through sync_fs. The following pino
317 		 * will be used only for fsynced inodes after the checkpoint.
318 		 */
319 		try_to_fix_pino(inode);
320 		clear_inode_flag(inode, FI_APPEND_WRITE);
321 		clear_inode_flag(inode, FI_UPDATE_WRITE);
322 		goto out;
323 	}
324 sync_nodes:
325 	atomic_inc(&sbi->wb_sync_req[NODE]);
326 	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
327 	atomic_dec(&sbi->wb_sync_req[NODE]);
328 	if (ret)
329 		goto out;
330 
331 	/* if cp_error was set, we should avoid an infinite loop */
332 	if (unlikely(f2fs_cp_error(sbi))) {
333 		ret = -EIO;
334 		goto out;
335 	}
336 
337 	if (f2fs_need_inode_block_update(sbi, ino)) {
338 		f2fs_mark_inode_dirty_sync(inode, true);
339 		f2fs_write_inode(inode, NULL);
340 		goto sync_nodes;
341 	}
342 
343 	/*
344 	 * If it's an atomic write, keeping write ordering is enough. So here
345 	 * we don't need to wait for node write completion, since we use the
346 	 * node chain which serializes node blocks. If one of the node writes is
347 	 * reordered, we simply see a broken chain, which stops roll-forward
348 	 * recovery. That means we recover either all or none of the node blocks
349 	 * covered by the fsync mark.
350 	 */
351 	if (!atomic) {
352 		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
353 		if (ret)
354 			goto out;
355 	}
356 
357 	/* once recovery info is written, we don't need to track this */
358 	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
359 	clear_inode_flag(inode, FI_APPEND_WRITE);
360 flush_out:
361 	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
362 		ret = f2fs_issue_flush(sbi, inode->i_ino);
363 	if (!ret) {
364 		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
365 		clear_inode_flag(inode, FI_UPDATE_WRITE);
366 		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
367 	}
368 	f2fs_update_time(sbi, REQ_TIME);
369 out:
370 	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
371 	f2fs_trace_ios(NULL, 1);
372 	return ret;
373 }
374 
375 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
376 {
377 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
378 		return -EIO;
379 	return f2fs_do_sync_file(file, start, end, datasync, false);
380 }
381 
382 static bool __found_offset(struct address_space *mapping, block_t blkaddr,
383 				pgoff_t index, int whence)
384 {
385 	switch (whence) {
386 	case SEEK_DATA:
387 		if (__is_valid_data_blkaddr(blkaddr))
388 			return true;
389 		if (blkaddr == NEW_ADDR &&
390 		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
391 			return true;
392 		break;
393 	case SEEK_HOLE:
394 		if (blkaddr == NULL_ADDR)
395 			return true;
396 		break;
397 	}
398 	return false;
399 }
400 
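/*
 * Implement SEEK_DATA/SEEK_HOLE by walking the dnode blocks from the given
 * offset and testing each block address (and dirty page-cache state) with
 * __found_offset().
 */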
401 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
402 {
403 	struct inode *inode = file->f_mapping->host;
404 	loff_t maxbytes = inode->i_sb->s_maxbytes;
405 	struct dnode_of_data dn;
406 	pgoff_t pgofs, end_offset;
407 	loff_t data_ofs = offset;
408 	loff_t isize;
409 	int err = 0;
410 
411 	inode_lock(inode);
412 
413 	isize = i_size_read(inode);
414 	if (offset >= isize)
415 		goto fail;
416 
417 	/* handle inline data case */
418 	if (f2fs_has_inline_data(inode)) {
419 		if (whence == SEEK_HOLE) {
420 			data_ofs = isize;
421 			goto found;
422 		} else if (whence == SEEK_DATA) {
423 			data_ofs = offset;
424 			goto found;
425 		}
426 	}
427 
428 	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
429 
430 	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
431 		set_new_dnode(&dn, inode, NULL, NULL, 0);
432 		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
433 		if (err && err != -ENOENT) {
434 			goto fail;
435 		} else if (err == -ENOENT) {
436 			/* direct node does not exist */
437 			if (whence == SEEK_DATA) {
438 				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
439 				continue;
440 			} else {
441 				goto found;
442 			}
443 		}
444 
445 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
446 
447 		/* find data/hole in dnode block */
448 		for (; dn.ofs_in_node < end_offset;
449 				dn.ofs_in_node++, pgofs++,
450 				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
451 			block_t blkaddr;
452 
453 			blkaddr = f2fs_data_blkaddr(&dn);
454 
455 			if (__is_valid_data_blkaddr(blkaddr) &&
456 				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
457 					blkaddr, DATA_GENERIC_ENHANCE)) {
458 				f2fs_put_dnode(&dn);
459 				goto fail;
460 			}
461 
462 			if (__found_offset(file->f_mapping, blkaddr,
463 							pgofs, whence)) {
464 				f2fs_put_dnode(&dn);
465 				goto found;
466 			}
467 		}
468 		f2fs_put_dnode(&dn);
469 	}
470 
471 	if (whence == SEEK_DATA)
472 		goto fail;
473 found:
474 	if (whence == SEEK_HOLE && data_ofs > isize)
475 		data_ofs = isize;
476 	inode_unlock(inode);
477 	return vfs_setpos(file, data_ofs, maxbytes);
478 fail:
479 	inode_unlock(inode);
480 	return -ENXIO;
481 }
482 
483 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
484 {
485 	struct inode *inode = file->f_mapping->host;
486 	loff_t maxbytes = inode->i_sb->s_maxbytes;
487 
488 	switch (whence) {
489 	case SEEK_SET:
490 	case SEEK_CUR:
491 	case SEEK_END:
492 		return generic_file_llseek_size(file, offset, whence,
493 						maxbytes, i_size_read(inode));
494 	case SEEK_DATA:
495 	case SEEK_HOLE:
496 		if (offset < 0)
497 			return -ENXIO;
498 		return f2fs_seek_block(file, offset, whence);
499 	}
500 
501 	return -EINVAL;
502 }
503 
504 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
505 {
506 	struct inode *inode = file_inode(file);
507 	int err;
508 
509 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
510 		return -EIO;
511 
512 	if (!f2fs_is_compress_backend_ready(inode))
513 		return -EOPNOTSUPP;
514 
515 	/* we don't need to use inline_data strictly */
516 	err = f2fs_convert_inline_inode(inode);
517 	if (err)
518 		return err;
519 
520 	file_accessed(file);
521 	vma->vm_ops = &f2fs_file_vm_ops;
522 	set_inode_flag(inode, FI_MMAP_FILE);
523 	return 0;
524 }
525 
526 static int f2fs_file_open(struct inode *inode, struct file *filp)
527 {
528 	int err = fscrypt_file_open(inode, filp);
529 
530 	if (err)
531 		return err;
532 
533 	if (!f2fs_is_compress_backend_ready(inode))
534 		return -EOPNOTSUPP;
535 
536 	err = fsverity_file_open(inode, filp);
537 	if (err)
538 		return err;
539 
540 	filp->f_mode |= FMODE_NOWAIT;
541 
542 	return dquot_file_open(inode, filp);
543 }
544 
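/*
 * Invalidate up to @count block addresses starting at dn->ofs_in_node,
 * keeping per-cluster compressed block accounting in sync, then shrink the
 * extent cache and the inode's valid block count accordingly.
 */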
545 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
546 {
547 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
548 	struct f2fs_node *raw_node;
549 	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
550 	__le32 *addr;
551 	int base = 0;
552 	bool compressed_cluster = false;
553 	int cluster_index = 0, valid_blocks = 0;
554 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
555 	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
556 
557 	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
558 		base = get_extra_isize(dn->inode);
559 
560 	raw_node = F2FS_NODE(dn->node_page);
561 	addr = blkaddr_in_node(raw_node) + base + ofs;
562 
563 	/* Assumption: truncation starts at a cluster boundary */
564 	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
565 		block_t blkaddr = le32_to_cpu(*addr);
566 
567 		if (f2fs_compressed_file(dn->inode) &&
568 					!(cluster_index & (cluster_size - 1))) {
569 			if (compressed_cluster)
570 				f2fs_i_compr_blocks_update(dn->inode,
571 							valid_blocks, false);
572 			compressed_cluster = (blkaddr == COMPRESS_ADDR);
573 			valid_blocks = 0;
574 		}
575 
576 		if (blkaddr == NULL_ADDR)
577 			continue;
578 
579 		dn->data_blkaddr = NULL_ADDR;
580 		f2fs_set_data_blkaddr(dn);
581 
582 		if (__is_valid_data_blkaddr(blkaddr)) {
583 			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
584 					DATA_GENERIC_ENHANCE))
585 				continue;
586 			if (compressed_cluster)
587 				valid_blocks++;
588 		}
589 
590 		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
591 			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
592 
593 		f2fs_invalidate_blocks(sbi, blkaddr);
594 
595 		if (!released || blkaddr != COMPRESS_ADDR)
596 			nr_free++;
597 	}
598 
599 	if (compressed_cluster)
600 		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
601 
602 	if (nr_free) {
603 		pgoff_t fofs;
604 		/*
605 		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
606 		 * we will invalidate all blkaddr in the whole range.
607 		 */
608 		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
609 							dn->inode) + ofs;
610 		f2fs_update_extent_cache_range(dn, fofs, 0, len);
611 		dec_valid_block_count(sbi, dn->inode, nr_free);
612 	}
613 	dn->ofs_in_node = ofs;
614 
615 	f2fs_update_time(sbi, REQ_TIME);
616 	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
617 					 dn->ofs_in_node, nr_free);
618 }
619 
620 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
621 {
622 	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
623 }
624 
625 static int truncate_partial_data_page(struct inode *inode, u64 from,
626 								bool cache_only)
627 {
628 	loff_t offset = from & (PAGE_SIZE - 1);
629 	pgoff_t index = from >> PAGE_SHIFT;
630 	struct address_space *mapping = inode->i_mapping;
631 	struct page *page;
632 
633 	if (!offset && !cache_only)
634 		return 0;
635 
636 	if (cache_only) {
637 		page = find_lock_page(mapping, index);
638 		if (page && PageUptodate(page))
639 			goto truncate_out;
640 		f2fs_put_page(page, 1);
641 		return 0;
642 	}
643 
644 	page = f2fs_get_lock_data_page(inode, index, true);
645 	if (IS_ERR(page))
646 		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
647 truncate_out:
648 	f2fs_wait_on_page_writeback(page, DATA, true, true);
649 	zero_user(page, offset, PAGE_SIZE - offset);
650 
651 	/* An encrypted inode should have its key available to truncate the last page. */
652 	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
653 	if (!cache_only)
654 		set_page_dirty(page);
655 	f2fs_put_page(page, 1);
656 	return 0;
657 }
658 
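/*
 * Truncate all blocks at or beyond @from: inline data is trimmed in place,
 * addresses in the boundary dnode are freed with
 * f2fs_truncate_data_blocks_range(), whole node blocks beyond it are dropped
 * via f2fs_truncate_inode_blocks(), and the partial last page is zeroed.
 */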
659 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
660 {
661 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
662 	struct dnode_of_data dn;
663 	pgoff_t free_from;
664 	int count = 0, err = 0;
665 	struct page *ipage;
666 	bool truncate_page = false;
667 
668 	trace_f2fs_truncate_blocks_enter(inode, from);
669 
670 	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
671 
672 	if (free_from >= sbi->max_file_blocks)
673 		goto free_partial;
674 
675 	if (lock)
676 		f2fs_lock_op(sbi);
677 
678 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
679 	if (IS_ERR(ipage)) {
680 		err = PTR_ERR(ipage);
681 		goto out;
682 	}
683 
684 	if (f2fs_has_inline_data(inode)) {
685 		f2fs_truncate_inline_inode(inode, ipage, from);
686 		f2fs_put_page(ipage, 1);
687 		truncate_page = true;
688 		goto out;
689 	}
690 
691 	set_new_dnode(&dn, inode, ipage, NULL, 0);
692 	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
693 	if (err) {
694 		if (err == -ENOENT)
695 			goto free_next;
696 		goto out;
697 	}
698 
699 	count = ADDRS_PER_PAGE(dn.node_page, inode);
700 
701 	count -= dn.ofs_in_node;
702 	f2fs_bug_on(sbi, count < 0);
703 
704 	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
705 		f2fs_truncate_data_blocks_range(&dn, count);
706 		free_from += count;
707 	}
708 
709 	f2fs_put_dnode(&dn);
710 free_next:
711 	err = f2fs_truncate_inode_blocks(inode, free_from);
712 out:
713 	if (lock)
714 		f2fs_unlock_op(sbi);
715 free_partial:
716 	/* lastly zero out the partial page at the new EOF */
717 	if (!err)
718 		err = truncate_partial_data_page(inode, from, truncate_page);
719 
720 	trace_f2fs_truncate_blocks_exit(inode, err);
721 	return err;
722 }
723 
724 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
725 {
726 	u64 free_from = from;
727 	int err;
728 
729 #ifdef CONFIG_F2FS_FS_COMPRESSION
730 	/*
731 	 * for compressed files, only cluster-size-aligned
732 	 * truncation is supported.
733 	 */
734 	if (f2fs_compressed_file(inode))
735 		free_from = round_up(from,
736 				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
737 #endif
738 
739 	err = f2fs_do_truncate_blocks(inode, free_from, lock);
740 	if (err)
741 		return err;
742 
743 #ifdef CONFIG_F2FS_FS_COMPRESSION
744 	if (from != free_from) {
745 		err = f2fs_truncate_partial_cluster(inode, from, lock);
746 		if (err)
747 			return err;
748 	}
749 #endif
750 
751 	return 0;
752 }
753 
754 int f2fs_truncate(struct inode *inode)
755 {
756 	int err;
757 
758 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
759 		return -EIO;
760 
761 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
762 				S_ISLNK(inode->i_mode)))
763 		return 0;
764 
765 	trace_f2fs_truncate(inode);
766 
767 	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
768 		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
769 		return -EIO;
770 	}
771 
772 	err = dquot_initialize(inode);
773 	if (err)
774 		return err;
775 
776 	/* we should check inline_data size */
777 	if (!f2fs_may_inline_data(inode)) {
778 		err = f2fs_convert_inline_inode(inode);
779 		if (err)
780 			return err;
781 	}
782 
783 	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
784 	if (err)
785 		return err;
786 
787 	inode->i_mtime = inode->i_ctime = current_time(inode);
788 	f2fs_mark_inode_dirty_sync(inode, false);
789 	return 0;
790 }
791 
792 int f2fs_getattr(const struct path *path, struct kstat *stat,
793 		 u32 request_mask, unsigned int query_flags)
794 {
795 	struct inode *inode = d_inode(path->dentry);
796 	struct f2fs_inode_info *fi = F2FS_I(inode);
797 	struct f2fs_inode *ri;
798 	unsigned int flags;
799 
800 	if (f2fs_has_extra_attr(inode) &&
801 			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
802 			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
803 		stat->result_mask |= STATX_BTIME;
804 		stat->btime.tv_sec = fi->i_crtime.tv_sec;
805 		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
806 	}
807 
808 	flags = fi->i_flags;
809 	if (flags & F2FS_COMPR_FL)
810 		stat->attributes |= STATX_ATTR_COMPRESSED;
811 	if (flags & F2FS_APPEND_FL)
812 		stat->attributes |= STATX_ATTR_APPEND;
813 	if (IS_ENCRYPTED(inode))
814 		stat->attributes |= STATX_ATTR_ENCRYPTED;
815 	if (flags & F2FS_IMMUTABLE_FL)
816 		stat->attributes |= STATX_ATTR_IMMUTABLE;
817 	if (flags & F2FS_NODUMP_FL)
818 		stat->attributes |= STATX_ATTR_NODUMP;
819 	if (IS_VERITY(inode))
820 		stat->attributes |= STATX_ATTR_VERITY;
821 
822 	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
823 				  STATX_ATTR_APPEND |
824 				  STATX_ATTR_ENCRYPTED |
825 				  STATX_ATTR_IMMUTABLE |
826 				  STATX_ATTR_NODUMP |
827 				  STATX_ATTR_VERITY);
828 
829 	generic_fillattr(inode, stat);
830 
831 	/* we need to show initial sectors used for inline_data/dentries */
832 	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
833 					f2fs_has_inline_dentry(inode))
834 		stat->blocks += (stat->size + 511) >> 9;
835 
836 	return 0;
837 }
838 
839 #ifdef CONFIG_F2FS_FS_POSIX_ACL
840 static void __setattr_copy(struct inode *inode, const struct iattr *attr)
841 {
842 	unsigned int ia_valid = attr->ia_valid;
843 
844 	if (ia_valid & ATTR_UID)
845 		inode->i_uid = attr->ia_uid;
846 	if (ia_valid & ATTR_GID)
847 		inode->i_gid = attr->ia_gid;
848 	if (ia_valid & ATTR_ATIME)
849 		inode->i_atime = attr->ia_atime;
850 	if (ia_valid & ATTR_MTIME)
851 		inode->i_mtime = attr->ia_mtime;
852 	if (ia_valid & ATTR_CTIME)
853 		inode->i_ctime = attr->ia_ctime;
854 	if (ia_valid & ATTR_MODE) {
855 		umode_t mode = attr->ia_mode;
856 
857 		if (!in_group_p(inode->i_gid) &&
858 			!capable_wrt_inode_uidgid(inode, CAP_FSETID))
859 			mode &= ~S_ISGID;
860 		set_acl_inode(inode, mode);
861 	}
862 }
863 #else
864 #define __setattr_copy setattr_copy
865 #endif
866 
867 int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
868 {
869 	struct inode *inode = d_inode(dentry);
870 	int err;
871 
872 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
873 		return -EIO;
874 
875 	if (unlikely(IS_IMMUTABLE(inode)))
876 		return -EPERM;
877 
878 	if (unlikely(IS_APPEND(inode) &&
879 			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
880 				  ATTR_GID | ATTR_TIMES_SET))))
881 		return -EPERM;
882 
883 	if ((attr->ia_valid & ATTR_SIZE) &&
884 		!f2fs_is_compress_backend_ready(inode))
885 		return -EOPNOTSUPP;
886 
887 	err = setattr_prepare(dentry, attr);
888 	if (err)
889 		return err;
890 
891 	err = fscrypt_prepare_setattr(dentry, attr);
892 	if (err)
893 		return err;
894 
895 	err = fsverity_prepare_setattr(dentry, attr);
896 	if (err)
897 		return err;
898 
899 	if (is_quota_modification(inode, attr)) {
900 		err = dquot_initialize(inode);
901 		if (err)
902 			return err;
903 	}
904 	if ((attr->ia_valid & ATTR_UID &&
905 		!uid_eq(attr->ia_uid, inode->i_uid)) ||
906 		(attr->ia_valid & ATTR_GID &&
907 		!gid_eq(attr->ia_gid, inode->i_gid))) {
908 		f2fs_lock_op(F2FS_I_SB(inode));
909 		err = dquot_transfer(inode, attr);
910 		if (err) {
911 			set_sbi_flag(F2FS_I_SB(inode),
912 					SBI_QUOTA_NEED_REPAIR);
913 			f2fs_unlock_op(F2FS_I_SB(inode));
914 			return err;
915 		}
916 		/*
917 		 * update uid/gid under lock_op(), so that dquot and inode can
918 		 * be updated atomically.
919 		 */
920 		if (attr->ia_valid & ATTR_UID)
921 			inode->i_uid = attr->ia_uid;
922 		if (attr->ia_valid & ATTR_GID)
923 			inode->i_gid = attr->ia_gid;
924 		f2fs_mark_inode_dirty_sync(inode, true);
925 		f2fs_unlock_op(F2FS_I_SB(inode));
926 	}
927 
928 	if (attr->ia_valid & ATTR_SIZE) {
929 		loff_t old_size = i_size_read(inode);
930 
931 		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
932 			/*
933 			 * convert the inline inode before i_size_write() to keep
934 			 * i_size within the inline_data size while the inline flag is set.
935 			 */
936 			err = f2fs_convert_inline_inode(inode);
937 			if (err)
938 				return err;
939 		}
940 
941 		/*
942 		 * wait for inflight dio, blocks should be removed after
943 		 * IO completion.
944 		 */
945 		if (attr->ia_size < old_size)
946 			inode_dio_wait(inode);
947 
948 		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
949 		down_write(&F2FS_I(inode)->i_mmap_sem);
950 
951 		truncate_setsize(inode, attr->ia_size);
952 
953 		if (attr->ia_size <= old_size)
954 			err = f2fs_truncate(inode);
955 		/*
956 		 * do not trim all blocks after i_size if target size is
957 		 * larger than i_size.
958 		 */
959 		up_write(&F2FS_I(inode)->i_mmap_sem);
960 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
961 		if (err)
962 			return err;
963 
964 		spin_lock(&F2FS_I(inode)->i_size_lock);
965 		inode->i_mtime = inode->i_ctime = current_time(inode);
966 		F2FS_I(inode)->last_disk_size = i_size_read(inode);
967 		spin_unlock(&F2FS_I(inode)->i_size_lock);
968 	}
969 
970 	__setattr_copy(inode, attr);
971 
972 	if (attr->ia_valid & ATTR_MODE) {
973 		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
974 		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
975 			inode->i_mode = F2FS_I(inode)->i_acl_mode;
976 			clear_inode_flag(inode, FI_ACL_MODE);
977 		}
978 	}
979 
980 	/* file size may have changed here */
981 	f2fs_mark_inode_dirty_sync(inode, true);
982 
983 	/* inode change will produce dirty node pages flushed by checkpoint */
984 	f2fs_balance_fs(F2FS_I_SB(inode), true);
985 
986 	return err;
987 }
988 
989 const struct inode_operations f2fs_file_inode_operations = {
990 	.getattr	= f2fs_getattr,
991 	.setattr	= f2fs_setattr,
992 	.get_acl	= f2fs_get_acl,
993 	.set_acl	= f2fs_set_acl,
994 	.listxattr	= f2fs_listxattr,
995 	.fiemap		= f2fs_fiemap,
996 };
997 
998 static int fill_zero(struct inode *inode, pgoff_t index,
999 					loff_t start, loff_t len)
1000 {
1001 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1002 	struct page *page;
1003 
1004 	if (!len)
1005 		return 0;
1006 
1007 	f2fs_balance_fs(sbi, true);
1008 
1009 	f2fs_lock_op(sbi);
1010 	page = f2fs_get_new_data_page(inode, NULL, index, false);
1011 	f2fs_unlock_op(sbi);
1012 
1013 	if (IS_ERR(page))
1014 		return PTR_ERR(page);
1015 
1016 	f2fs_wait_on_page_writeback(page, DATA, true, true);
1017 	zero_user(page, start, len);
1018 	set_page_dirty(page);
1019 	f2fs_put_page(page, 1);
1020 	return 0;
1021 }
1022 
1023 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1024 {
1025 	int err;
1026 
1027 	while (pg_start < pg_end) {
1028 		struct dnode_of_data dn;
1029 		pgoff_t end_offset, count;
1030 
1031 		set_new_dnode(&dn, inode, NULL, NULL, 0);
1032 		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1033 		if (err) {
1034 			if (err == -ENOENT) {
1035 				pg_start = f2fs_get_next_page_offset(&dn,
1036 								pg_start);
1037 				continue;
1038 			}
1039 			return err;
1040 		}
1041 
1042 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1043 		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1044 
1045 		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1046 
1047 		f2fs_truncate_data_blocks_range(&dn, count);
1048 		f2fs_put_dnode(&dn);
1049 
1050 		pg_start += count;
1051 	}
1052 	return 0;
1053 }
1054 
1055 static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
1056 {
1057 	pgoff_t pg_start, pg_end;
1058 	loff_t off_start, off_end;
1059 	int ret;
1060 
1061 	ret = f2fs_convert_inline_inode(inode);
1062 	if (ret)
1063 		return ret;
1064 
1065 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1066 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1067 
1068 	off_start = offset & (PAGE_SIZE - 1);
1069 	off_end = (offset + len) & (PAGE_SIZE - 1);
1070 
1071 	if (pg_start == pg_end) {
1072 		ret = fill_zero(inode, pg_start, off_start,
1073 						off_end - off_start);
1074 		if (ret)
1075 			return ret;
1076 	} else {
1077 		if (off_start) {
1078 			ret = fill_zero(inode, pg_start++, off_start,
1079 						PAGE_SIZE - off_start);
1080 			if (ret)
1081 				return ret;
1082 		}
1083 		if (off_end) {
1084 			ret = fill_zero(inode, pg_end, 0, off_end);
1085 			if (ret)
1086 				return ret;
1087 		}
1088 
1089 		if (pg_start < pg_end) {
1090 			loff_t blk_start, blk_end;
1091 			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1092 
1093 			f2fs_balance_fs(sbi, true);
1094 
1095 			blk_start = (loff_t)pg_start << PAGE_SHIFT;
1096 			blk_end = (loff_t)pg_end << PAGE_SHIFT;
1097 
1098 			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1099 			down_write(&F2FS_I(inode)->i_mmap_sem);
1100 
1101 			truncate_pagecache_range(inode, blk_start, blk_end - 1);
1102 
1103 			f2fs_lock_op(sbi);
1104 			ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1105 			f2fs_unlock_op(sbi);
1106 
1107 			up_write(&F2FS_I(inode)->i_mmap_sem);
1108 			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1109 		}
1110 	}
1111 
1112 	return ret;
1113 }
1114 
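/*
 * Record the existing block addresses of [off, off + len) into @blkaddr.
 * Non-checkpointed blocks are detached from the dnode (set to NULL_ADDR) and
 * flagged in @do_replace so they can be moved instead of being invalidated;
 * this is not supported in LFS mode.
 */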
1115 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1116 				int *do_replace, pgoff_t off, pgoff_t len)
1117 {
1118 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1119 	struct dnode_of_data dn;
1120 	int ret, done, i;
1121 
1122 next_dnode:
1123 	set_new_dnode(&dn, inode, NULL, NULL, 0);
1124 	ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1125 	if (ret && ret != -ENOENT) {
1126 		return ret;
1127 	} else if (ret == -ENOENT) {
1128 		if (dn.max_level == 0)
1129 			return -ENOENT;
1130 		done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1131 						dn.ofs_in_node, len);
1132 		blkaddr += done;
1133 		do_replace += done;
1134 		goto next;
1135 	}
1136 
1137 	done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1138 							dn.ofs_in_node, len);
1139 	for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1140 		*blkaddr = f2fs_data_blkaddr(&dn);
1141 
1142 		if (__is_valid_data_blkaddr(*blkaddr) &&
1143 			!f2fs_is_valid_blkaddr(sbi, *blkaddr,
1144 					DATA_GENERIC_ENHANCE)) {
1145 			f2fs_put_dnode(&dn);
1146 			return -EFSCORRUPTED;
1147 		}
1148 
1149 		if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1150 
1151 			if (f2fs_lfs_mode(sbi)) {
1152 				f2fs_put_dnode(&dn);
1153 				return -EOPNOTSUPP;
1154 			}
1155 
1156 			/* do not invalidate this block address */
1157 			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1158 			*do_replace = 1;
1159 		}
1160 	}
1161 	f2fs_put_dnode(&dn);
1162 next:
1163 	len -= done;
1164 	off += done;
1165 	if (len)
1166 		goto next_dnode;
1167 	return 0;
1168 }
1169 
1170 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1171 				int *do_replace, pgoff_t off, int len)
1172 {
1173 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1174 	struct dnode_of_data dn;
1175 	int ret, i;
1176 
1177 	for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1178 		if (*do_replace == 0)
1179 			continue;
1180 
1181 		set_new_dnode(&dn, inode, NULL, NULL, 0);
1182 		ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1183 		if (ret) {
1184 			dec_valid_block_count(sbi, inode, 1);
1185 			f2fs_invalidate_blocks(sbi, *blkaddr);
1186 		} else {
1187 			f2fs_update_data_blkaddr(&dn, *blkaddr);
1188 		}
1189 		f2fs_put_dnode(&dn);
1190 	}
1191 	return 0;
1192 }
1193 
1194 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1195 			block_t *blkaddr, int *do_replace,
1196 			pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1197 {
1198 	struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1199 	pgoff_t i = 0;
1200 	int ret;
1201 
1202 	while (i < len) {
1203 		if (blkaddr[i] == NULL_ADDR && !full) {
1204 			i++;
1205 			continue;
1206 		}
1207 
1208 		if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1209 			struct dnode_of_data dn;
1210 			struct node_info ni;
1211 			size_t new_size;
1212 			pgoff_t ilen;
1213 
1214 			set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1215 			ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1216 			if (ret)
1217 				return ret;
1218 
1219 			ret = f2fs_get_node_info(sbi, dn.nid, &ni);
1220 			if (ret) {
1221 				f2fs_put_dnode(&dn);
1222 				return ret;
1223 			}
1224 
1225 			ilen = min((pgoff_t)
1226 				ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1227 						dn.ofs_in_node, len - i);
1228 			do {
1229 				dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1230 				f2fs_truncate_data_blocks_range(&dn, 1);
1231 
1232 				if (do_replace[i]) {
1233 					f2fs_i_blocks_write(src_inode,
1234 							1, false, false);
1235 					f2fs_i_blocks_write(dst_inode,
1236 							1, true, false);
1237 					f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1238 					blkaddr[i], ni.version, true, false);
1239 
1240 					do_replace[i] = 0;
1241 				}
1242 				dn.ofs_in_node++;
1243 				i++;
1244 				new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1245 				if (dst_inode->i_size < new_size)
1246 					f2fs_i_size_write(dst_inode, new_size);
1247 			} while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1248 
1249 			f2fs_put_dnode(&dn);
1250 		} else {
1251 			struct page *psrc, *pdst;
1252 
1253 			psrc = f2fs_get_lock_data_page(src_inode,
1254 							src + i, true);
1255 			if (IS_ERR(psrc))
1256 				return PTR_ERR(psrc);
1257 			pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1258 								true);
1259 			if (IS_ERR(pdst)) {
1260 				f2fs_put_page(psrc, 1);
1261 				return PTR_ERR(pdst);
1262 			}
1263 			f2fs_copy_page(psrc, pdst);
1264 			set_page_dirty(pdst);
1265 			f2fs_put_page(pdst, 1);
1266 			f2fs_put_page(psrc, 1);
1267 
1268 			ret = f2fs_truncate_hole(src_inode,
1269 						src + i, src + i + 1);
1270 			if (ret)
1271 				return ret;
1272 			i++;
1273 		}
1274 	}
1275 	return 0;
1276 }
1277 
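/*
 * Move blocks from @src in src_inode to @dst in dst_inode, processing at most
 * four dnodes' worth of addresses per iteration, and roll the source
 * addresses back if any step fails.
 */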
1278 static int __exchange_data_block(struct inode *src_inode,
1279 			struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1280 			pgoff_t len, bool full)
1281 {
1282 	block_t *src_blkaddr;
1283 	int *do_replace;
1284 	pgoff_t olen;
1285 	int ret;
1286 
1287 	while (len) {
1288 		olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1289 
1290 		src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1291 					array_size(olen, sizeof(block_t)),
1292 					GFP_NOFS);
1293 		if (!src_blkaddr)
1294 			return -ENOMEM;
1295 
1296 		do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1297 					array_size(olen, sizeof(int)),
1298 					GFP_NOFS);
1299 		if (!do_replace) {
1300 			kvfree(src_blkaddr);
1301 			return -ENOMEM;
1302 		}
1303 
1304 		ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1305 					do_replace, src, olen);
1306 		if (ret)
1307 			goto roll_back;
1308 
1309 		ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1310 					do_replace, src, dst, olen, full);
1311 		if (ret)
1312 			goto roll_back;
1313 
1314 		src += olen;
1315 		dst += olen;
1316 		len -= olen;
1317 
1318 		kvfree(src_blkaddr);
1319 		kvfree(do_replace);
1320 	}
1321 	return 0;
1322 
1323 roll_back:
1324 	__roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1325 	kvfree(src_blkaddr);
1326 	kvfree(do_replace);
1327 	return ret;
1328 }
1329 
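/*
 * Collapse-range helper: with the GC, mmap and op locks held, drop the extent
 * tree and page cache, then shift every block from @end down to @start so the
 * punched-out range disappears from the middle of the file.
 */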
1330 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1331 {
1332 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1333 	pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1334 	pgoff_t start = offset >> PAGE_SHIFT;
1335 	pgoff_t end = (offset + len) >> PAGE_SHIFT;
1336 	int ret;
1337 
1338 	f2fs_balance_fs(sbi, true);
1339 
1340 	/* avoid gc operation during block exchange */
1341 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1342 	down_write(&F2FS_I(inode)->i_mmap_sem);
1343 
1344 	f2fs_lock_op(sbi);
1345 	f2fs_drop_extent_tree(inode);
1346 	truncate_pagecache(inode, offset);
1347 	ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1348 	f2fs_unlock_op(sbi);
1349 
1350 	up_write(&F2FS_I(inode)->i_mmap_sem);
1351 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1352 	return ret;
1353 }
1354 
1355 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1356 {
1357 	loff_t new_size;
1358 	int ret;
1359 
1360 	if (offset + len >= i_size_read(inode))
1361 		return -EINVAL;
1362 
1363 	/* collapse range should be aligned to the f2fs block size. */
1364 	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1365 		return -EINVAL;
1366 
1367 	ret = f2fs_convert_inline_inode(inode);
1368 	if (ret)
1369 		return ret;
1370 
1371 	/* write out all dirty pages from offset */
1372 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1373 	if (ret)
1374 		return ret;
1375 
1376 	ret = f2fs_do_collapse(inode, offset, len);
1377 	if (ret)
1378 		return ret;
1379 
1380 	/* write out all moved pages, if possible */
1381 	down_write(&F2FS_I(inode)->i_mmap_sem);
1382 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1383 	truncate_pagecache(inode, offset);
1384 
1385 	new_size = i_size_read(inode) - len;
1386 	ret = f2fs_truncate_blocks(inode, new_size, true);
1387 	up_write(&F2FS_I(inode)->i_mmap_sem);
1388 	if (!ret)
1389 		f2fs_i_size_write(inode, new_size);
1390 	return ret;
1391 }
1392 
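/*
 * Zero-range helper: reserve new blocks for every hole in [start, end), then
 * invalidate each written block and reset its address to NEW_ADDR so the
 * range reads back as zeroes, and update the extent cache for the range.
 */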
1393 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1394 								pgoff_t end)
1395 {
1396 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1397 	pgoff_t index = start;
1398 	unsigned int ofs_in_node = dn->ofs_in_node;
1399 	blkcnt_t count = 0;
1400 	int ret;
1401 
1402 	for (; index < end; index++, dn->ofs_in_node++) {
1403 		if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1404 			count++;
1405 	}
1406 
1407 	dn->ofs_in_node = ofs_in_node;
1408 	ret = f2fs_reserve_new_blocks(dn, count);
1409 	if (ret)
1410 		return ret;
1411 
1412 	dn->ofs_in_node = ofs_in_node;
1413 	for (index = start; index < end; index++, dn->ofs_in_node++) {
1414 		dn->data_blkaddr = f2fs_data_blkaddr(dn);
1415 		/*
1416 		 * f2fs_reserve_new_blocks will not guarantee entire block
1417 		 * allocation.
1418 		 */
1419 		if (dn->data_blkaddr == NULL_ADDR) {
1420 			ret = -ENOSPC;
1421 			break;
1422 		}
1423 
1424 		if (dn->data_blkaddr == NEW_ADDR)
1425 			continue;
1426 
1427 		if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
1428 					DATA_GENERIC_ENHANCE)) {
1429 			ret = -EFSCORRUPTED;
1430 			break;
1431 		}
1432 
1433 		f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1434 		dn->data_blkaddr = NEW_ADDR;
1435 		f2fs_set_data_blkaddr(dn);
1436 	}
1437 
1438 	f2fs_update_extent_cache_range(dn, start, 0, index - start);
1439 
1440 	return ret;
1441 }
1442 
1443 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1444 								int mode)
1445 {
1446 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1447 	struct address_space *mapping = inode->i_mapping;
1448 	pgoff_t index, pg_start, pg_end;
1449 	loff_t new_size = i_size_read(inode);
1450 	loff_t off_start, off_end;
1451 	int ret = 0;
1452 
1453 	ret = inode_newsize_ok(inode, (len + offset));
1454 	if (ret)
1455 		return ret;
1456 
1457 	ret = f2fs_convert_inline_inode(inode);
1458 	if (ret)
1459 		return ret;
1460 
1461 	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1462 	if (ret)
1463 		return ret;
1464 
1465 	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1466 	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1467 
1468 	off_start = offset & (PAGE_SIZE - 1);
1469 	off_end = (offset + len) & (PAGE_SIZE - 1);
1470 
1471 	if (pg_start == pg_end) {
1472 		ret = fill_zero(inode, pg_start, off_start,
1473 						off_end - off_start);
1474 		if (ret)
1475 			return ret;
1476 
1477 		new_size = max_t(loff_t, new_size, offset + len);
1478 	} else {
1479 		if (off_start) {
1480 			ret = fill_zero(inode, pg_start++, off_start,
1481 						PAGE_SIZE - off_start);
1482 			if (ret)
1483 				return ret;
1484 
1485 			new_size = max_t(loff_t, new_size,
1486 					(loff_t)pg_start << PAGE_SHIFT);
1487 		}
1488 
1489 		for (index = pg_start; index < pg_end;) {
1490 			struct dnode_of_data dn;
1491 			unsigned int end_offset;
1492 			pgoff_t end;
1493 
1494 			down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1495 			down_write(&F2FS_I(inode)->i_mmap_sem);
1496 
1497 			truncate_pagecache_range(inode,
1498 				(loff_t)index << PAGE_SHIFT,
1499 				((loff_t)pg_end << PAGE_SHIFT) - 1);
1500 
1501 			f2fs_lock_op(sbi);
1502 
1503 			set_new_dnode(&dn, inode, NULL, NULL, 0);
1504 			ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1505 			if (ret) {
1506 				f2fs_unlock_op(sbi);
1507 				up_write(&F2FS_I(inode)->i_mmap_sem);
1508 				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1509 				goto out;
1510 			}
1511 
1512 			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1513 			end = min(pg_end, end_offset - dn.ofs_in_node + index);
1514 
1515 			ret = f2fs_do_zero_range(&dn, index, end);
1516 			f2fs_put_dnode(&dn);
1517 
1518 			f2fs_unlock_op(sbi);
1519 			up_write(&F2FS_I(inode)->i_mmap_sem);
1520 			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1521 
1522 			f2fs_balance_fs(sbi, dn.node_changed);
1523 
1524 			if (ret)
1525 				goto out;
1526 
1527 			index = end;
1528 			new_size = max_t(loff_t, new_size,
1529 					(loff_t)index << PAGE_SHIFT);
1530 		}
1531 
1532 		if (off_end) {
1533 			ret = fill_zero(inode, pg_end, 0, off_end);
1534 			if (ret)
1535 				goto out;
1536 
1537 			new_size = max_t(loff_t, new_size, offset + len);
1538 		}
1539 	}
1540 
1541 out:
1542 	if (new_size > i_size_read(inode)) {
1543 		if (mode & FALLOC_FL_KEEP_SIZE)
1544 			file_set_keep_isize(inode);
1545 		else
1546 			f2fs_i_size_write(inode, new_size);
1547 	}
1548 	return ret;
1549 }
1550 
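/*
 * Insert a hole of @len bytes at @offset: truncate preallocated blocks beyond
 * i_size, write back dirty pages, then walk backwards from the last page and
 * exchange blocks upwards by 'delta' pages so existing data moves towards the
 * end of file, and finally grow i_size by @len.
 */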
1551 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1552 {
1553 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1554 	pgoff_t nr, pg_start, pg_end, delta, idx;
1555 	loff_t new_size;
1556 	int ret = 0;
1557 
1558 	new_size = i_size_read(inode) + len;
1559 	ret = inode_newsize_ok(inode, new_size);
1560 	if (ret)
1561 		return ret;
1562 
1563 	if (offset >= i_size_read(inode))
1564 		return -EINVAL;
1565 
1566 	/* insert range should be aligned to the f2fs block size. */
1567 	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1568 		return -EINVAL;
1569 
1570 	ret = f2fs_convert_inline_inode(inode);
1571 	if (ret)
1572 		return ret;
1573 
1574 	f2fs_balance_fs(sbi, true);
1575 
1576 	down_write(&F2FS_I(inode)->i_mmap_sem);
1577 	ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1578 	up_write(&F2FS_I(inode)->i_mmap_sem);
1579 	if (ret)
1580 		return ret;
1581 
1582 	/* write out all dirty pages from offset */
1583 	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1584 	if (ret)
1585 		return ret;
1586 
1587 	pg_start = offset >> PAGE_SHIFT;
1588 	pg_end = (offset + len) >> PAGE_SHIFT;
1589 	delta = pg_end - pg_start;
1590 	idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1591 
1592 	/* avoid gc operation during block exchange */
1593 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1594 	down_write(&F2FS_I(inode)->i_mmap_sem);
1595 	truncate_pagecache(inode, offset);
1596 
1597 	while (!ret && idx > pg_start) {
1598 		nr = idx - pg_start;
1599 		if (nr > delta)
1600 			nr = delta;
1601 		idx -= nr;
1602 
1603 		f2fs_lock_op(sbi);
1604 		f2fs_drop_extent_tree(inode);
1605 
1606 		ret = __exchange_data_block(inode, inode, idx,
1607 					idx + delta, nr, false);
1608 		f2fs_unlock_op(sbi);
1609 	}
1610 	up_write(&F2FS_I(inode)->i_mmap_sem);
1611 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1612 
1613 	/* write out all moved pages, if possible */
1614 	down_write(&F2FS_I(inode)->i_mmap_sem);
1615 	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1616 	truncate_pagecache(inode, offset);
1617 	up_write(&F2FS_I(inode)->i_mmap_sem);
1618 
1619 	if (!ret)
1620 		f2fs_i_size_write(inode, new_size);
1621 	return ret;
1622 }
1623 
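/*
 * Preallocate blocks for fallocate(): pinned files are allocated section by
 * section from the cold pinned log (running GC first if free sections are
 * short), other files go through a single F2FS_GET_BLOCK_PRE_AIO mapping, and
 * i_size is extended unless FALLOC_FL_KEEP_SIZE is set.
 */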
1624 static int expand_inode_data(struct inode *inode, loff_t offset,
1625 					loff_t len, int mode)
1626 {
1627 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1628 	struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1629 			.m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1630 			.m_may_create = true };
1631 	pgoff_t pg_start, pg_end;
1632 	loff_t new_size = i_size_read(inode);
1633 	loff_t off_end;
1634 	block_t expanded = 0;
1635 	int err;
1636 
1637 	err = inode_newsize_ok(inode, (len + offset));
1638 	if (err)
1639 		return err;
1640 
1641 	err = f2fs_convert_inline_inode(inode);
1642 	if (err)
1643 		return err;
1644 
1645 	f2fs_balance_fs(sbi, true);
1646 
1647 	pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1648 	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1649 	off_end = (offset + len) & (PAGE_SIZE - 1);
1650 
1651 	map.m_lblk = pg_start;
1652 	map.m_len = pg_end - pg_start;
1653 	if (off_end)
1654 		map.m_len++;
1655 
1656 	if (!map.m_len)
1657 		return 0;
1658 
1659 	if (f2fs_is_pinned_file(inode)) {
1660 		block_t sec_blks = BLKS_PER_SEC(sbi);
1661 		block_t sec_len = roundup(map.m_len, sec_blks);
1662 
1663 		map.m_len = sec_blks;
1664 next_alloc:
1665 		if (has_not_enough_free_secs(sbi, 0,
1666 			GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1667 			down_write(&sbi->gc_lock);
1668 			err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
1669 			if (err && err != -ENODATA && err != -EAGAIN)
1670 				goto out_err;
1671 		}
1672 
1673 		down_write(&sbi->pin_sem);
1674 
1675 		f2fs_lock_op(sbi);
1676 		f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED);
1677 		f2fs_unlock_op(sbi);
1678 
1679 		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1680 		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1681 
1682 		up_write(&sbi->pin_sem);
1683 
1684 		expanded += map.m_len;
1685 		sec_len -= map.m_len;
1686 		map.m_lblk += map.m_len;
1687 		if (!err && sec_len)
1688 			goto next_alloc;
1689 
1690 		map.m_len = expanded;
1691 	} else {
1692 		err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1693 		expanded = map.m_len;
1694 	}
1695 out_err:
1696 	if (err) {
1697 		pgoff_t last_off;
1698 
1699 		if (!expanded)
1700 			return err;
1701 
1702 		last_off = pg_start + expanded - 1;
1703 
1704 		/* update new size to the failed position */
1705 		new_size = (last_off == pg_end) ? offset + len :
1706 					(loff_t)(last_off + 1) << PAGE_SHIFT;
1707 	} else {
1708 		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1709 	}
1710 
1711 	if (new_size > i_size_read(inode)) {
1712 		if (mode & FALLOC_FL_KEEP_SIZE)
1713 			file_set_keep_isize(inode);
1714 		else
1715 			f2fs_i_size_write(inode, new_size);
1716 	}
1717 
1718 	return err;
1719 }
1720 
1721 static long f2fs_fallocate(struct file *file, int mode,
1722 				loff_t offset, loff_t len)
1723 {
1724 	struct inode *inode = file_inode(file);
1725 	long ret = 0;
1726 
1727 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1728 		return -EIO;
1729 	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1730 		return -ENOSPC;
1731 	if (!f2fs_is_compress_backend_ready(inode))
1732 		return -EOPNOTSUPP;
1733 
1734 	/* f2fs only supports ->fallocate for regular files */
1735 	if (!S_ISREG(inode->i_mode))
1736 		return -EINVAL;
1737 
1738 	if (IS_ENCRYPTED(inode) &&
1739 		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1740 		return -EOPNOTSUPP;
1741 
1742 	if (f2fs_compressed_file(inode) &&
1743 		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1744 			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1745 		return -EOPNOTSUPP;
1746 
1747 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1748 			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1749 			FALLOC_FL_INSERT_RANGE))
1750 		return -EOPNOTSUPP;
1751 
1752 	inode_lock(inode);
1753 
1754 	ret = file_modified(file);
1755 	if (ret)
1756 		goto out;
1757 
1758 	/*
1759 	 * wait for inflight dio, blocks should be removed after IO
1760 	 * completion.
1761 	 */
1762 	inode_dio_wait(inode);
1763 
1764 	if (mode & FALLOC_FL_PUNCH_HOLE) {
1765 		if (offset >= inode->i_size)
1766 			goto out;
1767 
1768 		ret = punch_hole(inode, offset, len);
1769 	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1770 		ret = f2fs_collapse_range(inode, offset, len);
1771 	} else if (mode & FALLOC_FL_ZERO_RANGE) {
1772 		ret = f2fs_zero_range(inode, offset, len, mode);
1773 	} else if (mode & FALLOC_FL_INSERT_RANGE) {
1774 		ret = f2fs_insert_range(inode, offset, len);
1775 	} else {
1776 		ret = expand_inode_data(inode, offset, len, mode);
1777 	}
1778 
1779 	if (!ret) {
1780 		inode->i_mtime = inode->i_ctime = current_time(inode);
1781 		f2fs_mark_inode_dirty_sync(inode, false);
1782 		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1783 	}
1784 
1785 out:
1786 	inode_unlock(inode);
1787 
1788 	trace_f2fs_fallocate(inode, mode, offset, len, ret);
1789 	return ret;
1790 }
1791 
1792 static int f2fs_release_file(struct inode *inode, struct file *filp)
1793 {
1794 	/*
1795 	 * f2fs_release_file is called on every close. So we should not
1796 	 * drop any in-memory pages due to a close issued by another process.
1797 	 */
1798 	if (!(filp->f_mode & FMODE_WRITE) ||
1799 			atomic_read(&inode->i_writecount) != 1)
1800 		return 0;
1801 
1802 	/* any remaining atomic pages should be discarded */
1803 	if (f2fs_is_atomic_file(inode))
1804 		f2fs_drop_inmem_pages(inode);
1805 	if (f2fs_is_volatile_file(inode)) {
1806 		set_inode_flag(inode, FI_DROP_CACHE);
1807 		filemap_fdatawrite(inode->i_mapping);
1808 		clear_inode_flag(inode, FI_DROP_CACHE);
1809 		clear_inode_flag(inode, FI_VOLATILE_FILE);
1810 		stat_dec_volatile_write(inode);
1811 	}
1812 	return 0;
1813 }
1814 
1815 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1816 {
1817 	struct inode *inode = file_inode(file);
1818 
1819 	/*
1820 	 * If the process doing a transaction crashes, we should roll
1821 	 * back. Otherwise, other readers/writers can see a corrupted database
1822 	 * until all the writers close their files. Since this should be done
1823 	 * before dropping the file lock, it needs to happen in ->flush.
1824 	 */
1825 	if (f2fs_is_atomic_file(inode) &&
1826 			F2FS_I(inode)->inmem_task == current)
1827 		f2fs_drop_inmem_pages(inode);
1828 	return 0;
1829 }
1830 
1831 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1832 {
1833 	struct f2fs_inode_info *fi = F2FS_I(inode);
1834 	u32 masked_flags = fi->i_flags & mask;
1835 
1836 	f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
1837 
1838 	/* Is it quota file? Do not allow user to mess with it */
1839 	if (IS_NOQUOTA(inode))
1840 		return -EPERM;
1841 
1842 	if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1843 		if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1844 			return -EOPNOTSUPP;
1845 		if (!f2fs_empty_dir(inode))
1846 			return -ENOTEMPTY;
1847 	}
1848 
1849 	if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1850 		if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1851 			return -EOPNOTSUPP;
1852 		if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1853 			return -EINVAL;
1854 	}
1855 
1856 	if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1857 		if (masked_flags & F2FS_COMPR_FL) {
1858 			if (!f2fs_disable_compressed_file(inode))
1859 				return -EINVAL;
1860 		} else {
1861 			if (!f2fs_may_compress(inode))
1862 				return -EINVAL;
1863 			if (S_ISREG(inode->i_mode) && inode->i_size)
1864 				return -EINVAL;
1865 
1866 			set_compress_context(inode);
1867 		}
1868 	}
1869 
1870 	fi->i_flags = iflags | (fi->i_flags & ~mask);
1871 	f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1872 					(fi->i_flags & F2FS_NOCOMP_FL));
1873 
1874 	if (fi->i_flags & F2FS_PROJINHERIT_FL)
1875 		set_inode_flag(inode, FI_PROJ_INHERIT);
1876 	else
1877 		clear_inode_flag(inode, FI_PROJ_INHERIT);
1878 
1879 	inode->i_ctime = current_time(inode);
1880 	f2fs_set_inode_flags(inode);
1881 	f2fs_mark_inode_dirty_sync(inode, true);
1882 	return 0;
1883 }
1884 
1885 /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
1886 
1887 /*
1888  * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1889  * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1890  * F2FS_GETTABLE_FS_FL.  To also make it settable via FS_IOC_SETFLAGS, also add
1891  * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1892  */
1893 
1894 static const struct {
1895 	u32 iflag;
1896 	u32 fsflag;
1897 } f2fs_fsflags_map[] = {
1898 	{ F2FS_COMPR_FL,	FS_COMPR_FL },
1899 	{ F2FS_SYNC_FL,		FS_SYNC_FL },
1900 	{ F2FS_IMMUTABLE_FL,	FS_IMMUTABLE_FL },
1901 	{ F2FS_APPEND_FL,	FS_APPEND_FL },
1902 	{ F2FS_NODUMP_FL,	FS_NODUMP_FL },
1903 	{ F2FS_NOATIME_FL,	FS_NOATIME_FL },
1904 	{ F2FS_NOCOMP_FL,	FS_NOCOMP_FL },
1905 	{ F2FS_INDEX_FL,	FS_INDEX_FL },
1906 	{ F2FS_DIRSYNC_FL,	FS_DIRSYNC_FL },
1907 	{ F2FS_PROJINHERIT_FL,	FS_PROJINHERIT_FL },
1908 	{ F2FS_CASEFOLD_FL,	FS_CASEFOLD_FL },
1909 };
1910 
1911 #define F2FS_GETTABLE_FS_FL (		\
1912 		FS_COMPR_FL |		\
1913 		FS_SYNC_FL |		\
1914 		FS_IMMUTABLE_FL |	\
1915 		FS_APPEND_FL |		\
1916 		FS_NODUMP_FL |		\
1917 		FS_NOATIME_FL |		\
1918 		FS_NOCOMP_FL |		\
1919 		FS_INDEX_FL |		\
1920 		FS_DIRSYNC_FL |		\
1921 		FS_PROJINHERIT_FL |	\
1922 		FS_ENCRYPT_FL |		\
1923 		FS_INLINE_DATA_FL |	\
1924 		FS_NOCOW_FL |		\
1925 		FS_VERITY_FL |		\
1926 		FS_CASEFOLD_FL)
1927 
1928 #define F2FS_SETTABLE_FS_FL (		\
1929 		FS_COMPR_FL |		\
1930 		FS_SYNC_FL |		\
1931 		FS_IMMUTABLE_FL |	\
1932 		FS_APPEND_FL |		\
1933 		FS_NODUMP_FL |		\
1934 		FS_NOATIME_FL |		\
1935 		FS_NOCOMP_FL |		\
1936 		FS_DIRSYNC_FL |		\
1937 		FS_PROJINHERIT_FL |	\
1938 		FS_CASEFOLD_FL)
1939 
1940 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1941 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1942 {
1943 	u32 fsflags = 0;
1944 	int i;
1945 
1946 	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1947 		if (iflags & f2fs_fsflags_map[i].iflag)
1948 			fsflags |= f2fs_fsflags_map[i].fsflag;
1949 
1950 	return fsflags;
1951 }
1952 
1953 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1954 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1955 {
1956 	u32 iflags = 0;
1957 	int i;
1958 
1959 	for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1960 		if (fsflags & f2fs_fsflags_map[i].fsflag)
1961 			iflags |= f2fs_fsflags_map[i].iflag;
1962 
1963 	return iflags;
1964 }
1965 
1966 static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1967 {
1968 	struct inode *inode = file_inode(filp);
1969 	struct f2fs_inode_info *fi = F2FS_I(inode);
1970 	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1971 
1972 	if (IS_ENCRYPTED(inode))
1973 		fsflags |= FS_ENCRYPT_FL;
1974 	if (IS_VERITY(inode))
1975 		fsflags |= FS_VERITY_FL;
1976 	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1977 		fsflags |= FS_INLINE_DATA_FL;
1978 	if (is_inode_flag_set(inode, FI_PIN_FILE))
1979 		fsflags |= FS_NOCOW_FL;
1980 
1981 	fsflags &= F2FS_GETTABLE_FS_FL;
1982 
1983 	return put_user(fsflags, (int __user *)arg);
1984 }
1985 
1986 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1987 {
1988 	struct inode *inode = file_inode(filp);
1989 	struct f2fs_inode_info *fi = F2FS_I(inode);
1990 	u32 fsflags, old_fsflags;
1991 	u32 iflags;
1992 	int ret;
1993 
1994 	if (!inode_owner_or_capable(inode))
1995 		return -EACCES;
1996 
1997 	if (get_user(fsflags, (int __user *)arg))
1998 		return -EFAULT;
1999 
2000 	if (fsflags & ~F2FS_GETTABLE_FS_FL)
2001 		return -EOPNOTSUPP;
2002 	fsflags &= F2FS_SETTABLE_FS_FL;
2003 
2004 	iflags = f2fs_fsflags_to_iflags(fsflags);
2005 	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
2006 		return -EOPNOTSUPP;
2007 
2008 	ret = mnt_want_write_file(filp);
2009 	if (ret)
2010 		return ret;
2011 
2012 	inode_lock(inode);
2013 
2014 	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
2015 	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
2016 	if (ret)
2017 		goto out;
2018 
2019 	ret = f2fs_setflags_common(inode, iflags,
2020 			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
2021 out:
2022 	inode_unlock(inode);
2023 	mnt_drop_write_file(filp);
2024 	return ret;
2025 }
2026 
2027 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2028 {
2029 	struct inode *inode = file_inode(filp);
2030 
2031 	return put_user(inode->i_generation, (int __user *)arg);
2032 }
2033 
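/*
 * f2fs_ioc_start_atomic_write() opens an atomic-write session on a
 * regular file: it rejects O_DIRECT opens, converts any inline data,
 * flushes existing dirty pages, links the inode into the per-sb
 * ATOMIC_FILE list and sets FI_ATOMIC_FILE so that subsequent writes
 * are staged in memory until they are committed or dropped.
 * A minimal user-space sketch, assuming the F2FS_IOC_* definitions
 * exported through uapi/linux/f2fs.h:
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 */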
2034 static int f2fs_ioc_start_atomic_write(struct file *filp)
2035 {
2036 	struct inode *inode = file_inode(filp);
2037 	struct f2fs_inode_info *fi = F2FS_I(inode);
2038 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2039 	int ret;
2040 
2041 	if (!(filp->f_mode & FMODE_WRITE))
2042 		return -EBADF;
2043 
2044 	if (!inode_owner_or_capable(inode))
2045 		return -EACCES;
2046 
2047 	if (!S_ISREG(inode->i_mode))
2048 		return -EINVAL;
2049 
2050 	if (filp->f_flags & O_DIRECT)
2051 		return -EINVAL;
2052 
2053 	ret = mnt_want_write_file(filp);
2054 	if (ret)
2055 		return ret;
2056 
2057 	inode_lock(inode);
2058 
2059 	if (!f2fs_disable_compressed_file(inode)) {
2060 		ret = -EINVAL;
2061 		goto out;
2062 	}
2063 
2064 	if (f2fs_is_atomic_file(inode)) {
2065 		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
2066 			ret = -EINVAL;
2067 		goto out;
2068 	}
2069 
2070 	ret = f2fs_convert_inline_inode(inode);
2071 	if (ret)
2072 		goto out;
2073 
2074 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2075 
2076 	/*
2077 	 * Should wait for end_io so that F2FS_WB_CP_DATA is counted
2078 	 * correctly by f2fs_is_atomic_file.
2079 	 */
2080 	if (get_dirty_pages(inode))
2081 		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2082 			  inode->i_ino, get_dirty_pages(inode));
2083 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2084 	if (ret) {
2085 		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2086 		goto out;
2087 	}
2088 
2089 	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2090 	if (list_empty(&fi->inmem_ilist))
2091 		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2092 	sbi->atomic_files++;
2093 	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2094 
2095 	/* add the inode to inmem_list first, then set FI_ATOMIC_FILE */
2096 	set_inode_flag(inode, FI_ATOMIC_FILE);
2097 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2098 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2099 
2100 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2101 	F2FS_I(inode)->inmem_task = current;
2102 	stat_update_max_atomic_write(inode);
2103 out:
2104 	inode_unlock(inode);
2105 	mnt_drop_write_file(filp);
2106 	return ret;
2107 }
2108 
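/*
 * f2fs_ioc_commit_atomic_write() ends an atomic-write session: the
 * staged in-memory pages are written back via f2fs_commit_inmem_pages(),
 * persisted with f2fs_do_sync_file(), and then dropped.  If a revoke
 * was requested in the meantime, the commit fails with -EINVAL.
 */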
2109 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2110 {
2111 	struct inode *inode = file_inode(filp);
2112 	int ret;
2113 
2114 	if (!(filp->f_mode & FMODE_WRITE))
2115 		return -EBADF;
2116 
2117 	if (!inode_owner_or_capable(inode))
2118 		return -EACCES;
2119 
2120 	ret = mnt_want_write_file(filp);
2121 	if (ret)
2122 		return ret;
2123 
2124 	f2fs_balance_fs(F2FS_I_SB(inode), true);
2125 
2126 	inode_lock(inode);
2127 
2128 	if (f2fs_is_volatile_file(inode)) {
2129 		ret = -EINVAL;
2130 		goto err_out;
2131 	}
2132 
2133 	if (f2fs_is_atomic_file(inode)) {
2134 		ret = f2fs_commit_inmem_pages(inode);
2135 		if (ret)
2136 			goto err_out;
2137 
2138 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2139 		if (!ret)
2140 			f2fs_drop_inmem_pages(inode);
2141 	} else {
2142 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2143 	}
2144 err_out:
2145 	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2146 		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2147 		ret = -EINVAL;
2148 	}
2149 	inode_unlock(inode);
2150 	mnt_drop_write_file(filp);
2151 	return ret;
2152 }
2153 
2154 static int f2fs_ioc_start_volatile_write(struct file *filp)
2155 {
2156 	struct inode *inode = file_inode(filp);
2157 	int ret;
2158 
2159 	if (!(filp->f_mode & FMODE_WRITE))
2160 		return -EBADF;
2161 
2162 	if (!inode_owner_or_capable(inode))
2163 		return -EACCES;
2164 
2165 	if (!S_ISREG(inode->i_mode))
2166 		return -EINVAL;
2167 
2168 	ret = mnt_want_write_file(filp);
2169 	if (ret)
2170 		return ret;
2171 
2172 	inode_lock(inode);
2173 
2174 	if (f2fs_is_volatile_file(inode))
2175 		goto out;
2176 
2177 	ret = f2fs_convert_inline_inode(inode);
2178 	if (ret)
2179 		goto out;
2180 
2181 	stat_inc_volatile_write(inode);
2182 	stat_update_max_volatile_write(inode);
2183 
2184 	set_inode_flag(inode, FI_VOLATILE_FILE);
2185 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2186 out:
2187 	inode_unlock(inode);
2188 	mnt_drop_write_file(filp);
2189 	return ret;
2190 }
2191 
2192 static int f2fs_ioc_release_volatile_write(struct file *filp)
2193 {
2194 	struct inode *inode = file_inode(filp);
2195 	int ret;
2196 
2197 	if (!(filp->f_mode & FMODE_WRITE))
2198 		return -EBADF;
2199 
2200 	if (!inode_owner_or_capable(inode))
2201 		return -EACCES;
2202 
2203 	ret = mnt_want_write_file(filp);
2204 	if (ret)
2205 		return ret;
2206 
2207 	inode_lock(inode);
2208 
2209 	if (!f2fs_is_volatile_file(inode))
2210 		goto out;
2211 
2212 	if (!f2fs_is_first_block_written(inode)) {
2213 		ret = truncate_partial_data_page(inode, 0, true);
2214 		goto out;
2215 	}
2216 
2217 	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2218 out:
2219 	inode_unlock(inode);
2220 	mnt_drop_write_file(filp);
2221 	return ret;
2222 }
2223 
2224 static int f2fs_ioc_abort_volatile_write(struct file *filp)
2225 {
2226 	struct inode *inode = file_inode(filp);
2227 	int ret;
2228 
2229 	if (!(filp->f_mode & FMODE_WRITE))
2230 		return -EBADF;
2231 
2232 	if (!inode_owner_or_capable(inode))
2233 		return -EACCES;
2234 
2235 	ret = mnt_want_write_file(filp);
2236 	if (ret)
2237 		return ret;
2238 
2239 	inode_lock(inode);
2240 
2241 	if (f2fs_is_atomic_file(inode))
2242 		f2fs_drop_inmem_pages(inode);
2243 	if (f2fs_is_volatile_file(inode)) {
2244 		clear_inode_flag(inode, FI_VOLATILE_FILE);
2245 		stat_dec_volatile_write(inode);
2246 		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2247 	}
2248 
2249 	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2250 
2251 	inode_unlock(inode);
2252 
2253 	mnt_drop_write_file(filp);
2254 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2255 	return ret;
2256 }
2257 
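/*
 * f2fs_ioc_shutdown() takes the filesystem down in the requested mode:
 * freeze the block device, write a final checkpoint, or stop
 * checkpointing immediately; most modes then set SBI_IS_SHUTDOWN and
 * stop the GC and discard threads.  A minimal user-space sketch,
 * assuming the F2FS_IOC_SHUTDOWN and F2FS_GOING_DOWN_* definitions from
 * uapi/linux/f2fs.h:
 *
 *	__u32 mode = F2FS_GOING_DOWN_METASYNC;
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &mode);
 */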
2258 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2259 {
2260 	struct inode *inode = file_inode(filp);
2261 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2262 	struct super_block *sb = sbi->sb;
2263 	__u32 in;
2264 	int ret = 0;
2265 
2266 	if (!capable(CAP_SYS_ADMIN))
2267 		return -EPERM;
2268 
2269 	if (get_user(in, (__u32 __user *)arg))
2270 		return -EFAULT;
2271 
2272 	if (in != F2FS_GOING_DOWN_FULLSYNC) {
2273 		ret = mnt_want_write_file(filp);
2274 		if (ret) {
2275 			if (ret == -EROFS) {
2276 				ret = 0;
2277 				f2fs_stop_checkpoint(sbi, false);
2278 				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2279 				trace_f2fs_shutdown(sbi, in, ret);
2280 			}
2281 			return ret;
2282 		}
2283 	}
2284 
2285 	switch (in) {
2286 	case F2FS_GOING_DOWN_FULLSYNC:
2287 		sb = freeze_bdev(sb->s_bdev);
2288 		if (IS_ERR(sb)) {
2289 			ret = PTR_ERR(sb);
2290 			goto out;
2291 		}
2292 		if (sb) {
2293 			f2fs_stop_checkpoint(sbi, false);
2294 			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2295 			thaw_bdev(sb->s_bdev, sb);
2296 		}
2297 		break;
2298 	case F2FS_GOING_DOWN_METASYNC:
2299 		/* do checkpoint only */
2300 		ret = f2fs_sync_fs(sb, 1);
2301 		if (ret)
2302 			goto out;
2303 		f2fs_stop_checkpoint(sbi, false);
2304 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2305 		break;
2306 	case F2FS_GOING_DOWN_NOSYNC:
2307 		f2fs_stop_checkpoint(sbi, false);
2308 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2309 		break;
2310 	case F2FS_GOING_DOWN_METAFLUSH:
2311 		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2312 		f2fs_stop_checkpoint(sbi, false);
2313 		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2314 		break;
2315 	case F2FS_GOING_DOWN_NEED_FSCK:
2316 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2317 		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2318 		set_sbi_flag(sbi, SBI_IS_DIRTY);
2319 		/* do checkpoint only */
2320 		ret = f2fs_sync_fs(sb, 1);
2321 		goto out;
2322 	default:
2323 		ret = -EINVAL;
2324 		goto out;
2325 	}
2326 
2327 	/*
2328 	 * grab sb->s_umount to avoid racing w/ remount() and other shutdown
2329 	 * paths.
2330 	 */
2331 	down_write(&sbi->sb->s_umount);
2332 
2333 	f2fs_stop_gc_thread(sbi);
2334 	f2fs_stop_discard_thread(sbi);
2335 
2336 	f2fs_drop_discard_cmd(sbi);
2337 	clear_opt(sbi, DISCARD);
2338 
2339 	up_write(&sbi->sb->s_umount);
2340 
2341 	f2fs_update_time(sbi, REQ_TIME);
2342 out:
2343 	if (in != F2FS_GOING_DOWN_FULLSYNC)
2344 		mnt_drop_write_file(filp);
2345 
2346 	trace_f2fs_shutdown(sbi, in, ret);
2347 
2348 	return ret;
2349 }
2350 
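/*
 * f2fs_ioc_fitrim() services the generic FITRIM ioctl: it copies a
 * struct fstrim_range from user space, raises range.minlen to the
 * device's discard granularity, runs f2fs_trim_fs() and copies the
 * updated range back to the caller.
 */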
2351 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2352 {
2353 	struct inode *inode = file_inode(filp);
2354 	struct super_block *sb = inode->i_sb;
2355 	struct request_queue *q = bdev_get_queue(sb->s_bdev);
2356 	struct fstrim_range range;
2357 	int ret;
2358 
2359 	if (!capable(CAP_SYS_ADMIN))
2360 		return -EPERM;
2361 
2362 	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2363 		return -EOPNOTSUPP;
2364 
2365 	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2366 				sizeof(range)))
2367 		return -EFAULT;
2368 
2369 	ret = mnt_want_write_file(filp);
2370 	if (ret)
2371 		return ret;
2372 
2373 	range.minlen = max((unsigned int)range.minlen,
2374 				q->limits.discard_granularity);
2375 	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2376 	mnt_drop_write_file(filp);
2377 	if (ret < 0)
2378 		return ret;
2379 
2380 	if (copy_to_user((struct fstrim_range __user *)arg, &range,
2381 				sizeof(range)))
2382 		return -EFAULT;
2383 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2384 	return 0;
2385 }
2386 
2387 static bool uuid_is_nonzero(__u8 u[16])
2388 {
2389 	int i;
2390 
2391 	for (i = 0; i < 16; i++)
2392 		if (u[i])
2393 			return true;
2394 	return false;
2395 }
2396 
2397 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2398 {
2399 	struct inode *inode = file_inode(filp);
2400 
2401 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2402 		return -EOPNOTSUPP;
2403 
2404 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2405 
2406 	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2407 }
2408 
2409 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2410 {
2411 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2412 		return -EOPNOTSUPP;
2413 	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2414 }
2415 
2416 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2417 {
2418 	struct inode *inode = file_inode(filp);
2419 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2420 	int err;
2421 
2422 	if (!f2fs_sb_has_encrypt(sbi))
2423 		return -EOPNOTSUPP;
2424 
2425 	err = mnt_want_write_file(filp);
2426 	if (err)
2427 		return err;
2428 
2429 	down_write(&sbi->sb_lock);
2430 
2431 	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2432 		goto got_it;
2433 
2434 	/* update superblock with uuid */
2435 	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2436 
2437 	err = f2fs_commit_super(sbi, false);
2438 	if (err) {
2439 		/* undo new data */
2440 		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2441 		goto out_err;
2442 	}
2443 got_it:
2444 	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2445 									16))
2446 		err = -EFAULT;
2447 out_err:
2448 	up_write(&sbi->sb_lock);
2449 	mnt_drop_write_file(filp);
2450 	return err;
2451 }
2452 
2453 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2454 					     unsigned long arg)
2455 {
2456 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2457 		return -EOPNOTSUPP;
2458 
2459 	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2460 }
2461 
2462 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2463 {
2464 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2465 		return -EOPNOTSUPP;
2466 
2467 	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2468 }
2469 
2470 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2471 {
2472 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2473 		return -EOPNOTSUPP;
2474 
2475 	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2476 }
2477 
2478 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2479 						    unsigned long arg)
2480 {
2481 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2482 		return -EOPNOTSUPP;
2483 
2484 	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2485 }
2486 
2487 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2488 					      unsigned long arg)
2489 {
2490 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2491 		return -EOPNOTSUPP;
2492 
2493 	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2494 }
2495 
2496 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2497 {
2498 	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2499 		return -EOPNOTSUPP;
2500 
2501 	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2502 }
2503 
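/*
 * f2fs_ioc_gc() runs one round of garbage collection.  The __u32
 * argument selects synchronous GC (blocking on gc_lock) versus a
 * best-effort trylock that fails with -EBUSY.  A minimal user-space
 * sketch, assuming the F2FS_IOC_GARBAGE_COLLECT definition from
 * uapi/linux/f2fs.h:
 *
 *	__u32 sync = 1;
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
 */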
2504 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2505 {
2506 	struct inode *inode = file_inode(filp);
2507 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2508 	__u32 sync;
2509 	int ret;
2510 
2511 	if (!capable(CAP_SYS_ADMIN))
2512 		return -EPERM;
2513 
2514 	if (get_user(sync, (__u32 __user *)arg))
2515 		return -EFAULT;
2516 
2517 	if (f2fs_readonly(sbi->sb))
2518 		return -EROFS;
2519 
2520 	ret = mnt_want_write_file(filp);
2521 	if (ret)
2522 		return ret;
2523 
2524 	if (!sync) {
2525 		if (!down_write_trylock(&sbi->gc_lock)) {
2526 			ret = -EBUSY;
2527 			goto out;
2528 		}
2529 	} else {
2530 		down_write(&sbi->gc_lock);
2531 	}
2532 
2533 	ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
2534 out:
2535 	mnt_drop_write_file(filp);
2536 	return ret;
2537 }
2538 
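/*
 * __f2fs_ioc_gc_range() garbage-collects the sections overlapping the
 * caller-supplied [start, start + len) block range, advancing by
 * BLKS_PER_SEC(sbi) per pass and turning -EBUSY into -EAGAIN so user
 * space can retry.
 */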
2539 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2540 {
2541 	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2542 	u64 end;
2543 	int ret;
2544 
2545 	if (!capable(CAP_SYS_ADMIN))
2546 		return -EPERM;
2547 	if (f2fs_readonly(sbi->sb))
2548 		return -EROFS;
2549 
2550 	end = range->start + range->len;
2551 	if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2552 					end >= MAX_BLKADDR(sbi))
2553 		return -EINVAL;
2554 
2555 	ret = mnt_want_write_file(filp);
2556 	if (ret)
2557 		return ret;
2558 
2559 do_more:
2560 	if (!range->sync) {
2561 		if (!down_write_trylock(&sbi->gc_lock)) {
2562 			ret = -EBUSY;
2563 			goto out;
2564 		}
2565 	} else {
2566 		down_write(&sbi->gc_lock);
2567 	}
2568 
2569 	ret = f2fs_gc(sbi, range->sync, true, false,
2570 				GET_SEGNO(sbi, range->start));
2571 	if (ret) {
2572 		if (ret == -EBUSY)
2573 			ret = -EAGAIN;
2574 		goto out;
2575 	}
2576 	range->start += BLKS_PER_SEC(sbi);
2577 	if (range->start <= end)
2578 		goto do_more;
2579 out:
2580 	mnt_drop_write_file(filp);
2581 	return ret;
2582 }
2583 
2584 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2585 {
2586 	struct f2fs_gc_range range;
2587 
2588 	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2589 							sizeof(range)))
2590 		return -EFAULT;
2591 	return __f2fs_ioc_gc_range(filp, &range);
2592 }
2593 
2594 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2595 {
2596 	struct inode *inode = file_inode(filp);
2597 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2598 	int ret;
2599 
2600 	if (!capable(CAP_SYS_ADMIN))
2601 		return -EPERM;
2602 
2603 	if (f2fs_readonly(sbi->sb))
2604 		return -EROFS;
2605 
2606 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2607 		f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2608 		return -EINVAL;
2609 	}
2610 
2611 	ret = mnt_want_write_file(filp);
2612 	if (ret)
2613 		return ret;
2614 
2615 	ret = f2fs_sync_fs(sbi->sb, 1);
2616 
2617 	mnt_drop_write_file(filp);
2618 	return ret;
2619 }
2620 
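/*
 * f2fs_defragment_range() first scans the requested range with
 * f2fs_map_blocks() to decide whether it is fragmented at all; if so,
 * it dirties every mapped page (one segment's worth at a time, under
 * FI_DO_DEFRAG) and writes the pages back so the allocator lays the
 * blocks out contiguously.  On success, range->len is set to the number
 * of bytes that were actually moved.
 */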
2621 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2622 					struct file *filp,
2623 					struct f2fs_defragment *range)
2624 {
2625 	struct inode *inode = file_inode(filp);
2626 	struct f2fs_map_blocks map = { .m_next_extent = NULL,
2627 					.m_seg_type = NO_CHECK_TYPE,
2628 					.m_may_create = false };
2629 	struct extent_info ei = {0, 0, 0};
2630 	pgoff_t pg_start, pg_end, next_pgofs;
2631 	unsigned int blk_per_seg = sbi->blocks_per_seg;
2632 	unsigned int total = 0, sec_num;
2633 	block_t blk_end = 0;
2634 	bool fragmented = false;
2635 	int err;
2636 
2637 	/* if in-place-update policy is enabled, don't waste time here */
2638 	if (f2fs_should_update_inplace(inode, NULL))
2639 		return -EINVAL;
2640 
2641 	pg_start = range->start >> PAGE_SHIFT;
2642 	pg_end = (range->start + range->len) >> PAGE_SHIFT;
2643 
2644 	f2fs_balance_fs(sbi, true);
2645 
2646 	inode_lock(inode);
2647 
2648 	if (f2fs_is_atomic_file(inode)) {
2649 		err = -EINVAL;
2650 		goto out;
2651 	}
2652 
2653 	/* writeback all dirty pages in the range */
2654 	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2655 						range->start + range->len - 1);
2656 	if (err)
2657 		goto out;
2658 
2659 	/*
2660 	 * Look up mapping info in the extent cache; skip defragmenting if the
2661 	 * physical block addresses are contiguous.
2662 	 */
2663 	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2664 		if (ei.fofs + ei.len >= pg_end)
2665 			goto out;
2666 	}
2667 
2668 	map.m_lblk = pg_start;
2669 	map.m_next_pgofs = &next_pgofs;
2670 
2671 	/*
2672 	 * Look up mapping info in the dnode page cache; skip defragmenting if
2673 	 * all physical block addresses are contiguous, even if there are
2674 	 * hole(s) in the logical blocks.
2675 	 */
2676 	while (map.m_lblk < pg_end) {
2677 		map.m_len = pg_end - map.m_lblk;
2678 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2679 		if (err)
2680 			goto out;
2681 
2682 		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2683 			map.m_lblk = next_pgofs;
2684 			continue;
2685 		}
2686 
2687 		if (blk_end && blk_end != map.m_pblk)
2688 			fragmented = true;
2689 
2690 		/* record total count of blocks that we're going to move */
2691 		total += map.m_len;
2692 
2693 		blk_end = map.m_pblk + map.m_len;
2694 
2695 		map.m_lblk += map.m_len;
2696 	}
2697 
2698 	if (!fragmented) {
2699 		total = 0;
2700 		goto out;
2701 	}
2702 
2703 	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2704 
2705 	/*
2706 	 * Make sure there are enough free sections for LFS allocation; this
2707 	 * avoids running defragmentation in SSR mode when free sections are
2708 	 * being allocated intensively.
2709 	 */
2710 	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2711 		err = -EAGAIN;
2712 		goto out;
2713 	}
2714 
2715 	map.m_lblk = pg_start;
2716 	map.m_len = pg_end - pg_start;
2717 	total = 0;
2718 
2719 	while (map.m_lblk < pg_end) {
2720 		pgoff_t idx;
2721 		int cnt = 0;
2722 
2723 do_map:
2724 		map.m_len = pg_end - map.m_lblk;
2725 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2726 		if (err)
2727 			goto clear_out;
2728 
2729 		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2730 			map.m_lblk = next_pgofs;
2731 			goto check;
2732 		}
2733 
2734 		set_inode_flag(inode, FI_DO_DEFRAG);
2735 
2736 		idx = map.m_lblk;
2737 		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2738 			struct page *page;
2739 
2740 			page = f2fs_get_lock_data_page(inode, idx, true);
2741 			if (IS_ERR(page)) {
2742 				err = PTR_ERR(page);
2743 				goto clear_out;
2744 			}
2745 
2746 			set_page_dirty(page);
2747 			f2fs_put_page(page, 1);
2748 
2749 			idx++;
2750 			cnt++;
2751 			total++;
2752 		}
2753 
2754 		map.m_lblk = idx;
2755 check:
2756 		if (map.m_lblk < pg_end && cnt < blk_per_seg)
2757 			goto do_map;
2758 
2759 		clear_inode_flag(inode, FI_DO_DEFRAG);
2760 
2761 		err = filemap_fdatawrite(inode->i_mapping);
2762 		if (err)
2763 			goto out;
2764 	}
2765 clear_out:
2766 	clear_inode_flag(inode, FI_DO_DEFRAG);
2767 out:
2768 	inode_unlock(inode);
2769 	if (!err)
2770 		range->len = (u64)total << PAGE_SHIFT;
2771 	return err;
2772 }
2773 
2774 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2775 {
2776 	struct inode *inode = file_inode(filp);
2777 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2778 	struct f2fs_defragment range;
2779 	int err;
2780 
2781 	if (!capable(CAP_SYS_ADMIN))
2782 		return -EPERM;
2783 
2784 	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2785 		return -EINVAL;
2786 
2787 	if (f2fs_readonly(sbi->sb))
2788 		return -EROFS;
2789 
2790 	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2791 							sizeof(range)))
2792 		return -EFAULT;
2793 
2794 	/* verify alignment of offset & size */
2795 	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2796 		return -EINVAL;
2797 
2798 	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2799 					sbi->max_file_blocks))
2800 		return -EINVAL;
2801 
2802 	err = mnt_want_write_file(filp);
2803 	if (err)
2804 		return err;
2805 
2806 	err = f2fs_defragment_range(sbi, filp, &range);
2807 	mnt_drop_write_file(filp);
2808 
2809 	f2fs_update_time(sbi, REQ_TIME);
2810 	if (err < 0)
2811 		return err;
2812 
2813 	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2814 							sizeof(range)))
2815 		return -EFAULT;
2816 
2817 	return 0;
2818 }
2819 
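/*
 * f2fs_move_file_range() backs the move-range ioctl: it exchanges
 * block-aligned extents between two regular files on the same mount.
 * Both files are flushed and converted out of inline form, then
 * __exchange_data_block() swaps the block pointers under f2fs_lock_op(),
 * and the destination i_size is extended if the moved data grew it.
 */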
2820 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2821 			struct file *file_out, loff_t pos_out, size_t len)
2822 {
2823 	struct inode *src = file_inode(file_in);
2824 	struct inode *dst = file_inode(file_out);
2825 	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2826 	size_t olen = len, dst_max_i_size = 0;
2827 	size_t dst_osize;
2828 	int ret;
2829 
2830 	if (file_in->f_path.mnt != file_out->f_path.mnt ||
2831 				src->i_sb != dst->i_sb)
2832 		return -EXDEV;
2833 
2834 	if (unlikely(f2fs_readonly(src->i_sb)))
2835 		return -EROFS;
2836 
2837 	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2838 		return -EINVAL;
2839 
2840 	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2841 		return -EOPNOTSUPP;
2842 
2843 	if (pos_out < 0 || pos_in < 0)
2844 		return -EINVAL;
2845 
2846 	if (src == dst) {
2847 		if (pos_in == pos_out)
2848 			return 0;
2849 		if (pos_out > pos_in && pos_out < pos_in + len)
2850 			return -EINVAL;
2851 	}
2852 
2853 	inode_lock(src);
2854 	if (src != dst) {
2855 		ret = -EBUSY;
2856 		if (!inode_trylock(dst))
2857 			goto out;
2858 	}
2859 
2860 	if (f2fs_compressed_file(src) || f2fs_compressed_file(dst)) {
2861 		ret = -EOPNOTSUPP;
2862 		goto out_unlock;
2863 	}
2864 
2865 	if (f2fs_is_atomic_file(src) || f2fs_is_atomic_file(dst)) {
2866 		ret = -EINVAL;
2867 		goto out_unlock;
2868 	}
2869 
2870 	ret = -EINVAL;
2871 	if (pos_in + len > src->i_size || pos_in + len < pos_in)
2872 		goto out_unlock;
2873 	if (len == 0)
2874 		olen = len = src->i_size - pos_in;
2875 	if (pos_in + len == src->i_size)
2876 		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2877 	if (len == 0) {
2878 		ret = 0;
2879 		goto out_unlock;
2880 	}
2881 
2882 	dst_osize = dst->i_size;
2883 	if (pos_out + olen > dst->i_size)
2884 		dst_max_i_size = pos_out + olen;
2885 
2886 	/* verify the end result is block aligned */
2887 	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2888 			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2889 			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2890 		goto out_unlock;
2891 
2892 	ret = f2fs_convert_inline_inode(src);
2893 	if (ret)
2894 		goto out_unlock;
2895 
2896 	ret = f2fs_convert_inline_inode(dst);
2897 	if (ret)
2898 		goto out_unlock;
2899 
2900 	/* write out all dirty pages from offset */
2901 	ret = filemap_write_and_wait_range(src->i_mapping,
2902 					pos_in, pos_in + len);
2903 	if (ret)
2904 		goto out_unlock;
2905 
2906 	ret = filemap_write_and_wait_range(dst->i_mapping,
2907 					pos_out, pos_out + len);
2908 	if (ret)
2909 		goto out_unlock;
2910 
2911 	f2fs_balance_fs(sbi, true);
2912 
2913 	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2914 	if (src != dst) {
2915 		ret = -EBUSY;
2916 		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2917 			goto out_src;
2918 	}
2919 
2920 	f2fs_lock_op(sbi);
2921 	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2922 				pos_out >> F2FS_BLKSIZE_BITS,
2923 				len >> F2FS_BLKSIZE_BITS, false);
2924 
2925 	if (!ret) {
2926 		if (dst_max_i_size)
2927 			f2fs_i_size_write(dst, dst_max_i_size);
2928 		else if (dst_osize != dst->i_size)
2929 			f2fs_i_size_write(dst, dst_osize);
2930 	}
2931 	f2fs_unlock_op(sbi);
2932 
2933 	if (src != dst)
2934 		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2935 out_src:
2936 	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2937 out_unlock:
2938 	if (src != dst)
2939 		inode_unlock(dst);
2940 out:
2941 	inode_unlock(src);
2942 	return ret;
2943 }
2944 
2945 static int __f2fs_ioc_move_range(struct file *filp,
2946 				struct f2fs_move_range *range)
2947 {
2948 	struct fd dst;
2949 	int err;
2950 
2951 	if (!(filp->f_mode & FMODE_READ) ||
2952 			!(filp->f_mode & FMODE_WRITE))
2953 		return -EBADF;
2954 
2955 	dst = fdget(range->dst_fd);
2956 	if (!dst.file)
2957 		return -EBADF;
2958 
2959 	if (!(dst.file->f_mode & FMODE_WRITE)) {
2960 		err = -EBADF;
2961 		goto err_out;
2962 	}
2963 
2964 	err = mnt_want_write_file(filp);
2965 	if (err)
2966 		goto err_out;
2967 
2968 	err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2969 					range->pos_out, range->len);
2970 
2971 	mnt_drop_write_file(filp);
2972 err_out:
2973 	fdput(dst);
2974 	return err;
2975 }
2976 
2977 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2978 {
2979 	struct f2fs_move_range range;
2980 
2981 	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2982 							sizeof(range)))
2983 		return -EFAULT;
2984 	return __f2fs_ioc_move_range(filp, &range);
2985 }
2986 
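/*
 * f2fs_ioc_flush_device() migrates data off one device of a multi-device
 * filesystem: it forces foreground GC over up to range.segments segments
 * belonging to range.dev_num so their blocks are rewritten elsewhere.
 * It is only allowed when sections consist of a single segment.
 */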
2987 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2988 {
2989 	struct inode *inode = file_inode(filp);
2990 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2991 	struct sit_info *sm = SIT_I(sbi);
2992 	unsigned int start_segno = 0, end_segno = 0;
2993 	unsigned int dev_start_segno = 0, dev_end_segno = 0;
2994 	struct f2fs_flush_device range;
2995 	int ret;
2996 
2997 	if (!capable(CAP_SYS_ADMIN))
2998 		return -EPERM;
2999 
3000 	if (f2fs_readonly(sbi->sb))
3001 		return -EROFS;
3002 
3003 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
3004 		return -EINVAL;
3005 
3006 	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
3007 							sizeof(range)))
3008 		return -EFAULT;
3009 
3010 	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
3011 			__is_large_section(sbi)) {
3012 		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
3013 			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
3014 		return -EINVAL;
3015 	}
3016 
3017 	ret = mnt_want_write_file(filp);
3018 	if (ret)
3019 		return ret;
3020 
3021 	if (range.dev_num != 0)
3022 		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
3023 	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
3024 
3025 	start_segno = sm->last_victim[FLUSH_DEVICE];
3026 	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
3027 		start_segno = dev_start_segno;
3028 	end_segno = min(start_segno + range.segments, dev_end_segno);
3029 
3030 	while (start_segno < end_segno) {
3031 		if (!down_write_trylock(&sbi->gc_lock)) {
3032 			ret = -EBUSY;
3033 			goto out;
3034 		}
3035 		sm->last_victim[GC_CB] = end_segno + 1;
3036 		sm->last_victim[GC_GREEDY] = end_segno + 1;
3037 		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
3038 		ret = f2fs_gc(sbi, true, true, true, start_segno);
3039 		if (ret == -EAGAIN)
3040 			ret = 0;
3041 		else if (ret < 0)
3042 			break;
3043 		start_segno++;
3044 	}
3045 out:
3046 	mnt_drop_write_file(filp);
3047 	return ret;
3048 }
3049 
3050 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
3051 {
3052 	struct inode *inode = file_inode(filp);
3053 	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
3054 
3055 	/* Always report atomic write support; SQLite on Android depends on it. */
3056 	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
3057 
3058 	return put_user(sb_feature, (u32 __user *)arg);
3059 }
3060 
3061 #ifdef CONFIG_QUOTA
3062 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3063 {
3064 	struct dquot *transfer_to[MAXQUOTAS] = {};
3065 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3066 	struct super_block *sb = sbi->sb;
3067 	int err;
3068 
3069 	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3070 	if (IS_ERR(transfer_to[PRJQUOTA]))
3071 		return PTR_ERR(transfer_to[PRJQUOTA]);
3072 
3073 	err = __dquot_transfer(inode, transfer_to);
3074 	if (err)
3075 		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3076 	dqput(transfer_to[PRJQUOTA]);
3077 	return err;
3078 }
3079 
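/*
 * f2fs_ioc_setproject() changes the project ID used for project-quota
 * accounting.  It requires the project_quota feature plus room for
 * i_projid in the extra attribute area, transfers the existing charge
 * via f2fs_transfer_project_quota() and then records the new kprojid.
 */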
3080 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3081 {
3082 	struct inode *inode = file_inode(filp);
3083 	struct f2fs_inode_info *fi = F2FS_I(inode);
3084 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3085 	struct page *ipage;
3086 	kprojid_t kprojid;
3087 	int err;
3088 
3089 	if (!f2fs_sb_has_project_quota(sbi)) {
3090 		if (projid != F2FS_DEF_PROJID)
3091 			return -EOPNOTSUPP;
3092 		else
3093 			return 0;
3094 	}
3095 
3096 	if (!f2fs_has_extra_attr(inode))
3097 		return -EOPNOTSUPP;
3098 
3099 	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3100 
3101 	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3102 		return 0;
3103 
3104 	err = -EPERM;
3105 	/* Is it a quota file? Do not allow the user to mess with it */
3106 	if (IS_NOQUOTA(inode))
3107 		return err;
3108 
3109 	ipage = f2fs_get_node_page(sbi, inode->i_ino);
3110 	if (IS_ERR(ipage))
3111 		return PTR_ERR(ipage);
3112 
3113 	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
3114 								i_projid)) {
3115 		err = -EOVERFLOW;
3116 		f2fs_put_page(ipage, 1);
3117 		return err;
3118 	}
3119 	f2fs_put_page(ipage, 1);
3120 
3121 	err = dquot_initialize(inode);
3122 	if (err)
3123 		return err;
3124 
3125 	f2fs_lock_op(sbi);
3126 	err = f2fs_transfer_project_quota(inode, kprojid);
3127 	if (err)
3128 		goto out_unlock;
3129 
3130 	F2FS_I(inode)->i_projid = kprojid;
3131 	inode->i_ctime = current_time(inode);
3132 	f2fs_mark_inode_dirty_sync(inode, true);
3133 out_unlock:
3134 	f2fs_unlock_op(sbi);
3135 	return err;
3136 }
3137 #else
3138 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3139 {
3140 	return 0;
3141 }
3142 
3143 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3144 {
3145 	if (projid != F2FS_DEF_PROJID)
3146 		return -EOPNOTSUPP;
3147 	return 0;
3148 }
3149 #endif
3150 
3151 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
3152 
3153 /*
3154  * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
3155  * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
3156  * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
3157  */
3158 
3159 static const struct {
3160 	u32 iflag;
3161 	u32 xflag;
3162 } f2fs_xflags_map[] = {
3163 	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
3164 	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
3165 	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
3166 	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
3167 	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
3168 	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
3169 };
3170 
3171 #define F2FS_SUPPORTED_XFLAGS (		\
3172 		FS_XFLAG_SYNC |		\
3173 		FS_XFLAG_IMMUTABLE |	\
3174 		FS_XFLAG_APPEND |	\
3175 		FS_XFLAG_NODUMP |	\
3176 		FS_XFLAG_NOATIME |	\
3177 		FS_XFLAG_PROJINHERIT)
3178 
3179 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
3180 static inline u32 f2fs_iflags_to_xflags(u32 iflags)
3181 {
3182 	u32 xflags = 0;
3183 	int i;
3184 
3185 	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3186 		if (iflags & f2fs_xflags_map[i].iflag)
3187 			xflags |= f2fs_xflags_map[i].xflag;
3188 
3189 	return xflags;
3190 }
3191 
3192 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
3193 static inline u32 f2fs_xflags_to_iflags(u32 xflags)
3194 {
3195 	u32 iflags = 0;
3196 	int i;
3197 
3198 	for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3199 		if (xflags & f2fs_xflags_map[i].xflag)
3200 			iflags |= f2fs_xflags_map[i].iflag;
3201 
3202 	return iflags;
3203 }
3204 
3205 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
3206 {
3207 	struct f2fs_inode_info *fi = F2FS_I(inode);
3208 
3209 	simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
3210 
3211 	if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3212 		fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3213 }
3214 
3215 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
3216 {
3217 	struct inode *inode = file_inode(filp);
3218 	struct fsxattr fa;
3219 
3220 	f2fs_fill_fsxattr(inode, &fa);
3221 
3222 	if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
3223 		return -EFAULT;
3224 	return 0;
3225 }
3226 
3227 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
3228 {
3229 	struct inode *inode = file_inode(filp);
3230 	struct fsxattr fa, old_fa;
3231 	u32 iflags;
3232 	int err;
3233 
3234 	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
3235 		return -EFAULT;
3236 
3237 	/* Make sure caller has proper permission */
3238 	if (!inode_owner_or_capable(inode))
3239 		return -EACCES;
3240 
3241 	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
3242 		return -EOPNOTSUPP;
3243 
3244 	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
3245 	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3246 		return -EOPNOTSUPP;
3247 
3248 	err = mnt_want_write_file(filp);
3249 	if (err)
3250 		return err;
3251 
3252 	inode_lock(inode);
3253 
3254 	f2fs_fill_fsxattr(inode, &old_fa);
3255 	err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
3256 	if (err)
3257 		goto out;
3258 
3259 	err = f2fs_setflags_common(inode, iflags,
3260 			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
3261 	if (err)
3262 		goto out;
3263 
3264 	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
3265 out:
3266 	inode_unlock(inode);
3267 	mnt_drop_write_file(filp);
3268 	return err;
3269 }
3270 
3271 int f2fs_pin_file_control(struct inode *inode, bool inc)
3272 {
3273 	struct f2fs_inode_info *fi = F2FS_I(inode);
3274 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3275 
3276 	/* Use i_gc_failures as a risk signal for a normal file. */
3277 	if (inc)
3278 		f2fs_i_gc_failures_write(inode,
3279 				fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3280 
3281 	if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3282 		f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3283 			  __func__, inode->i_ino,
3284 			  fi->i_gc_failures[GC_FAILURE_PIN]);
3285 		clear_inode_flag(inode, FI_PIN_FILE);
3286 		return -EAGAIN;
3287 	}
3288 	return 0;
3289 }
3290 
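/*
 * f2fs_ioc_set_pin_file() pins (or unpins) a regular file so that GC
 * will not migrate its blocks.  Pinning is refused for files that must
 * be updated out of place or that are atomic, and compression is
 * disabled on the inode first.  On success the current GC_FAILURE_PIN
 * count is returned, matching what f2fs_ioc_get_pin_file() reports.
 */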
3291 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3292 {
3293 	struct inode *inode = file_inode(filp);
3294 	__u32 pin;
3295 	int ret = 0;
3296 
3297 	if (get_user(pin, (__u32 __user *)arg))
3298 		return -EFAULT;
3299 
3300 	if (!S_ISREG(inode->i_mode))
3301 		return -EINVAL;
3302 
3303 	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3304 		return -EROFS;
3305 
3306 	ret = mnt_want_write_file(filp);
3307 	if (ret)
3308 		return ret;
3309 
3310 	inode_lock(inode);
3311 
3312 	if (f2fs_should_update_outplace(inode, NULL)) {
3313 		ret = -EINVAL;
3314 		goto out;
3315 	}
3316 
3317 	if (f2fs_is_atomic_file(inode)) {
3318 		ret = -EINVAL;
3319 		goto out;
3320 	}
3321 
3322 	if (!pin) {
3323 		clear_inode_flag(inode, FI_PIN_FILE);
3324 		f2fs_i_gc_failures_write(inode, 0);
3325 		goto done;
3326 	}
3327 
3328 	if (f2fs_pin_file_control(inode, false)) {
3329 		ret = -EAGAIN;
3330 		goto out;
3331 	}
3332 
3333 	ret = f2fs_convert_inline_inode(inode);
3334 	if (ret)
3335 		goto out;
3336 
3337 	if (!f2fs_disable_compressed_file(inode)) {
3338 		ret = -EOPNOTSUPP;
3339 		goto out;
3340 	}
3341 
3342 	set_inode_flag(inode, FI_PIN_FILE);
3343 	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3344 done:
3345 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3346 out:
3347 	inode_unlock(inode);
3348 	mnt_drop_write_file(filp);
3349 	return ret;
3350 }
3351 
3352 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3353 {
3354 	struct inode *inode = file_inode(filp);
3355 	__u32 pin = 0;
3356 
3357 	if (is_inode_flag_set(inode, FI_PIN_FILE))
3358 		pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3359 	return put_user(pin, (u32 __user *)arg);
3360 }
3361 
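/*
 * f2fs_precache_extents() walks the whole file with
 * F2FS_GET_BLOCK_PRECACHE so that its block mappings are loaded into
 * the extent cache ahead of time; inodes flagged FI_NO_EXTENT are
 * rejected with -EOPNOTSUPP.
 */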
3362 int f2fs_precache_extents(struct inode *inode)
3363 {
3364 	struct f2fs_inode_info *fi = F2FS_I(inode);
3365 	struct f2fs_map_blocks map;
3366 	pgoff_t m_next_extent;
3367 	loff_t end;
3368 	int err;
3369 
3370 	if (is_inode_flag_set(inode, FI_NO_EXTENT))
3371 		return -EOPNOTSUPP;
3372 
3373 	map.m_lblk = 0;
3374 	map.m_pblk = 0;
3375 	map.m_next_pgofs = NULL;
3376 	map.m_next_extent = &m_next_extent;
3377 	map.m_seg_type = NO_CHECK_TYPE;
3378 	map.m_may_create = false;
3379 	end = F2FS_I_SB(inode)->max_file_blocks;
3380 
3381 	while (map.m_lblk < end) {
3382 		map.m_len = end - map.m_lblk;
3383 
3384 		down_write(&fi->i_gc_rwsem[WRITE]);
3385 		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3386 		up_write(&fi->i_gc_rwsem[WRITE]);
3387 		if (err)
3388 			return err;
3389 
3390 		map.m_lblk = m_next_extent;
3391 	}
3392 
3393 	return err;
3394 }
3395 
3396 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3397 {
3398 	return f2fs_precache_extents(file_inode(filp));
3399 }
3400 
3401 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3402 {
3403 	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3404 	__u64 block_count;
3405 
3406 	if (!capable(CAP_SYS_ADMIN))
3407 		return -EPERM;
3408 
3409 	if (f2fs_readonly(sbi->sb))
3410 		return -EROFS;
3411 
3412 	if (copy_from_user(&block_count, (void __user *)arg,
3413 			   sizeof(block_count)))
3414 		return -EFAULT;
3415 
3416 	return f2fs_resize_fs(filp, block_count);
3417 }
3418 
3419 static inline int f2fs_has_feature_verity(struct file *filp)
3420 {
3421 	struct inode *inode = file_inode(filp);
3422 
3423 	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3424 
3425 	if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3426 		f2fs_warn(F2FS_I_SB(inode),
3427 			  "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
3428 			  inode->i_ino);
3429 		return -EOPNOTSUPP;
3430 	}
3431 	return 0;
3432 }
3433 
3434 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3435 {
3436 	int err = f2fs_has_feature_verity(filp);
3437 
3438 	if (err)
3439 		return err;
3440 
3441 	return fsverity_ioctl_enable(filp, (const void __user *)arg);
3442 }
3443 
3444 static int f2fs_ioc_enable_code_sign(struct file *filp, unsigned long arg)
3445 {
3446 	int err = f2fs_has_feature_verity(filp);
3447 
3448 	if (err)
3449 		return err;
3450 
3451 	return fsverity_ioctl_enable_code_sign(filp, (const void __user *)arg);
3452 }
3453 
3454 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3455 {
3456 	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3457 		return -EOPNOTSUPP;
3458 
3459 	return fsverity_ioctl_measure(filp, (void __user *)arg);
3460 }
3461 
3462 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3463 {
3464 	struct inode *inode = file_inode(filp);
3465 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3466 	char *vbuf;
3467 	int count;
3468 	int err = 0;
3469 
3470 	vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3471 	if (!vbuf)
3472 		return -ENOMEM;
3473 
3474 	down_read(&sbi->sb_lock);
3475 	count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3476 			ARRAY_SIZE(sbi->raw_super->volume_name),
3477 			UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3478 	up_read(&sbi->sb_lock);
3479 
3480 	if (copy_to_user((char __user *)arg, vbuf,
3481 				min(FSLABEL_MAX, count)))
3482 		err = -EFAULT;
3483 
3484 	kfree(vbuf);
3485 	return err;
3486 }
3487 
3488 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3489 {
3490 	struct inode *inode = file_inode(filp);
3491 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3492 	char *vbuf;
3493 	int err = 0;
3494 
3495 	if (!capable(CAP_SYS_ADMIN))
3496 		return -EPERM;
3497 
3498 	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3499 	if (IS_ERR(vbuf))
3500 		return PTR_ERR(vbuf);
3501 
3502 	err = mnt_want_write_file(filp);
3503 	if (err)
3504 		goto out;
3505 
3506 	down_write(&sbi->sb_lock);
3507 
3508 	memset(sbi->raw_super->volume_name, 0,
3509 			sizeof(sbi->raw_super->volume_name));
3510 	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3511 			sbi->raw_super->volume_name,
3512 			ARRAY_SIZE(sbi->raw_super->volume_name));
3513 
3514 	err = f2fs_commit_super(sbi, false);
3515 
3516 	up_write(&sbi->sb_lock);
3517 
3518 	mnt_drop_write_file(filp);
3519 out:
3520 	kfree(vbuf);
3521 	return err;
3522 }
3523 
3524 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3525 {
3526 	struct inode *inode = file_inode(filp);
3527 	__u64 blocks;
3528 
3529 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3530 		return -EOPNOTSUPP;
3531 
3532 	if (!f2fs_compressed_file(inode))
3533 		return -EINVAL;
3534 
3535 	blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3536 	return put_user(blocks, (u64 __user *)arg);
3537 }
3538 
3539 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3540 {
3541 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3542 	unsigned int released_blocks = 0;
3543 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3544 	block_t blkaddr;
3545 	int i;
3546 
3547 	for (i = 0; i < count; i++) {
3548 		blkaddr = data_blkaddr(dn->inode, dn->node_page,
3549 						dn->ofs_in_node + i);
3550 
3551 		if (!__is_valid_data_blkaddr(blkaddr))
3552 			continue;
3553 		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3554 					DATA_GENERIC_ENHANCE)))
3555 			return -EFSCORRUPTED;
3556 	}
3557 
3558 	while (count) {
3559 		int compr_blocks = 0;
3560 
3561 		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3562 			blkaddr = f2fs_data_blkaddr(dn);
3563 
3564 			if (i == 0) {
3565 				if (blkaddr == COMPRESS_ADDR)
3566 					continue;
3567 				dn->ofs_in_node += cluster_size;
3568 				goto next;
3569 			}
3570 
3571 			if (__is_valid_data_blkaddr(blkaddr))
3572 				compr_blocks++;
3573 
3574 			if (blkaddr != NEW_ADDR)
3575 				continue;
3576 
3577 			dn->data_blkaddr = NULL_ADDR;
3578 			f2fs_set_data_blkaddr(dn);
3579 		}
3580 
3581 		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3582 		dec_valid_block_count(sbi, dn->inode,
3583 					cluster_size - compr_blocks);
3584 
3585 		released_blocks += cluster_size - compr_blocks;
3586 next:
3587 		count -= cluster_size;
3588 	}
3589 
3590 	return released_blocks;
3591 }
3592 
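/*
 * f2fs_release_compress_blocks() gives the space saved by compression
 * back to the free pool: the file is made immutable, each cluster is
 * walked via release_compress_blocks() to drop its reserved NEW_ADDR
 * slots, and the number of released blocks is copied back to user
 * space.
 */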
3593 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3594 {
3595 	struct inode *inode = file_inode(filp);
3596 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3597 	pgoff_t page_idx = 0, last_idx;
3598 	unsigned int released_blocks = 0;
3599 	int ret;
3600 	int writecount;
3601 
3602 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3603 		return -EOPNOTSUPP;
3604 
3605 	if (!f2fs_compressed_file(inode))
3606 		return -EINVAL;
3607 
3608 	if (f2fs_readonly(sbi->sb))
3609 		return -EROFS;
3610 
3611 	ret = mnt_want_write_file(filp);
3612 	if (ret)
3613 		return ret;
3614 
3615 	f2fs_balance_fs(F2FS_I_SB(inode), true);
3616 
3617 	inode_lock(inode);
3618 
3619 	writecount = atomic_read(&inode->i_writecount);
3620 	if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3621 			(!(filp->f_mode & FMODE_WRITE) && writecount)) {
3622 		ret = -EBUSY;
3623 		goto out;
3624 	}
3625 
3626 	if (IS_IMMUTABLE(inode)) {
3627 		ret = -EINVAL;
3628 		goto out;
3629 	}
3630 
3631 	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3632 	if (ret)
3633 		goto out;
3634 
3635 	F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
3636 	f2fs_set_inode_flags(inode);
3637 	inode->i_ctime = current_time(inode);
3638 	f2fs_mark_inode_dirty_sync(inode, true);
3639 
3640 	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3641 		goto out;
3642 
3643 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3644 	down_write(&F2FS_I(inode)->i_mmap_sem);
3645 
3646 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3647 
3648 	while (page_idx < last_idx) {
3649 		struct dnode_of_data dn;
3650 		pgoff_t end_offset, count;
3651 
3652 		f2fs_lock_op(sbi);
3653 
3654 		set_new_dnode(&dn, inode, NULL, NULL, 0);
3655 		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3656 		if (ret) {
3657 			f2fs_unlock_op(sbi);
3658 			if (ret == -ENOENT) {
3659 				page_idx = f2fs_get_next_page_offset(&dn,
3660 								page_idx);
3661 				ret = 0;
3662 				continue;
3663 			}
3664 			break;
3665 		}
3666 
3667 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3668 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3669 		count = round_up(count, F2FS_I(inode)->i_cluster_size);
3670 
3671 		ret = release_compress_blocks(&dn, count);
3672 
3673 		f2fs_put_dnode(&dn);
3674 
3675 		f2fs_unlock_op(sbi);
3676 
3677 		if (ret < 0)
3678 			break;
3679 
3680 		page_idx += count;
3681 		released_blocks += ret;
3682 	}
3683 
3684 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3685 	up_write(&F2FS_I(inode)->i_mmap_sem);
3686 out:
3687 	inode_unlock(inode);
3688 
3689 	mnt_drop_write_file(filp);
3690 
3691 	if (ret >= 0) {
3692 		ret = put_user(released_blocks, (u64 __user *)arg);
3693 	} else if (released_blocks &&
3694 			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3695 		set_sbi_flag(sbi, SBI_NEED_FSCK);
3696 		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3697 			"iblocks=%llu, released=%u, compr_blocks=%u, "
3698 			"run fsck to fix.",
3699 			__func__, inode->i_ino, inode->i_blocks,
3700 			released_blocks,
3701 			atomic_read(&F2FS_I(inode)->i_compr_blocks));
3702 	}
3703 
3704 	return ret;
3705 }
3706 
3707 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
3708 		unsigned int *reserved_blocks)
3709 {
3710 	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3711 	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3712 	block_t blkaddr;
3713 	int i;
3714 
3715 	for (i = 0; i < count; i++) {
3716 		blkaddr = data_blkaddr(dn->inode, dn->node_page,
3717 						dn->ofs_in_node + i);
3718 
3719 		if (!__is_valid_data_blkaddr(blkaddr))
3720 			continue;
3721 		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3722 					DATA_GENERIC_ENHANCE)))
3723 			return -EFSCORRUPTED;
3724 	}
3725 
3726 	while (count) {
3727 		int compr_blocks = 0;
3728 		blkcnt_t reserved;
3729 		int ret;
3730 
3731 		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3732 			blkaddr = f2fs_data_blkaddr(dn);
3733 
3734 			if (i == 0) {
3735 				if (blkaddr == COMPRESS_ADDR)
3736 					continue;
3737 				dn->ofs_in_node += cluster_size;
3738 				goto next;
3739 			}
3740 
3741 			if (__is_valid_data_blkaddr(blkaddr)) {
3742 				compr_blocks++;
3743 				continue;
3744 			}
3745 
3746 			dn->data_blkaddr = NEW_ADDR;
3747 			f2fs_set_data_blkaddr(dn);
3748 		}
3749 
3750 		reserved = cluster_size - compr_blocks;
3751 		ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3752 		if (ret)
3753 			return ret;
3754 
3755 		if (reserved != cluster_size - compr_blocks)
3756 			return -ENOSPC;
3757 
3758 		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3759 
3760 		*reserved_blocks += reserved;
3761 next:
3762 		count -= cluster_size;
3763 	}
3764 
3765 	return 0;
3766 }
3767 
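/*
 * f2fs_reserve_compress_blocks() is the counterpart of the release
 * ioctl: for an immutable compressed file it re-reserves one block for
 * every unused slot in each cluster (via reserve_compress_blocks()),
 * clears F2FS_IMMUTABLE_FL on success and reports how many blocks were
 * reserved.
 */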
3768 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3769 {
3770 	struct inode *inode = file_inode(filp);
3771 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3772 	pgoff_t page_idx = 0, last_idx;
3773 	unsigned int reserved_blocks = 0;
3774 	int ret;
3775 
3776 	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3777 		return -EOPNOTSUPP;
3778 
3779 	if (!f2fs_compressed_file(inode))
3780 		return -EINVAL;
3781 
3782 	if (f2fs_readonly(sbi->sb))
3783 		return -EROFS;
3784 
3785 	ret = mnt_want_write_file(filp);
3786 	if (ret)
3787 		return ret;
3788 
3789 	if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3790 		goto out;
3791 
3792 	f2fs_balance_fs(F2FS_I_SB(inode), true);
3793 
3794 	inode_lock(inode);
3795 
3796 	if (!IS_IMMUTABLE(inode)) {
3797 		ret = -EINVAL;
3798 		goto unlock_inode;
3799 	}
3800 
3801 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3802 	down_write(&F2FS_I(inode)->i_mmap_sem);
3803 
3804 	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3805 
3806 	while (page_idx < last_idx) {
3807 		struct dnode_of_data dn;
3808 		pgoff_t end_offset, count;
3809 
3810 		f2fs_lock_op(sbi);
3811 
3812 		set_new_dnode(&dn, inode, NULL, NULL, 0);
3813 		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3814 		if (ret) {
3815 			f2fs_unlock_op(sbi);
3816 			if (ret == -ENOENT) {
3817 				page_idx = f2fs_get_next_page_offset(&dn,
3818 								page_idx);
3819 				ret = 0;
3820 				continue;
3821 			}
3822 			break;
3823 		}
3824 
3825 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3826 		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3827 		count = round_up(count, F2FS_I(inode)->i_cluster_size);
3828 
3829 		ret = reserve_compress_blocks(&dn, count, &reserved_blocks);
3830 
3831 		f2fs_put_dnode(&dn);
3832 
3833 		f2fs_unlock_op(sbi);
3834 
3835 		if (ret < 0)
3836 			break;
3837 
3838 		page_idx += count;
3839 	}
3840 
3841 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3842 	up_write(&F2FS_I(inode)->i_mmap_sem);
3843 
3844 	if (ret >= 0) {
3845 		F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
3846 		f2fs_set_inode_flags(inode);
3847 		inode->i_ctime = current_time(inode);
3848 		f2fs_mark_inode_dirty_sync(inode, true);
3849 	}
3850 unlock_inode:
3851 	inode_unlock(inode);
3852 out:
3853 	mnt_drop_write_file(filp);
3854 
3855 	if (!ret) {
3856 		ret = put_user(reserved_blocks, (u64 __user *)arg);
3857 	} else if (reserved_blocks &&
3858 			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3859 		set_sbi_flag(sbi, SBI_NEED_FSCK);
3860 		f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
3861 			"iblocks=%llu, reserved=%u, compr_blocks=%u, "
3862 			"run fsck to fix.",
3863 			__func__, inode->i_ino, inode->i_blocks,
3864 			reserved_blocks,
3865 			atomic_read(&F2FS_I(inode)->i_compr_blocks));
3866 	}
3867 
3868 	return ret;
3869 }
3870 
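/*
 * Discard and/or zero out @len blocks starting at @block on @bdev.
 * F2FS_TRIM_FILE_DISCARD issues a discard (secure, if the queue
 * supports it); F2FS_TRIM_FILE_ZEROOUT overwrites the range, going
 * through fscrypt_zeroout_range() for encrypted inodes.
 */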
3871 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3872 		pgoff_t off, block_t block, block_t len, u32 flags)
3873 {
3874 	struct request_queue *q = bdev_get_queue(bdev);
3875 	sector_t sector = SECTOR_FROM_BLOCK(block);
3876 	sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3877 	int ret = 0;
3878 
3879 	if (!q)
3880 		return -ENXIO;
3881 
3882 	if (flags & F2FS_TRIM_FILE_DISCARD)
3883 		ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3884 						blk_queue_secure_erase(q) ?
3885 						BLKDEV_DISCARD_SECURE : 0);
3886 
3887 	if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3888 		if (IS_ENCRYPTED(inode))
3889 			ret = fscrypt_zeroout_range(inode, off, block, len);
3890 		else
3891 			ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3892 					GFP_NOFS, 0);
3893 	}
3894 
3895 	return ret;
3896 }
3897 
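/*
 * F2FS_IOC_SEC_TRIM_FILE: physically erase the blocks backing the
 * requested (block-aligned) range of a regular file. After writing
 * back and dropping the page cache, walk the block mappings, merge
 * physically contiguous extents on the same device and pass them to
 * f2fs_secure_erase().
 */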
3898 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3899 {
3900 	struct inode *inode = file_inode(filp);
3901 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3902 	struct address_space *mapping = inode->i_mapping;
3903 	struct block_device *prev_bdev = NULL;
3904 	struct f2fs_sectrim_range range;
3905 	pgoff_t index, pg_end, prev_index = 0;
3906 	block_t prev_block = 0, len = 0;
3907 	loff_t end_addr;
3908 	bool to_end = false;
3909 	int ret = 0;
3910 
3911 	if (!(filp->f_mode & FMODE_WRITE))
3912 		return -EBADF;
3913 
3914 	if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3915 				sizeof(range)))
3916 		return -EFAULT;
3917 
3918 	if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3919 			!S_ISREG(inode->i_mode))
3920 		return -EINVAL;
3921 
3922 	if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3923 			!f2fs_hw_support_discard(sbi)) ||
3924 			((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3925 			 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3926 		return -EOPNOTSUPP;
3927 
3928 	file_start_write(filp);
3929 	inode_lock(inode);
3930 
3931 	if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3932 			range.start >= inode->i_size) {
3933 		ret = -EINVAL;
3934 		goto err;
3935 	}
3936 
3937 	if (range.len == 0)
3938 		goto err;
3939 
3940 	if (inode->i_size - range.start > range.len) {
3941 		end_addr = range.start + range.len;
3942 	} else {
3943 		end_addr = range.len == (u64)-1 ?
3944 			sbi->sb->s_maxbytes : inode->i_size;
3945 		to_end = true;
3946 	}
3947 
3948 	if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3949 			(!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3950 		ret = -EINVAL;
3951 		goto err;
3952 	}
3953 
3954 	index = F2FS_BYTES_TO_BLK(range.start);
3955 	pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3956 
3957 	ret = f2fs_convert_inline_inode(inode);
3958 	if (ret)
3959 		goto err;
3960 
3961 	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3962 	down_write(&F2FS_I(inode)->i_mmap_sem);
3963 
3964 	ret = filemap_write_and_wait_range(mapping, range.start,
3965 			to_end ? LLONG_MAX : end_addr - 1);
3966 	if (ret)
3967 		goto out;
3968 
3969 	truncate_inode_pages_range(mapping, range.start,
3970 			to_end ? -1 : end_addr - 1);
3971 
3972 	while (index < pg_end) {
3973 		struct dnode_of_data dn;
3974 		pgoff_t end_offset, count;
3975 		int i;
3976 
3977 		set_new_dnode(&dn, inode, NULL, NULL, 0);
3978 		ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3979 		if (ret) {
3980 			if (ret == -ENOENT) {
3981 				index = f2fs_get_next_page_offset(&dn, index);
3982 				continue;
3983 			}
3984 			goto out;
3985 		}
3986 
3987 		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3988 		count = min(end_offset - dn.ofs_in_node, pg_end - index);
3989 		for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3990 			struct block_device *cur_bdev;
3991 			block_t blkaddr = f2fs_data_blkaddr(&dn);
3992 
3993 			if (!__is_valid_data_blkaddr(blkaddr))
3994 				continue;
3995 
3996 			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3997 						DATA_GENERIC_ENHANCE)) {
3998 				ret = -EFSCORRUPTED;
3999 				f2fs_put_dnode(&dn);
4000 				goto out;
4001 			}
4002 
4003 			cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
4004 			if (f2fs_is_multi_device(sbi)) {
4005 				int di = f2fs_target_device_index(sbi, blkaddr);
4006 
4007 				blkaddr -= FDEV(di).start_blk;
4008 			}
4009 
4010 			if (len) {
4011 				if (prev_bdev == cur_bdev &&
4012 						index == prev_index + len &&
4013 						blkaddr == prev_block + len) {
4014 					len++;
4015 				} else {
4016 					ret = f2fs_secure_erase(prev_bdev,
4017 						inode, prev_index, prev_block,
4018 						len, range.flags);
4019 					if (ret) {
4020 						f2fs_put_dnode(&dn);
4021 						goto out;
4022 					}
4023 
4024 					len = 0;
4025 				}
4026 			}
4027 
4028 			if (!len) {
4029 				prev_bdev = cur_bdev;
4030 				prev_index = index;
4031 				prev_block = blkaddr;
4032 				len = 1;
4033 			}
4034 		}
4035 
4036 		f2fs_put_dnode(&dn);
4037 
4038 		if (fatal_signal_pending(current)) {
4039 			ret = -EINTR;
4040 			goto out;
4041 		}
4042 		cond_resched();
4043 	}
4044 
4045 	if (len)
4046 		ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
4047 				prev_block, len, range.flags);
4048 out:
4049 	up_write(&F2FS_I(inode)->i_mmap_sem);
4050 	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4051 err:
4052 	inode_unlock(inode);
4053 	file_end_write(filp);
4054 
4055 	return ret;
4056 }
4057 
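/* Common ioctl dispatcher shared by f2fs_ioctl() and f2fs_compat_ioctl(). */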
4058 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4059 {
4060 	switch (cmd) {
4061 	case FS_IOC_GETFLAGS:
4062 		return f2fs_ioc_getflags(filp, arg);
4063 	case FS_IOC_SETFLAGS:
4064 		return f2fs_ioc_setflags(filp, arg);
4065 	case FS_IOC_GETVERSION:
4066 		return f2fs_ioc_getversion(filp, arg);
4067 	case F2FS_IOC_START_ATOMIC_WRITE:
4068 		return f2fs_ioc_start_atomic_write(filp);
4069 	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4070 		return f2fs_ioc_commit_atomic_write(filp);
4071 	case F2FS_IOC_START_VOLATILE_WRITE:
4072 		return f2fs_ioc_start_volatile_write(filp);
4073 	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4074 		return f2fs_ioc_release_volatile_write(filp);
4075 	case F2FS_IOC_ABORT_VOLATILE_WRITE:
4076 		return f2fs_ioc_abort_volatile_write(filp);
4077 	case F2FS_IOC_SHUTDOWN:
4078 		return f2fs_ioc_shutdown(filp, arg);
4079 	case FITRIM:
4080 		return f2fs_ioc_fitrim(filp, arg);
4081 	case FS_IOC_SET_ENCRYPTION_POLICY:
4082 		return f2fs_ioc_set_encryption_policy(filp, arg);
4083 	case FS_IOC_GET_ENCRYPTION_POLICY:
4084 		return f2fs_ioc_get_encryption_policy(filp, arg);
4085 	case FS_IOC_GET_ENCRYPTION_PWSALT:
4086 		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4087 	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4088 		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4089 	case FS_IOC_ADD_ENCRYPTION_KEY:
4090 		return f2fs_ioc_add_encryption_key(filp, arg);
4091 	case FS_IOC_REMOVE_ENCRYPTION_KEY:
4092 		return f2fs_ioc_remove_encryption_key(filp, arg);
4093 	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4094 		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4095 	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4096 		return f2fs_ioc_get_encryption_key_status(filp, arg);
4097 	case FS_IOC_GET_ENCRYPTION_NONCE:
4098 		return f2fs_ioc_get_encryption_nonce(filp, arg);
4099 	case F2FS_IOC_GARBAGE_COLLECT:
4100 		return f2fs_ioc_gc(filp, arg);
4101 	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4102 		return f2fs_ioc_gc_range(filp, arg);
4103 	case F2FS_IOC_WRITE_CHECKPOINT:
4104 		return f2fs_ioc_write_checkpoint(filp, arg);
4105 	case F2FS_IOC_DEFRAGMENT:
4106 		return f2fs_ioc_defragment(filp, arg);
4107 	case F2FS_IOC_MOVE_RANGE:
4108 		return f2fs_ioc_move_range(filp, arg);
4109 	case F2FS_IOC_FLUSH_DEVICE:
4110 		return f2fs_ioc_flush_device(filp, arg);
4111 	case F2FS_IOC_GET_FEATURES:
4112 		return f2fs_ioc_get_features(filp, arg);
4113 	case FS_IOC_FSGETXATTR:
4114 		return f2fs_ioc_fsgetxattr(filp, arg);
4115 	case FS_IOC_FSSETXATTR:
4116 		return f2fs_ioc_fssetxattr(filp, arg);
4117 	case F2FS_IOC_GET_PIN_FILE:
4118 		return f2fs_ioc_get_pin_file(filp, arg);
4119 	case F2FS_IOC_SET_PIN_FILE:
4120 		return f2fs_ioc_set_pin_file(filp, arg);
4121 	case F2FS_IOC_PRECACHE_EXTENTS:
4122 		return f2fs_ioc_precache_extents(filp, arg);
4123 	case F2FS_IOC_RESIZE_FS:
4124 		return f2fs_ioc_resize_fs(filp, arg);
4125 	case FS_IOC_ENABLE_VERITY:
4126 		return f2fs_ioc_enable_verity(filp, arg);
4127 	case FS_IOC_MEASURE_VERITY:
4128 		return f2fs_ioc_measure_verity(filp, arg);
4129 	case FS_IOC_ENABLE_CODE_SIGN:
4130 		return f2fs_ioc_enable_code_sign(filp, arg);
4131 	case FS_IOC_GETFSLABEL:
4132 		return f2fs_ioc_getfslabel(filp, arg);
4133 	case FS_IOC_SETFSLABEL:
4134 		return f2fs_ioc_setfslabel(filp, arg);
4135 	case F2FS_IOC_GET_COMPRESS_BLOCKS:
4136 		return f2fs_get_compress_blocks(filp, arg);
4137 	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4138 		return f2fs_release_compress_blocks(filp, arg);
4139 	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4140 		return f2fs_reserve_compress_blocks(filp, arg);
4141 	case F2FS_IOC_SEC_TRIM_FILE:
4142 		return f2fs_sec_trim_file(filp, arg);
4143 	default:
4144 		return -ENOTTY;
4145 	}
4146 }
4147 
4148 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4149 {
4150 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4151 		return -EIO;
4152 	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4153 		return -ENOSPC;
4154 
4155 	return __f2fs_ioctl(filp, cmd, arg);
4156 }
4157 
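/*
 * Read entry point: reject files whose compression backend is not
 * ready and account successful reads as APP_READ_IO.
 */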
4158 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4159 {
4160 	struct file *file = iocb->ki_filp;
4161 	struct inode *inode = file_inode(file);
4162 	int ret;
4163 
4164 	if (!f2fs_is_compress_backend_ready(inode))
4165 		return -EOPNOTSUPP;
4166 
4167 	ret = generic_file_read_iter(iocb, iter);
4168 
4169 	if (ret > 0)
4170 		f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4171 
4172 	return ret;
4173 }
4174 
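/*
 * Write entry point: honors IOCB_NOWAIT (trylock and bail out with
 * -EAGAIN when blocking would be needed), rejects immutable inodes,
 * converts inline data and preallocates blocks where required, then
 * hands off to __generic_file_write_iter(). If the write falls short
 * of the preallocated size, the unused blocks are truncated again.
 */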
4175 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4176 {
4177 	struct file *file = iocb->ki_filp;
4178 	struct inode *inode = file_inode(file);
4179 	ssize_t ret;
4180 
4181 	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4182 		ret = -EIO;
4183 		goto out;
4184 	}
4185 
4186 	if (!f2fs_is_compress_backend_ready(inode)) {
4187 		ret = -EOPNOTSUPP;
4188 		goto out;
4189 	}
4190 
4191 	if (iocb->ki_flags & IOCB_NOWAIT) {
4192 		if (!inode_trylock(inode)) {
4193 			ret = -EAGAIN;
4194 			goto out;
4195 		}
4196 	} else {
4197 		inode_lock(inode);
4198 	}
4199 
4200 	if (unlikely(IS_IMMUTABLE(inode))) {
4201 		ret = -EPERM;
4202 		goto unlock;
4203 	}
4204 
4205 	ret = generic_write_checks(iocb, from);
4206 	if (ret > 0) {
4207 		bool preallocated = false;
4208 		size_t target_size = 0;
4209 		int err;
4210 
4211 		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
4212 			set_inode_flag(inode, FI_NO_PREALLOC);
4213 
4214 		if ((iocb->ki_flags & IOCB_NOWAIT)) {
4215 			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
4216 						iov_iter_count(from)) ||
4217 				f2fs_has_inline_data(inode) ||
4218 				f2fs_force_buffered_io(inode, iocb, from)) {
4219 				clear_inode_flag(inode, FI_NO_PREALLOC);
4220 				inode_unlock(inode);
4221 				ret = -EAGAIN;
4222 				goto out;
4223 			}
4224 			goto write;
4225 		}
4226 
4227 		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
4228 			goto write;
4229 
4230 		if (iocb->ki_flags & IOCB_DIRECT) {
4231 			/*
4232 			 * Convert inline data for Direct I/O before entering
4233 			 * f2fs_direct_IO().
4234 			 */
4235 			err = f2fs_convert_inline_inode(inode);
4236 			if (err)
4237 				goto out_err;
4238 			/*
4239 			 * If f2fs_force_buffered_io() is true, we have to allocate
4240 			 * blocks all the time, since f2fs_direct_IO will fall
4241 			 * back to buffered IO.
4242 			 */
4243 			if (!f2fs_force_buffered_io(inode, iocb, from) &&
4244 					allow_outplace_dio(inode, iocb, from))
4245 				goto write;
4246 		}
4247 		preallocated = true;
4248 		target_size = iocb->ki_pos + iov_iter_count(from);
4249 
4250 		err = f2fs_preallocate_blocks(iocb, from);
4251 		if (err) {
4252 out_err:
4253 			clear_inode_flag(inode, FI_NO_PREALLOC);
4254 			inode_unlock(inode);
4255 			ret = err;
4256 			goto out;
4257 		}
4258 write:
4259 		ret = __generic_file_write_iter(iocb, from);
4260 		clear_inode_flag(inode, FI_NO_PREALLOC);
4261 
4262 		/* if we couldn't write data, we should deallocate blocks. */
4263 		if (preallocated && i_size_read(inode) < target_size)
4264 			f2fs_truncate(inode);
4265 
4266 		if (ret > 0)
4267 			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
4268 	}
4269 unlock:
4270 	inode_unlock(inode);
4271 out:
4272 	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
4273 					iov_iter_count(from), ret);
4274 	if (ret > 0)
4275 		ret = generic_write_sync(iocb, ret);
4276 	return ret;
4277 }
4278 
4279 #ifdef CONFIG_COMPAT
4280 struct compat_f2fs_gc_range {
4281 	u32 sync;
4282 	compat_u64 start;
4283 	compat_u64 len;
4284 };
4285 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,\
4286 						struct compat_f2fs_gc_range)
4287 
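/*
 * 32-bit compat handler for F2FS_IOC_GARBAGE_COLLECT_RANGE: copy the
 * compat layout field by field, then call __f2fs_ioc_gc_range().
 */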
4288 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4289 {
4290 	struct compat_f2fs_gc_range __user *urange;
4291 	struct f2fs_gc_range range;
4292 	int err;
4293 
4294 	urange = compat_ptr(arg);
4295 	err = get_user(range.sync, &urange->sync);
4296 	err |= get_user(range.start, &urange->start);
4297 	err |= get_user(range.len, &urange->len);
4298 	if (err)
4299 		return -EFAULT;
4300 
4301 	return __f2fs_ioc_gc_range(file, &range);
4302 }
4303 
4304 struct compat_f2fs_move_range {
4305 	u32 dst_fd;
4306 	compat_u64 pos_in;
4307 	compat_u64 pos_out;
4308 	compat_u64 len;
4309 };
4310 #define F2FS_IOC32_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
4311 					struct compat_f2fs_move_range)
4312 
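/*
 * 32-bit compat handler for F2FS_IOC_MOVE_RANGE: copy the compat
 * layout field by field, then call __f2fs_ioc_move_range().
 */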
4313 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4314 {
4315 	struct compat_f2fs_move_range __user *urange;
4316 	struct f2fs_move_range range;
4317 	int err;
4318 
4319 	urange = compat_ptr(arg);
4320 	err = get_user(range.dst_fd, &urange->dst_fd);
4321 	err |= get_user(range.pos_in, &urange->pos_in);
4322 	err |= get_user(range.pos_out, &urange->pos_out);
4323 	err |= get_user(range.len, &urange->len);
4324 	if (err)
4325 		return -EFAULT;
4326 
4327 	return __f2fs_ioc_move_range(file, &range);
4328 }
4329 
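/*
 * compat_ioctl entry point: remap the 32-bit flag/version commands,
 * handle the two ioctls that need compat structures, and pass every
 * other pointer-compatible command straight to __f2fs_ioctl().
 */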
4330 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4331 {
4332 	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4333 		return -EIO;
4334 	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4335 		return -ENOSPC;
4336 
4337 	switch (cmd) {
4338 	case FS_IOC32_GETFLAGS:
4339 		cmd = FS_IOC_GETFLAGS;
4340 		break;
4341 	case FS_IOC32_SETFLAGS:
4342 		cmd = FS_IOC_SETFLAGS;
4343 		break;
4344 	case FS_IOC32_GETVERSION:
4345 		cmd = FS_IOC_GETVERSION;
4346 		break;
4347 	case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4348 		return f2fs_compat_ioc_gc_range(file, arg);
4349 	case F2FS_IOC32_MOVE_RANGE:
4350 		return f2fs_compat_ioc_move_range(file, arg);
4351 	case F2FS_IOC_START_ATOMIC_WRITE:
4352 	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4353 	case F2FS_IOC_START_VOLATILE_WRITE:
4354 	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4355 	case F2FS_IOC_ABORT_VOLATILE_WRITE:
4356 	case F2FS_IOC_SHUTDOWN:
4357 	case FITRIM:
4358 	case FS_IOC_SET_ENCRYPTION_POLICY:
4359 	case FS_IOC_GET_ENCRYPTION_PWSALT:
4360 	case FS_IOC_GET_ENCRYPTION_POLICY:
4361 	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4362 	case FS_IOC_ADD_ENCRYPTION_KEY:
4363 	case FS_IOC_REMOVE_ENCRYPTION_KEY:
4364 	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4365 	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4366 	case FS_IOC_GET_ENCRYPTION_NONCE:
4367 	case F2FS_IOC_GARBAGE_COLLECT:
4368 	case F2FS_IOC_WRITE_CHECKPOINT:
4369 	case F2FS_IOC_DEFRAGMENT:
4370 	case F2FS_IOC_FLUSH_DEVICE:
4371 	case F2FS_IOC_GET_FEATURES:
4372 	case FS_IOC_FSGETXATTR:
4373 	case FS_IOC_FSSETXATTR:
4374 	case F2FS_IOC_GET_PIN_FILE:
4375 	case F2FS_IOC_SET_PIN_FILE:
4376 	case F2FS_IOC_PRECACHE_EXTENTS:
4377 	case F2FS_IOC_RESIZE_FS:
4378 	case FS_IOC_ENABLE_VERITY:
4379 	case FS_IOC_MEASURE_VERITY:
4380 	case FS_IOC_ENABLE_CODE_SIGN:
4381 	case FS_IOC_GETFSLABEL:
4382 	case FS_IOC_SETFSLABEL:
4383 	case F2FS_IOC_GET_COMPRESS_BLOCKS:
4384 	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4385 	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4386 	case F2FS_IOC_SEC_TRIM_FILE:
4387 		break;
4388 	default:
4389 		return -ENOIOCTLCMD;
4390 	}
4391 	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4392 }
4393 #endif
4394 
4395 const struct file_operations f2fs_file_operations = {
4396 	.llseek		= f2fs_llseek,
4397 	.read_iter	= f2fs_file_read_iter,
4398 	.write_iter	= f2fs_file_write_iter,
4399 	.open		= f2fs_file_open,
4400 	.release	= f2fs_release_file,
4401 	.mmap		= f2fs_file_mmap,
4402 	.flush		= f2fs_file_flush,
4403 	.fsync		= f2fs_sync_file,
4404 	.fallocate	= f2fs_fallocate,
4405 	.unlocked_ioctl	= f2fs_ioctl,
4406 #ifdef CONFIG_COMPAT
4407 	.compat_ioctl	= f2fs_compat_ioctl,
4408 #endif
4409 	.splice_read	= generic_file_splice_read,
4410 	.splice_write	= iter_file_splice_write,
4411 };
4412