1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * fs/f2fs/file.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/stat.h>
11 #include <linux/buffer_head.h>
12 #include <linux/writeback.h>
13 #include <linux/blkdev.h>
14 #include <linux/falloc.h>
15 #include <linux/types.h>
16 #include <linux/compat.h>
17 #include <linux/uaccess.h>
18 #include <linux/mount.h>
19 #include <linux/pagevec.h>
20 #include <linux/uio.h>
21 #include <linux/uuid.h>
22 #include <linux/file.h>
23 #include <linux/nls.h>
24 #include <linux/sched/signal.h>
25 #include <linux/fadvise.h>
26 #include <linux/iomap.h>
27
28 #include "f2fs.h"
29 #include "node.h"
30 #include "segment.h"
31 #include "xattr.h"
32 #include "acl.h"
33 #include "gc.h"
34 #include "iostat.h"
35 #include <trace/events/f2fs.h>
36 #include <trace/events/android_fs.h>
37 #include <uapi/linux/f2fs.h>
38
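/*
 * Read fault handler: serialize against truncation/hole punching via
 * i_mmap_sem, let the generic filemap_fault() bring the page in, and
 * account a mapped read for iostat when the page came back locked.
 */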
39 static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
40 {
41 struct inode *inode = file_inode(vmf->vma->vm_file);
42 vm_fault_t ret;
43
44 f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
45 ret = filemap_fault(vmf);
46 f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);
47
48 if (ret & VM_FAULT_LOCKED)
49 f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
50 F2FS_BLKSIZE);
51
52 trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
53
54 return ret;
55 }
56
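/*
 * Write fault handler: convert any inline data, allocate a block for the
 * faulting page unless it belongs to an already-allocated compressed
 * cluster, wait for in-flight writeback, zero the part of the last page
 * beyond i_size, and finally mark the page dirty.
 */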
57 static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
58 {
59 struct page *page = vmf->page;
60 struct inode *inode = file_inode(vmf->vma->vm_file);
61 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
62 struct dnode_of_data dn;
63 bool need_alloc = true;
64 int err = 0;
65
66 if (unlikely(IS_IMMUTABLE(inode)))
67 return VM_FAULT_SIGBUS;
68
69 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
70 return VM_FAULT_SIGBUS;
71
72 if (unlikely(f2fs_cp_error(sbi))) {
73 err = -EIO;
74 goto err;
75 }
76
77 if (!f2fs_is_checkpoint_ready(sbi)) {
78 err = -ENOSPC;
79 goto err;
80 }
81
82 err = f2fs_convert_inline_inode(inode);
83 if (err)
84 goto err;
85
86 #ifdef CONFIG_F2FS_FS_COMPRESSION
87 if (f2fs_compressed_file(inode)) {
88 int ret = f2fs_is_compressed_cluster(inode, page->index);
89
90 if (ret < 0) {
91 err = ret;
92 goto err;
93 } else if (ret) {
94 need_alloc = false;
95 }
96 }
97 #endif
98 	/* should be done outside of any locked page */
99 if (need_alloc)
100 f2fs_balance_fs(sbi, true);
101
102 sb_start_pagefault(inode->i_sb);
103
104 f2fs_bug_on(sbi, f2fs_has_inline_data(inode));
105
106 file_update_time(vmf->vma->vm_file);
107 f2fs_down_read(&F2FS_I(inode)->i_mmap_sem);
108 lock_page(page);
109 if (unlikely(page->mapping != inode->i_mapping ||
110 page_offset(page) > i_size_read(inode) ||
111 !PageUptodate(page))) {
112 unlock_page(page);
113 err = -EFAULT;
114 goto out_sem;
115 }
116
117 if (need_alloc) {
118 /* block allocation */
119 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
120 set_new_dnode(&dn, inode, NULL, NULL, 0);
121 err = f2fs_get_block(&dn, page->index);
122 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
123 }
124
125 #ifdef CONFIG_F2FS_FS_COMPRESSION
126 if (!need_alloc) {
127 set_new_dnode(&dn, inode, NULL, NULL, 0);
128 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
129 f2fs_put_dnode(&dn);
130 }
131 #endif
132 if (err) {
133 unlock_page(page);
134 goto out_sem;
135 }
136
137 f2fs_wait_on_page_writeback(page, DATA, false, true);
138
139 /* wait for GCed page writeback via META_MAPPING */
140 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
141
142 /*
143 * check to see if the page is mapped already (no holes)
144 */
145 if (PageMappedToDisk(page))
146 goto out_sem;
147
148 /* page is wholly or partially inside EOF */
149 if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
150 i_size_read(inode)) {
151 loff_t offset;
152
153 offset = i_size_read(inode) & ~PAGE_MASK;
154 zero_user_segment(page, offset, PAGE_SIZE);
155 }
156 set_page_dirty(page);
157 if (!PageUptodate(page))
158 SetPageUptodate(page);
159
160 f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
161 f2fs_update_time(sbi, REQ_TIME);
162
163 trace_f2fs_vm_page_mkwrite(page, DATA);
164 out_sem:
165 f2fs_up_read(&F2FS_I(inode)->i_mmap_sem);
166
167 sb_end_pagefault(inode->i_sb);
168 err:
169 return block_page_mkwrite_return(err);
170 }
171
172 static const struct vm_operations_struct f2fs_file_vm_ops = {
173 .fault = f2fs_filemap_fault,
174 .map_pages = filemap_map_pages,
175 .page_mkwrite = f2fs_vm_page_mkwrite,
176 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
177 .allow_speculation = filemap_allow_speculation,
178 #endif
179 };
180
181 static int get_parent_ino(struct inode *inode, nid_t *pino)
182 {
183 struct dentry *dentry;
184
185 /*
186 * Make sure to get the non-deleted alias. The alias associated with
187 * the open file descriptor being fsync()'ed may be deleted already.
188 */
189 dentry = d_find_alias(inode);
190 if (!dentry)
191 return 0;
192
193 *pino = parent_ino(dentry);
194 dput(dentry);
195 return 1;
196 }
197
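/*
 * Decide whether fsync() can rely on roll-forward recovery alone or must
 * trigger a full checkpoint. Returns the first matching reason, or
 * CP_NO_NEEDED when writing the node chain is sufficient.
 */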
198 static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
199 {
200 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
201 enum cp_reason_type cp_reason = CP_NO_NEEDED;
202
203 if (!S_ISREG(inode->i_mode))
204 cp_reason = CP_NON_REGULAR;
205 else if (f2fs_compressed_file(inode))
206 cp_reason = CP_COMPRESSED;
207 else if (inode->i_nlink != 1)
208 cp_reason = CP_HARDLINK;
209 else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
210 cp_reason = CP_SB_NEED_CP;
211 else if (file_wrong_pino(inode))
212 cp_reason = CP_WRONG_PINO;
213 else if (!f2fs_space_for_roll_forward(sbi))
214 cp_reason = CP_NO_SPC_ROLL;
215 else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
216 cp_reason = CP_NODE_NEED_CP;
217 else if (test_opt(sbi, FASTBOOT))
218 cp_reason = CP_FASTBOOT_MODE;
219 else if (F2FS_OPTION(sbi).active_logs == 2)
220 cp_reason = CP_SPEC_LOG_NUM;
221 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
222 f2fs_need_dentry_mark(sbi, inode->i_ino) &&
223 f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
224 TRANS_DIR_INO))
225 cp_reason = CP_RECOVER_DIR;
226
227 return cp_reason;
228 }
229
230 static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
231 {
232 struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
233 bool ret = false;
234 	/* But we need to check whether there are any pending inode updates */
235 if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
236 ret = true;
237 f2fs_put_page(i, 0);
238 return ret;
239 }
240
241 static void try_to_fix_pino(struct inode *inode)
242 {
243 struct f2fs_inode_info *fi = F2FS_I(inode);
244 nid_t pino;
245
246 f2fs_down_write(&fi->i_sem);
247 if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
248 get_parent_ino(inode, &pino)) {
249 f2fs_i_pino_write(inode, pino);
250 file_got_pino(inode);
251 }
252 f2fs_up_write(&fi->i_sem);
253 }
254
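/*
 * Core fsync path: write back dirty data, then either trigger a checkpoint
 * (when need_do_checkpoint() finds a reason) or persist the inode's node
 * chain with an fsync mark, and finally issue a cache flush unless the
 * write was atomic or fsync_mode is nobarrier.
 */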
255 static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
256 int datasync, bool atomic)
257 {
258 struct inode *inode = file->f_mapping->host;
259 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
260 nid_t ino = inode->i_ino;
261 int ret = 0;
262 enum cp_reason_type cp_reason = 0;
263 struct writeback_control wbc = {
264 .sync_mode = WB_SYNC_ALL,
265 .nr_to_write = LONG_MAX,
266 .for_reclaim = 0,
267 };
268 unsigned int seq_id = 0;
269
270 if (unlikely(f2fs_readonly(inode->i_sb)))
271 return 0;
272
273 trace_f2fs_sync_file_enter(inode);
274
275 if (S_ISDIR(inode->i_mode))
276 goto go_write;
277
278 /* if fdatasync is triggered, let's do in-place-update */
279 if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
280 set_inode_flag(inode, FI_NEED_IPU);
281 ret = file_write_and_wait_range(file, start, end);
282 clear_inode_flag(inode, FI_NEED_IPU);
283
284 if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
285 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
286 return ret;
287 }
288
289 /* if the inode is dirty, let's recover all the time */
290 if (!f2fs_skip_inode_update(inode, datasync)) {
291 f2fs_write_inode(inode, NULL);
292 goto go_write;
293 }
294
295 /*
296 	 * if there is no written data, don't waste time writing recovery info.
297 */
298 if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
299 !f2fs_exist_written_data(sbi, ino, APPEND_INO)) {
300
301 /* it may call write_inode just prior to fsync */
302 if (need_inode_page_update(sbi, ino))
303 goto go_write;
304
305 if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
306 f2fs_exist_written_data(sbi, ino, UPDATE_INO))
307 goto flush_out;
308 goto out;
309 } else {
310 /*
311 		 * for the OPU case, during fsync(), a node can be persisted
312 		 * before its data when the lower device doesn't support write
313 		 * barriers, resulting in data corruption after SPO.
314 		 * So for strict fsync mode, force the use of atomic write
315 		 * semantics to keep the write order between data/node and the
316 		 * last node, to avoid potential data corruption.
317 */
318 if (F2FS_OPTION(sbi).fsync_mode ==
319 FSYNC_MODE_STRICT && !atomic)
320 atomic = true;
321 }
322 go_write:
323 /*
324 	 * Both fdatasync() and fsync() are able to be recovered from a
325 	 * sudden power-off.
326 */
327 f2fs_down_read(&F2FS_I(inode)->i_sem);
328 cp_reason = need_do_checkpoint(inode);
329 f2fs_up_read(&F2FS_I(inode)->i_sem);
330
331 if (cp_reason) {
332 /* all the dirty node pages should be flushed for POR */
333 ret = f2fs_sync_fs(inode->i_sb, 1);
334
335 /*
336 * We've secured consistency through sync_fs. Following pino
337 * will be used only for fsynced inodes after checkpoint.
338 */
339 try_to_fix_pino(inode);
340 clear_inode_flag(inode, FI_APPEND_WRITE);
341 clear_inode_flag(inode, FI_UPDATE_WRITE);
342 goto out;
343 }
344 sync_nodes:
345 atomic_inc(&sbi->wb_sync_req[NODE]);
346 ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
347 atomic_dec(&sbi->wb_sync_req[NODE]);
348 if (ret)
349 goto out;
350
351 /* if cp_error was enabled, we should avoid infinite loop */
352 if (unlikely(f2fs_cp_error(sbi))) {
353 ret = -EIO;
354 goto out;
355 }
356
357 if (f2fs_need_inode_block_update(sbi, ino)) {
358 f2fs_mark_inode_dirty_sync(inode, true);
359 f2fs_write_inode(inode, NULL);
360 goto sync_nodes;
361 }
362
363 /*
364 	 * If it's atomic_write, it's just fine to keep write ordering. So
365 	 * here we don't need to wait for node write completion, since we use
366 	 * the node chain which serializes node blocks. If one of the node
367 	 * writes is reordered, we can simply see a broken chain, resulting in
368 	 * stopping roll-forward recovery. It means we'll recover all or none
369 	 * of the node blocks given the fsync mark.
370 */
371 if (!atomic) {
372 ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
373 if (ret)
374 goto out;
375 }
376
377 	/* once recovery info is written, we don't need to track this */
378 f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
379 clear_inode_flag(inode, FI_APPEND_WRITE);
380 flush_out:
381 if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
382 ret = f2fs_issue_flush(sbi, inode->i_ino);
383 if (!ret) {
384 f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
385 clear_inode_flag(inode, FI_UPDATE_WRITE);
386 f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
387 }
388 f2fs_update_time(sbi, REQ_TIME);
389 out:
390 trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
391 return ret;
392 }
393
394 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
395 {
396 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
397 return -EIO;
398 return f2fs_do_sync_file(file, start, end, datasync, false);
399 }
400
401 static bool __found_offset(struct address_space *mapping, block_t blkaddr,
402 pgoff_t index, int whence)
403 {
404 switch (whence) {
405 case SEEK_DATA:
406 if (__is_valid_data_blkaddr(blkaddr))
407 return true;
408 if (blkaddr == NEW_ADDR &&
409 xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
410 return true;
411 break;
412 case SEEK_HOLE:
413 if (blkaddr == NULL_ADDR)
414 return true;
415 break;
416 }
417 return false;
418 }
419
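/*
 * Implement SEEK_DATA/SEEK_HOLE by walking dnode blocks from the given
 * offset and checking each block address with __found_offset(); a dirty
 * page cache entry over a NEW_ADDR block counts as data.
 */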
420 static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
421 {
422 struct inode *inode = file->f_mapping->host;
423 loff_t maxbytes = inode->i_sb->s_maxbytes;
424 struct dnode_of_data dn;
425 pgoff_t pgofs, end_offset;
426 loff_t data_ofs = offset;
427 loff_t isize;
428 int err = 0;
429
430 inode_lock(inode);
431
432 isize = i_size_read(inode);
433 if (offset >= isize)
434 goto fail;
435
436 /* handle inline data case */
437 if (f2fs_has_inline_data(inode)) {
438 if (whence == SEEK_HOLE) {
439 data_ofs = isize;
440 goto found;
441 } else if (whence == SEEK_DATA) {
442 data_ofs = offset;
443 goto found;
444 }
445 }
446
447 pgofs = (pgoff_t)(offset >> PAGE_SHIFT);
448
449 for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
450 set_new_dnode(&dn, inode, NULL, NULL, 0);
451 err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
452 if (err && err != -ENOENT) {
453 goto fail;
454 } else if (err == -ENOENT) {
455 			/* direct node does not exist */
456 if (whence == SEEK_DATA) {
457 pgofs = f2fs_get_next_page_offset(&dn, pgofs);
458 continue;
459 } else {
460 goto found;
461 }
462 }
463
464 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
465
466 /* find data/hole in dnode block */
467 for (; dn.ofs_in_node < end_offset;
468 dn.ofs_in_node++, pgofs++,
469 data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
470 block_t blkaddr;
471
472 blkaddr = f2fs_data_blkaddr(&dn);
473
474 if (__is_valid_data_blkaddr(blkaddr) &&
475 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
476 blkaddr, DATA_GENERIC_ENHANCE)) {
477 f2fs_put_dnode(&dn);
478 goto fail;
479 }
480
481 if (__found_offset(file->f_mapping, blkaddr,
482 pgofs, whence)) {
483 f2fs_put_dnode(&dn);
484 goto found;
485 }
486 }
487 f2fs_put_dnode(&dn);
488 }
489
490 if (whence == SEEK_DATA)
491 goto fail;
492 found:
493 if (whence == SEEK_HOLE && data_ofs > isize)
494 data_ofs = isize;
495 inode_unlock(inode);
496 return vfs_setpos(file, data_ofs, maxbytes);
497 fail:
498 inode_unlock(inode);
499 return -ENXIO;
500 }
501
502 static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
503 {
504 struct inode *inode = file->f_mapping->host;
505 loff_t maxbytes = inode->i_sb->s_maxbytes;
506
507 if (f2fs_compressed_file(inode))
508 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
509
510 switch (whence) {
511 case SEEK_SET:
512 case SEEK_CUR:
513 case SEEK_END:
514 return generic_file_llseek_size(file, offset, whence,
515 maxbytes, i_size_read(inode));
516 case SEEK_DATA:
517 case SEEK_HOLE:
518 if (offset < 0)
519 return -ENXIO;
520 return f2fs_seek_block(file, offset, whence);
521 }
522
523 return -EINVAL;
524 }
525
526 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
527 {
528 struct inode *inode = file_inode(file);
529
530 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
531 return -EIO;
532
533 if (!f2fs_is_compress_backend_ready(inode))
534 return -EOPNOTSUPP;
535
536 file_accessed(file);
537 vma->vm_ops = &f2fs_file_vm_ops;
538 set_inode_flag(inode, FI_MMAP_FILE);
539 return 0;
540 }
541
542 static int f2fs_file_open(struct inode *inode, struct file *filp)
543 {
544 int err = fscrypt_file_open(inode, filp);
545
546 if (err)
547 return err;
548
549 if (!f2fs_is_compress_backend_ready(inode))
550 return -EOPNOTSUPP;
551
552 err = fsverity_file_open(inode, filp);
553 if (err)
554 return err;
555
556 filp->f_mode |= FMODE_NOWAIT;
557
558 return dquot_file_open(inode, filp);
559 }
560
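/*
 * Invalidate up to @count block addresses starting at dn->ofs_in_node,
 * updating compressed-cluster block counts, the extent cache and the
 * inode's valid block count for the addresses that were actually freed.
 */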
561 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
562 {
563 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
564 struct f2fs_node *raw_node;
565 int nr_free = 0, ofs = dn->ofs_in_node, len = count;
566 __le32 *addr;
567 int base = 0;
568 bool compressed_cluster = false;
569 int cluster_index = 0, valid_blocks = 0;
570 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
571 bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
572
573 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
574 base = get_extra_isize(dn->inode);
575
576 raw_node = F2FS_NODE(dn->node_page);
577 addr = blkaddr_in_node(raw_node) + base + ofs;
578
579 	/* Assumption: truncation starts at a cluster boundary */
580 for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
581 block_t blkaddr = le32_to_cpu(*addr);
582
583 if (f2fs_compressed_file(dn->inode) &&
584 !(cluster_index & (cluster_size - 1))) {
585 if (compressed_cluster)
586 f2fs_i_compr_blocks_update(dn->inode,
587 valid_blocks, false);
588 compressed_cluster = (blkaddr == COMPRESS_ADDR);
589 valid_blocks = 0;
590 }
591
592 if (blkaddr == NULL_ADDR)
593 continue;
594
595 dn->data_blkaddr = NULL_ADDR;
596 f2fs_set_data_blkaddr(dn);
597
598 if (__is_valid_data_blkaddr(blkaddr)) {
599 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
600 DATA_GENERIC_ENHANCE))
601 continue;
602 if (compressed_cluster)
603 valid_blocks++;
604 }
605
606 if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
607 clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
608
609 f2fs_invalidate_blocks(sbi, blkaddr);
610
611 if (!released || blkaddr != COMPRESS_ADDR)
612 nr_free++;
613 }
614
615 if (compressed_cluster)
616 f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);
617
618 if (nr_free) {
619 pgoff_t fofs;
620 /*
621 		 * once we invalidate a valid blkaddr in range [ofs, ofs + count],
622 		 * we will invalidate all blkaddrs in the whole range.
623 */
624 fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
625 dn->inode) + ofs;
626 f2fs_update_extent_cache_range(dn, fofs, 0, len);
627 dec_valid_block_count(sbi, dn->inode, nr_free);
628 }
629 dn->ofs_in_node = ofs;
630
631 f2fs_update_time(sbi, REQ_TIME);
632 trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
633 dn->ofs_in_node, nr_free);
634 }
635
636 void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
637 {
638 f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
639 }
640
641 static int truncate_partial_data_page(struct inode *inode, u64 from,
642 bool cache_only)
643 {
644 loff_t offset = from & (PAGE_SIZE - 1);
645 pgoff_t index = from >> PAGE_SHIFT;
646 struct address_space *mapping = inode->i_mapping;
647 struct page *page;
648
649 if (!offset && !cache_only)
650 return 0;
651
652 if (cache_only) {
653 page = find_lock_page(mapping, index);
654 if (page && PageUptodate(page))
655 goto truncate_out;
656 f2fs_put_page(page, 1);
657 return 0;
658 }
659
660 page = f2fs_get_lock_data_page(inode, index, true);
661 if (IS_ERR(page))
662 return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
663 truncate_out:
664 f2fs_wait_on_page_writeback(page, DATA, true, true);
665 zero_user(page, offset, PAGE_SIZE - offset);
666
667 /* An encrypted inode should have a key and truncate the last page. */
668 f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
669 if (!cache_only)
670 set_page_dirty(page);
671 f2fs_put_page(page, 1);
672 return 0;
673 }
674
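/*
 * Truncate block mappings beyond @from: handle inline data directly,
 * otherwise free the tail of the dnode that contains @from and all
 * following node blocks, then zero out the partial last page.
 */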
675 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
676 {
677 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
678 struct dnode_of_data dn;
679 pgoff_t free_from;
680 int count = 0, err = 0;
681 struct page *ipage;
682 bool truncate_page = false;
683
684 trace_f2fs_truncate_blocks_enter(inode, from);
685
686 free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
687
688 if (free_from >= max_file_blocks(inode))
689 goto free_partial;
690
691 if (lock)
692 f2fs_lock_op(sbi);
693
694 ipage = f2fs_get_node_page(sbi, inode->i_ino);
695 if (IS_ERR(ipage)) {
696 err = PTR_ERR(ipage);
697 goto out;
698 }
699
700 if (f2fs_has_inline_data(inode)) {
701 f2fs_truncate_inline_inode(inode, ipage, from);
702 f2fs_put_page(ipage, 1);
703 truncate_page = true;
704 goto out;
705 }
706
707 set_new_dnode(&dn, inode, ipage, NULL, 0);
708 err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
709 if (err) {
710 if (err == -ENOENT)
711 goto free_next;
712 goto out;
713 }
714
715 count = ADDRS_PER_PAGE(dn.node_page, inode);
716
717 count -= dn.ofs_in_node;
718 f2fs_bug_on(sbi, count < 0);
719
720 if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
721 f2fs_truncate_data_blocks_range(&dn, count);
722 free_from += count;
723 }
724
725 f2fs_put_dnode(&dn);
726 free_next:
727 err = f2fs_truncate_inode_blocks(inode, free_from);
728 out:
729 if (lock)
730 f2fs_unlock_op(sbi);
731 free_partial:
732 /* lastly zero out the first data page */
733 if (!err)
734 err = truncate_partial_data_page(inode, from, truncate_page);
735
736 trace_f2fs_truncate_blocks_exit(inode, err);
737 return err;
738 }
739
740 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
741 {
742 u64 free_from = from;
743 int err;
744
745 #ifdef CONFIG_F2FS_FS_COMPRESSION
746 /*
747 	 * for compressed files, only cluster-size-aligned
748 	 * truncation is supported.
749 */
750 if (f2fs_compressed_file(inode))
751 free_from = round_up(from,
752 F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
753 #endif
754
755 err = f2fs_do_truncate_blocks(inode, free_from, lock);
756 if (err)
757 return err;
758
759 #ifdef CONFIG_F2FS_FS_COMPRESSION
760 /*
761 	 * For a compressed file, don't allow direct writes after its compressed
762 	 * blocks have been released, but do allow them after truncating to zero.
763 */
764 if (f2fs_compressed_file(inode) && !free_from
765 && is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
766 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
767
768 if (from != free_from) {
769 err = f2fs_truncate_partial_cluster(inode, from, lock);
770 if (err)
771 return err;
772 }
773 #endif
774
775 return 0;
776 }
777
778 int f2fs_truncate(struct inode *inode)
779 {
780 int err;
781
782 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
783 return -EIO;
784
785 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
786 S_ISLNK(inode->i_mode)))
787 return 0;
788
789 trace_f2fs_truncate(inode);
790
791 if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
792 f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
793 return -EIO;
794 }
795
796 err = f2fs_dquot_initialize(inode);
797 if (err)
798 return err;
799
800 /* we should check inline_data size */
801 if (!f2fs_may_inline_data(inode)) {
802 err = f2fs_convert_inline_inode(inode);
803 if (err)
804 return err;
805 }
806
807 err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
808 if (err)
809 return err;
810
811 inode->i_mtime = inode->i_ctime = current_time(inode);
812 f2fs_mark_inode_dirty_sync(inode, false);
813 return 0;
814 }
815
816 int f2fs_getattr(const struct path *path, struct kstat *stat,
817 u32 request_mask, unsigned int query_flags)
818 {
819 struct inode *inode = d_inode(path->dentry);
820 struct f2fs_inode_info *fi = F2FS_I(inode);
821 struct f2fs_inode *ri = NULL;
822 unsigned int flags;
823
824 if (f2fs_has_extra_attr(inode) &&
825 f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
826 F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
827 stat->result_mask |= STATX_BTIME;
828 stat->btime.tv_sec = fi->i_crtime.tv_sec;
829 stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
830 }
831
832 flags = fi->i_flags;
833 if (flags & F2FS_COMPR_FL)
834 stat->attributes |= STATX_ATTR_COMPRESSED;
835 if (flags & F2FS_APPEND_FL)
836 stat->attributes |= STATX_ATTR_APPEND;
837 if (IS_ENCRYPTED(inode))
838 stat->attributes |= STATX_ATTR_ENCRYPTED;
839 if (flags & F2FS_IMMUTABLE_FL)
840 stat->attributes |= STATX_ATTR_IMMUTABLE;
841 if (flags & F2FS_NODUMP_FL)
842 stat->attributes |= STATX_ATTR_NODUMP;
843 if (IS_VERITY(inode))
844 stat->attributes |= STATX_ATTR_VERITY;
845
846 stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
847 STATX_ATTR_APPEND |
848 STATX_ATTR_ENCRYPTED |
849 STATX_ATTR_IMMUTABLE |
850 STATX_ATTR_NODUMP |
851 STATX_ATTR_VERITY);
852
853 generic_fillattr(inode, stat);
854
855 /* we need to show initial sectors used for inline_data/dentries */
856 if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
857 f2fs_has_inline_dentry(inode))
858 stat->blocks += (stat->size + 511) >> 9;
859
860 return 0;
861 }
862
863 #ifdef CONFIG_F2FS_FS_POSIX_ACL
864 static void __setattr_copy(struct inode *inode, const struct iattr *attr)
865 {
866 unsigned int ia_valid = attr->ia_valid;
867
868 if (ia_valid & ATTR_UID)
869 inode->i_uid = attr->ia_uid;
870 if (ia_valid & ATTR_GID)
871 inode->i_gid = attr->ia_gid;
872 if (ia_valid & ATTR_ATIME)
873 inode->i_atime = attr->ia_atime;
874 if (ia_valid & ATTR_MTIME)
875 inode->i_mtime = attr->ia_mtime;
876 if (ia_valid & ATTR_CTIME)
877 inode->i_ctime = attr->ia_ctime;
878 if (ia_valid & ATTR_MODE) {
879 umode_t mode = attr->ia_mode;
880
881 if (!in_group_p(inode->i_gid) &&
882 !capable_wrt_inode_uidgid(inode, CAP_FSETID))
883 mode &= ~S_ISGID;
884 set_acl_inode(inode, mode);
885 }
886 }
887 #else
888 #define __setattr_copy setattr_copy
889 #endif
890
891 int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
892 {
893 struct inode *inode = d_inode(dentry);
894 int err;
895
896 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
897 return -EIO;
898
899 if (unlikely(IS_IMMUTABLE(inode)))
900 return -EPERM;
901
902 if (unlikely(IS_APPEND(inode) &&
903 (attr->ia_valid & (ATTR_MODE | ATTR_UID |
904 ATTR_GID | ATTR_TIMES_SET))))
905 return -EPERM;
906
907 if ((attr->ia_valid & ATTR_SIZE) &&
908 !f2fs_is_compress_backend_ready(inode))
909 return -EOPNOTSUPP;
910
911 err = setattr_prepare(dentry, attr);
912 if (err)
913 return err;
914
915 err = fscrypt_prepare_setattr(dentry, attr);
916 if (err)
917 return err;
918
919 err = fsverity_prepare_setattr(dentry, attr);
920 if (err)
921 return err;
922
923 if (is_quota_modification(inode, attr)) {
924 err = f2fs_dquot_initialize(inode);
925 if (err)
926 return err;
927 }
928 if ((attr->ia_valid & ATTR_UID &&
929 !uid_eq(attr->ia_uid, inode->i_uid)) ||
930 (attr->ia_valid & ATTR_GID &&
931 !gid_eq(attr->ia_gid, inode->i_gid))) {
932 f2fs_lock_op(F2FS_I_SB(inode));
933 err = dquot_transfer(inode, attr);
934 if (err) {
935 set_sbi_flag(F2FS_I_SB(inode),
936 SBI_QUOTA_NEED_REPAIR);
937 f2fs_unlock_op(F2FS_I_SB(inode));
938 return err;
939 }
940 /*
941 * update uid/gid under lock_op(), so that dquot and inode can
942 * be updated atomically.
943 */
944 if (attr->ia_valid & ATTR_UID)
945 inode->i_uid = attr->ia_uid;
946 if (attr->ia_valid & ATTR_GID)
947 inode->i_gid = attr->ia_gid;
948 f2fs_mark_inode_dirty_sync(inode, true);
949 f2fs_unlock_op(F2FS_I_SB(inode));
950 }
951
952 if (attr->ia_valid & ATTR_SIZE) {
953 loff_t old_size = i_size_read(inode);
954
955 if (attr->ia_size > MAX_INLINE_DATA(inode)) {
956 /*
957 			 * should convert the inline inode before i_size_write() to
958 			 * keep the size within the inline_data limit while the inline flag is set.
959 */
960 err = f2fs_convert_inline_inode(inode);
961 if (err)
962 return err;
963 }
964
965 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
966 f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
967
968 truncate_setsize(inode, attr->ia_size);
969
970 if (attr->ia_size <= old_size)
971 err = f2fs_truncate(inode);
972 /*
973 * do not trim all blocks after i_size if target size is
974 * larger than i_size.
975 */
976 f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
977 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
978 if (err)
979 return err;
980
981 spin_lock(&F2FS_I(inode)->i_size_lock);
982 inode->i_mtime = inode->i_ctime = current_time(inode);
983 F2FS_I(inode)->last_disk_size = i_size_read(inode);
984 spin_unlock(&F2FS_I(inode)->i_size_lock);
985 }
986
987 __setattr_copy(inode, attr);
988
989 if (attr->ia_valid & ATTR_MODE) {
990 err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));
991
992 if (is_inode_flag_set(inode, FI_ACL_MODE)) {
993 if (!err)
994 inode->i_mode = F2FS_I(inode)->i_acl_mode;
995 clear_inode_flag(inode, FI_ACL_MODE);
996 }
997 }
998
999 	/* file size may have changed here */
1000 f2fs_mark_inode_dirty_sync(inode, true);
1001
1002 /* inode change will produce dirty node pages flushed by checkpoint */
1003 f2fs_balance_fs(F2FS_I_SB(inode), true);
1004
1005 return err;
1006 }
1007
1008 const struct inode_operations f2fs_file_inode_operations = {
1009 .getattr = f2fs_getattr,
1010 .setattr = f2fs_setattr,
1011 .get_acl = f2fs_get_acl,
1012 .set_acl = f2fs_set_acl,
1013 .listxattr = f2fs_listxattr,
1014 .fiemap = f2fs_fiemap,
1015 };
1016
1017 static int fill_zero(struct inode *inode, pgoff_t index,
1018 loff_t start, loff_t len)
1019 {
1020 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1021 struct page *page;
1022
1023 if (!len)
1024 return 0;
1025
1026 f2fs_balance_fs(sbi, true);
1027
1028 f2fs_lock_op(sbi);
1029 page = f2fs_get_new_data_page(inode, NULL, index, false);
1030 f2fs_unlock_op(sbi);
1031
1032 if (IS_ERR(page))
1033 return PTR_ERR(page);
1034
1035 f2fs_wait_on_page_writeback(page, DATA, true, true);
1036 zero_user(page, start, len);
1037 set_page_dirty(page);
1038 f2fs_put_page(page, 1);
1039 return 0;
1040 }
1041
1042 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
1043 {
1044 int err;
1045
1046 while (pg_start < pg_end) {
1047 struct dnode_of_data dn;
1048 pgoff_t end_offset, count;
1049
1050 set_new_dnode(&dn, inode, NULL, NULL, 0);
1051 err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
1052 if (err) {
1053 if (err == -ENOENT) {
1054 pg_start = f2fs_get_next_page_offset(&dn,
1055 pg_start);
1056 continue;
1057 }
1058 return err;
1059 }
1060
1061 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1062 count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1063
1064 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1065
1066 f2fs_truncate_data_blocks_range(&dn, count);
1067 f2fs_put_dnode(&dn);
1068
1069 pg_start += count;
1070 }
1071 return 0;
1072 }
1073
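/*
 * FALLOC_FL_PUNCH_HOLE helper: zero the partial pages at both ends of the
 * range and drop the fully covered blocks via f2fs_truncate_hole() while
 * holding i_gc_rwsem and i_mmap_sem to keep GC and mmap faults out.
 */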
1074 static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
1075 {
1076 pgoff_t pg_start, pg_end;
1077 loff_t off_start, off_end;
1078 int ret;
1079
1080 ret = f2fs_convert_inline_inode(inode);
1081 if (ret)
1082 return ret;
1083
1084 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1085 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1086
1087 off_start = offset & (PAGE_SIZE - 1);
1088 off_end = (offset + len) & (PAGE_SIZE - 1);
1089
1090 if (pg_start == pg_end) {
1091 ret = fill_zero(inode, pg_start, off_start,
1092 off_end - off_start);
1093 if (ret)
1094 return ret;
1095 } else {
1096 if (off_start) {
1097 ret = fill_zero(inode, pg_start++, off_start,
1098 PAGE_SIZE - off_start);
1099 if (ret)
1100 return ret;
1101 }
1102 if (off_end) {
1103 ret = fill_zero(inode, pg_end, 0, off_end);
1104 if (ret)
1105 return ret;
1106 }
1107
1108 if (pg_start < pg_end) {
1109 loff_t blk_start, blk_end;
1110 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1111
1112 f2fs_balance_fs(sbi, true);
1113
1114 blk_start = (loff_t)pg_start << PAGE_SHIFT;
1115 blk_end = (loff_t)pg_end << PAGE_SHIFT;
1116
1117 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1118 f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1119
1120 truncate_pagecache_range(inode, blk_start, blk_end - 1);
1121
1122 f2fs_lock_op(sbi);
1123 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
1124 f2fs_unlock_op(sbi);
1125
1126 f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1127 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1128 }
1129 }
1130
1131 return ret;
1132 }
1133
1134 static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1135 int *do_replace, pgoff_t off, pgoff_t len)
1136 {
1137 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1138 struct dnode_of_data dn;
1139 int ret, done, i;
1140
1141 next_dnode:
1142 set_new_dnode(&dn, inode, NULL, NULL, 0);
1143 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
1144 if (ret && ret != -ENOENT) {
1145 return ret;
1146 } else if (ret == -ENOENT) {
1147 if (dn.max_level == 0)
1148 return -ENOENT;
1149 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1150 dn.ofs_in_node, len);
1151 blkaddr += done;
1152 do_replace += done;
1153 goto next;
1154 }
1155
1156 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1157 dn.ofs_in_node, len);
1158 for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
1159 *blkaddr = f2fs_data_blkaddr(&dn);
1160
1161 if (__is_valid_data_blkaddr(*blkaddr) &&
1162 !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1163 DATA_GENERIC_ENHANCE)) {
1164 f2fs_put_dnode(&dn);
1165 return -EFSCORRUPTED;
1166 }
1167
1168 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
1169
1170 if (f2fs_lfs_mode(sbi)) {
1171 f2fs_put_dnode(&dn);
1172 return -EOPNOTSUPP;
1173 }
1174
1175 /* do not invalidate this block address */
1176 f2fs_update_data_blkaddr(&dn, NULL_ADDR);
1177 *do_replace = 1;
1178 }
1179 }
1180 f2fs_put_dnode(&dn);
1181 next:
1182 len -= done;
1183 off += done;
1184 if (len)
1185 goto next_dnode;
1186 return 0;
1187 }
1188
1189 static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1190 int *do_replace, pgoff_t off, int len)
1191 {
1192 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1193 struct dnode_of_data dn;
1194 int ret, i;
1195
1196 for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1197 if (*do_replace == 0)
1198 continue;
1199
1200 set_new_dnode(&dn, inode, NULL, NULL, 0);
1201 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
1202 if (ret) {
1203 dec_valid_block_count(sbi, inode, 1);
1204 f2fs_invalidate_blocks(sbi, *blkaddr);
1205 } else {
1206 f2fs_update_data_blkaddr(&dn, *blkaddr);
1207 }
1208 f2fs_put_dnode(&dn);
1209 }
1210 return 0;
1211 }
1212
1213 static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1214 block_t *blkaddr, int *do_replace,
1215 pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1216 {
1217 struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1218 pgoff_t i = 0;
1219 int ret;
1220
1221 while (i < len) {
1222 if (blkaddr[i] == NULL_ADDR && !full) {
1223 i++;
1224 continue;
1225 }
1226
1227 if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1228 struct dnode_of_data dn;
1229 struct node_info ni;
1230 size_t new_size;
1231 pgoff_t ilen;
1232
1233 set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
1234 ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
1235 if (ret)
1236 return ret;
1237
1238 ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
1239 if (ret) {
1240 f2fs_put_dnode(&dn);
1241 return ret;
1242 }
1243
1244 ilen = min((pgoff_t)
1245 ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1246 dn.ofs_in_node, len - i);
1247 do {
1248 dn.data_blkaddr = f2fs_data_blkaddr(&dn);
1249 f2fs_truncate_data_blocks_range(&dn, 1);
1250
1251 if (do_replace[i]) {
1252 f2fs_i_blocks_write(src_inode,
1253 1, false, false);
1254 f2fs_i_blocks_write(dst_inode,
1255 1, true, false);
1256 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1257 blkaddr[i], ni.version, true, false);
1258
1259 do_replace[i] = 0;
1260 }
1261 dn.ofs_in_node++;
1262 i++;
1263 new_size = (loff_t)(dst + i) << PAGE_SHIFT;
1264 if (dst_inode->i_size < new_size)
1265 f2fs_i_size_write(dst_inode, new_size);
1266 } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
1267
1268 f2fs_put_dnode(&dn);
1269 } else {
1270 struct page *psrc, *pdst;
1271
1272 psrc = f2fs_get_lock_data_page(src_inode,
1273 src + i, true);
1274 if (IS_ERR(psrc))
1275 return PTR_ERR(psrc);
1276 pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
1277 true);
1278 if (IS_ERR(pdst)) {
1279 f2fs_put_page(psrc, 1);
1280 return PTR_ERR(pdst);
1281 }
1282 f2fs_copy_page(psrc, pdst);
1283 set_page_dirty(pdst);
1284 f2fs_put_page(pdst, 1);
1285 f2fs_put_page(psrc, 1);
1286
1287 ret = f2fs_truncate_hole(src_inode,
1288 src + i, src + i + 1);
1289 if (ret)
1290 return ret;
1291 i++;
1292 }
1293 }
1294 return 0;
1295 }
1296
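/*
 * Move up to @len blocks from @src to @dst (possibly across inodes) in
 * batches of four dnode blocks: read out the source block addresses,
 * replace or copy them at the destination, and roll the source back on
 * failure.
 */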
1297 static int __exchange_data_block(struct inode *src_inode,
1298 struct inode *dst_inode, pgoff_t src, pgoff_t dst,
1299 pgoff_t len, bool full)
1300 {
1301 block_t *src_blkaddr;
1302 int *do_replace;
1303 pgoff_t olen;
1304 int ret;
1305
1306 while (len) {
1307 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
1308
1309 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1310 array_size(olen, sizeof(block_t)),
1311 GFP_NOFS);
1312 if (!src_blkaddr)
1313 return -ENOMEM;
1314
1315 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
1316 array_size(olen, sizeof(int)),
1317 GFP_NOFS);
1318 if (!do_replace) {
1319 kvfree(src_blkaddr);
1320 return -ENOMEM;
1321 }
1322
1323 ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1324 do_replace, src, olen);
1325 if (ret)
1326 goto roll_back;
1327
1328 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1329 do_replace, src, dst, olen, full);
1330 if (ret)
1331 goto roll_back;
1332
1333 src += olen;
1334 dst += olen;
1335 len -= olen;
1336
1337 kvfree(src_blkaddr);
1338 kvfree(do_replace);
1339 }
1340 return 0;
1341
1342 roll_back:
1343 __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
1344 kvfree(src_blkaddr);
1345 kvfree(do_replace);
1346 return ret;
1347 }
1348
1349 static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1350 {
1351 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1352 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1353 pgoff_t start = offset >> PAGE_SHIFT;
1354 pgoff_t end = (offset + len) >> PAGE_SHIFT;
1355 int ret;
1356
1357 f2fs_balance_fs(sbi, true);
1358
1359 /* avoid gc operation during block exchange */
1360 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1361 f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1362
1363 f2fs_lock_op(sbi);
1364 f2fs_drop_extent_tree(inode);
1365 truncate_pagecache(inode, offset);
1366 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1367 f2fs_unlock_op(sbi);
1368
1369 f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1370 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1371 return ret;
1372 }
1373
1374 static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1375 {
1376 loff_t new_size;
1377 int ret;
1378
1379 if (offset + len >= i_size_read(inode))
1380 return -EINVAL;
1381
1382 /* collapse range should be aligned to block size of f2fs. */
1383 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1384 return -EINVAL;
1385
1386 ret = f2fs_convert_inline_inode(inode);
1387 if (ret)
1388 return ret;
1389
1390 /* write out all dirty pages from offset */
1391 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1392 if (ret)
1393 return ret;
1394
1395 ret = f2fs_do_collapse(inode, offset, len);
1396 if (ret)
1397 return ret;
1398
1399 /* write out all moved pages, if possible */
1400 f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1401 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1402 truncate_pagecache(inode, offset);
1403
1404 new_size = i_size_read(inode) - len;
1405 ret = f2fs_truncate_blocks(inode, new_size, true);
1406 f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1407 if (!ret)
1408 f2fs_i_size_write(inode, new_size);
1409 return ret;
1410 }
1411
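/*
 * Reserve and reset block addresses in [start, end) of the current dnode
 * so the range reads back as zeroes: NULL_ADDR slots are reserved, and
 * valid addresses are invalidated and replaced with NEW_ADDR.
 */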
1412 static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1413 pgoff_t end)
1414 {
1415 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1416 pgoff_t index = start;
1417 unsigned int ofs_in_node = dn->ofs_in_node;
1418 blkcnt_t count = 0;
1419 int ret;
1420
1421 for (; index < end; index++, dn->ofs_in_node++) {
1422 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
1423 count++;
1424 }
1425
1426 dn->ofs_in_node = ofs_in_node;
1427 ret = f2fs_reserve_new_blocks(dn, count);
1428 if (ret)
1429 return ret;
1430
1431 dn->ofs_in_node = ofs_in_node;
1432 for (index = start; index < end; index++, dn->ofs_in_node++) {
1433 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1434 /*
1435 		 * f2fs_reserve_new_blocks will not guarantee that every block
1436 		 * in the range gets allocated.
1437 */
1438 if (dn->data_blkaddr == NULL_ADDR) {
1439 ret = -ENOSPC;
1440 break;
1441 }
1442
1443 if (dn->data_blkaddr == NEW_ADDR)
1444 continue;
1445
1446 if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr,
1447 DATA_GENERIC_ENHANCE)) {
1448 ret = -EFSCORRUPTED;
1449 break;
1450 }
1451
1452 f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
1453 dn->data_blkaddr = NEW_ADDR;
1454 f2fs_set_data_blkaddr(dn);
1455 }
1456
1457 f2fs_update_extent_cache_range(dn, start, 0, index - start);
1458
1459 return ret;
1460 }
1461
1462 static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1463 int mode)
1464 {
1465 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1466 struct address_space *mapping = inode->i_mapping;
1467 pgoff_t index, pg_start, pg_end;
1468 loff_t new_size = i_size_read(inode);
1469 loff_t off_start, off_end;
1470 int ret = 0;
1471
1472 ret = inode_newsize_ok(inode, (len + offset));
1473 if (ret)
1474 return ret;
1475
1476 ret = f2fs_convert_inline_inode(inode);
1477 if (ret)
1478 return ret;
1479
1480 ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1481 if (ret)
1482 return ret;
1483
1484 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1485 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
1486
1487 off_start = offset & (PAGE_SIZE - 1);
1488 off_end = (offset + len) & (PAGE_SIZE - 1);
1489
1490 if (pg_start == pg_end) {
1491 ret = fill_zero(inode, pg_start, off_start,
1492 off_end - off_start);
1493 if (ret)
1494 return ret;
1495
1496 new_size = max_t(loff_t, new_size, offset + len);
1497 } else {
1498 if (off_start) {
1499 ret = fill_zero(inode, pg_start++, off_start,
1500 PAGE_SIZE - off_start);
1501 if (ret)
1502 return ret;
1503
1504 new_size = max_t(loff_t, new_size,
1505 (loff_t)pg_start << PAGE_SHIFT);
1506 }
1507
1508 for (index = pg_start; index < pg_end;) {
1509 struct dnode_of_data dn;
1510 unsigned int end_offset;
1511 pgoff_t end;
1512
1513 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1514 f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1515
1516 truncate_pagecache_range(inode,
1517 (loff_t)index << PAGE_SHIFT,
1518 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1519
1520 f2fs_lock_op(sbi);
1521
1522 set_new_dnode(&dn, inode, NULL, NULL, 0);
1523 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
1524 if (ret) {
1525 f2fs_unlock_op(sbi);
1526 f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1527 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1528 goto out;
1529 }
1530
1531 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1532 end = min(pg_end, end_offset - dn.ofs_in_node + index);
1533
1534 ret = f2fs_do_zero_range(&dn, index, end);
1535 f2fs_put_dnode(&dn);
1536
1537 f2fs_unlock_op(sbi);
1538 f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1539 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1540
1541 f2fs_balance_fs(sbi, dn.node_changed);
1542
1543 if (ret)
1544 goto out;
1545
1546 index = end;
1547 new_size = max_t(loff_t, new_size,
1548 (loff_t)index << PAGE_SHIFT);
1549 }
1550
1551 if (off_end) {
1552 ret = fill_zero(inode, pg_end, 0, off_end);
1553 if (ret)
1554 goto out;
1555
1556 new_size = max_t(loff_t, new_size, offset + len);
1557 }
1558 }
1559
1560 out:
1561 if (new_size > i_size_read(inode)) {
1562 if (mode & FALLOC_FL_KEEP_SIZE)
1563 file_set_keep_isize(inode);
1564 else
1565 f2fs_i_size_write(inode, new_size);
1566 }
1567 return ret;
1568 }
1569
1570 static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1571 {
1572 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1573 pgoff_t nr, pg_start, pg_end, delta, idx;
1574 loff_t new_size;
1575 int ret = 0;
1576
1577 new_size = i_size_read(inode) + len;
1578 ret = inode_newsize_ok(inode, new_size);
1579 if (ret)
1580 return ret;
1581
1582 if (offset >= i_size_read(inode))
1583 return -EINVAL;
1584
1585 /* insert range should be aligned to block size of f2fs. */
1586 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1587 return -EINVAL;
1588
1589 ret = f2fs_convert_inline_inode(inode);
1590 if (ret)
1591 return ret;
1592
1593 f2fs_balance_fs(sbi, true);
1594
1595 f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1596 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
1597 f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1598 if (ret)
1599 return ret;
1600
1601 /* write out all dirty pages from offset */
1602 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1603 if (ret)
1604 return ret;
1605
1606 pg_start = offset >> PAGE_SHIFT;
1607 pg_end = (offset + len) >> PAGE_SHIFT;
1608 delta = pg_end - pg_start;
1609 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1610
1611 /* avoid gc operation during block exchange */
1612 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1613 f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1614 truncate_pagecache(inode, offset);
1615
1616 while (!ret && idx > pg_start) {
1617 nr = idx - pg_start;
1618 if (nr > delta)
1619 nr = delta;
1620 idx -= nr;
1621
1622 f2fs_lock_op(sbi);
1623 f2fs_drop_extent_tree(inode);
1624
1625 ret = __exchange_data_block(inode, inode, idx,
1626 idx + delta, nr, false);
1627 f2fs_unlock_op(sbi);
1628 }
1629 f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1630 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1631
1632 /* write out all moved pages, if possible */
1633 f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
1634 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1635 truncate_pagecache(inode, offset);
1636 f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
1637
1638 if (!ret)
1639 f2fs_i_size_write(inode, new_size);
1640 return ret;
1641 }
1642
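/*
 * Preallocate blocks for the default fallocate() mode. Pinned files are
 * expanded one section at a time from the cold pinned log; other files go
 * through a single f2fs_map_blocks() call.
 */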
1643 static int expand_inode_data(struct inode *inode, loff_t offset,
1644 loff_t len, int mode)
1645 {
1646 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1647 struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
1648 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1649 .m_may_create = true };
1650 pgoff_t pg_start, pg_end;
1651 loff_t new_size = i_size_read(inode);
1652 loff_t off_end;
1653 block_t expanded = 0;
1654 int err;
1655
1656 err = inode_newsize_ok(inode, (len + offset));
1657 if (err)
1658 return err;
1659
1660 err = f2fs_convert_inline_inode(inode);
1661 if (err)
1662 return err;
1663
1664 f2fs_balance_fs(sbi, true);
1665
1666 pg_start = ((unsigned long long)offset) >> PAGE_SHIFT;
1667 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
1668 off_end = (offset + len) & (PAGE_SIZE - 1);
1669
1670 map.m_lblk = pg_start;
1671 map.m_len = pg_end - pg_start;
1672 if (off_end)
1673 map.m_len++;
1674
1675 if (!map.m_len)
1676 return 0;
1677
1678 if (f2fs_is_pinned_file(inode)) {
1679 block_t sec_blks = BLKS_PER_SEC(sbi);
1680 block_t sec_len = roundup(map.m_len, sec_blks);
1681
1682 map.m_len = sec_blks;
1683 next_alloc:
1684 if (has_not_enough_free_secs(sbi, 0,
1685 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
1686 f2fs_down_write(&sbi->gc_lock);
1687 err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
1688 if (err && err != -ENODATA && err != -EAGAIN)
1689 goto out_err;
1690 }
1691
1692 f2fs_down_write(&sbi->pin_sem);
1693
1694 f2fs_lock_op(sbi);
1695 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
1696 f2fs_unlock_op(sbi);
1697
1698 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
1699 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
1700 file_dont_truncate(inode);
1701
1702 f2fs_up_write(&sbi->pin_sem);
1703
1704 expanded += map.m_len;
1705 sec_len -= map.m_len;
1706 map.m_lblk += map.m_len;
1707 if (!err && sec_len)
1708 goto next_alloc;
1709
1710 map.m_len = expanded;
1711 } else {
1712 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1713 expanded = map.m_len;
1714 }
1715 out_err:
1716 if (err) {
1717 pgoff_t last_off;
1718
1719 if (!expanded)
1720 return err;
1721
1722 last_off = pg_start + expanded - 1;
1723
1724 /* update new size to the failed position */
1725 new_size = (last_off == pg_end) ? offset + len :
1726 (loff_t)(last_off + 1) << PAGE_SHIFT;
1727 } else {
1728 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
1729 }
1730
1731 if (new_size > i_size_read(inode)) {
1732 if (mode & FALLOC_FL_KEEP_SIZE)
1733 file_set_keep_isize(inode);
1734 else
1735 f2fs_i_size_write(inode, new_size);
1736 }
1737
1738 return err;
1739 }
1740
1741 static long f2fs_fallocate(struct file *file, int mode,
1742 loff_t offset, loff_t len)
1743 {
1744 struct inode *inode = file_inode(file);
1745 long ret = 0;
1746
1747 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1748 return -EIO;
1749 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1750 return -ENOSPC;
1751 if (!f2fs_is_compress_backend_ready(inode))
1752 return -EOPNOTSUPP;
1753
1754 	/* f2fs only supports ->fallocate for regular files */
1755 if (!S_ISREG(inode->i_mode))
1756 return -EINVAL;
1757
1758 if (IS_ENCRYPTED(inode) &&
1759 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
1760 return -EOPNOTSUPP;
1761
1762 /*
1763 	 * Pinned files should not support partial truncation since their
1764 	 * blocks can be in use by applications.
1765 */
1766 if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
1767 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1768 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1769 return -EOPNOTSUPP;
1770
1771 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
1772 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1773 FALLOC_FL_INSERT_RANGE))
1774 return -EOPNOTSUPP;
1775
1776 inode_lock(inode);
1777
1778 ret = file_modified(file);
1779 if (ret)
1780 goto out;
1781
1782 if (mode & FALLOC_FL_PUNCH_HOLE) {
1783 if (offset >= inode->i_size)
1784 goto out;
1785
1786 ret = punch_hole(inode, offset, len);
1787 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1788 ret = f2fs_collapse_range(inode, offset, len);
1789 } else if (mode & FALLOC_FL_ZERO_RANGE) {
1790 ret = f2fs_zero_range(inode, offset, len, mode);
1791 } else if (mode & FALLOC_FL_INSERT_RANGE) {
1792 ret = f2fs_insert_range(inode, offset, len);
1793 } else {
1794 ret = expand_inode_data(inode, offset, len, mode);
1795 }
1796
1797 if (!ret) {
1798 inode->i_mtime = inode->i_ctime = current_time(inode);
1799 f2fs_mark_inode_dirty_sync(inode, false);
1800 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
1801 }
1802
1803 out:
1804 inode_unlock(inode);
1805
1806 trace_f2fs_fallocate(inode, mode, offset, len, ret);
1807 return ret;
1808 }
1809
1810 static int f2fs_release_file(struct inode *inode, struct file *filp)
1811 {
1812 /*
1813 	 * f2fs_release_file is called on every close. So we should not drop
1814 	 * any in-memory pages on a close issued by another process.
1815 */
1816 if (!(filp->f_mode & FMODE_WRITE) ||
1817 atomic_read(&inode->i_writecount) != 1)
1818 return 0;
1819
1820 	/* any remaining atomic pages should be discarded */
1821 if (f2fs_is_atomic_file(inode))
1822 f2fs_drop_inmem_pages(inode);
1823 if (f2fs_is_volatile_file(inode)) {
1824 set_inode_flag(inode, FI_DROP_CACHE);
1825 filemap_fdatawrite(inode->i_mapping);
1826 clear_inode_flag(inode, FI_DROP_CACHE);
1827 clear_inode_flag(inode, FI_VOLATILE_FILE);
1828 stat_dec_volatile_write(inode);
1829 }
1830 return 0;
1831 }
1832
1833 static int f2fs_file_flush(struct file *file, fl_owner_t id)
1834 {
1835 struct inode *inode = file_inode(file);
1836
1837 /*
1838 	 * If the process doing a transaction crashes, we should roll it back.
1839 	 * Otherwise, other readers/writers can see a corrupted database until
1840 	 * all the writers close their files. Since this should be done before
1841 	 * dropping the file lock, it needs to be done in ->flush.
1842 */
1843 if (f2fs_is_atomic_file(inode) &&
1844 F2FS_I(inode)->inmem_task == current)
1845 f2fs_drop_inmem_pages(inode);
1846 return 0;
1847 }
1848
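/*
 * Apply the i_flags selected by @mask after validating feature support
 * (casefold, compression) and per-flag constraints; callers such as
 * f2fs_ioc_setflags() hold the inode lock around this.
 */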
1849 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1850 {
1851 struct f2fs_inode_info *fi = F2FS_I(inode);
1852 u32 masked_flags = fi->i_flags & mask;
1853
1854 /* mask can be shrunk by flags_valid selector */
1855 iflags &= mask;
1856
1857 /* Is it quota file? Do not allow user to mess with it */
1858 if (IS_NOQUOTA(inode))
1859 return -EPERM;
1860
1861 if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
1862 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1863 return -EOPNOTSUPP;
1864 if (!f2fs_empty_dir(inode))
1865 return -ENOTEMPTY;
1866 }
1867
1868 if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1869 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1870 return -EOPNOTSUPP;
1871 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1872 return -EINVAL;
1873 }
1874
1875 if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
1876 if (masked_flags & F2FS_COMPR_FL) {
1877 if (!f2fs_disable_compressed_file(inode))
1878 return -EINVAL;
1879 } else {
1880 if (!f2fs_may_compress(inode))
1881 return -EINVAL;
1882 if (S_ISREG(inode->i_mode) && inode->i_size)
1883 return -EINVAL;
1884 if (set_compress_context(inode))
1885 return -EOPNOTSUPP;
1886 }
1887 }
1888
1889 fi->i_flags = iflags | (fi->i_flags & ~mask);
1890 f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1891 (fi->i_flags & F2FS_NOCOMP_FL));
1892
1893 if (fi->i_flags & F2FS_PROJINHERIT_FL)
1894 set_inode_flag(inode, FI_PROJ_INHERIT);
1895 else
1896 clear_inode_flag(inode, FI_PROJ_INHERIT);
1897
1898 inode->i_ctime = current_time(inode);
1899 f2fs_set_inode_flags(inode);
1900 f2fs_mark_inode_dirty_sync(inode, true);
1901 return 0;
1902 }
1903
1904 /* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
1905
1906 /*
1907 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1908 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1909 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
1910 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1911 */
1912
1913 static const struct {
1914 u32 iflag;
1915 u32 fsflag;
1916 } f2fs_fsflags_map[] = {
1917 { F2FS_COMPR_FL, FS_COMPR_FL },
1918 { F2FS_SYNC_FL, FS_SYNC_FL },
1919 { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL },
1920 { F2FS_APPEND_FL, FS_APPEND_FL },
1921 { F2FS_NODUMP_FL, FS_NODUMP_FL },
1922 { F2FS_NOATIME_FL, FS_NOATIME_FL },
1923 { F2FS_NOCOMP_FL, FS_NOCOMP_FL },
1924 { F2FS_INDEX_FL, FS_INDEX_FL },
1925 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
1926 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
1927 { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL },
1928 };
1929
1930 #define F2FS_GETTABLE_FS_FL ( \
1931 FS_COMPR_FL | \
1932 FS_SYNC_FL | \
1933 FS_IMMUTABLE_FL | \
1934 FS_APPEND_FL | \
1935 FS_NODUMP_FL | \
1936 FS_NOATIME_FL | \
1937 FS_NOCOMP_FL | \
1938 FS_INDEX_FL | \
1939 FS_DIRSYNC_FL | \
1940 FS_PROJINHERIT_FL | \
1941 FS_ENCRYPT_FL | \
1942 FS_INLINE_DATA_FL | \
1943 FS_NOCOW_FL | \
1944 FS_VERITY_FL | \
1945 FS_CASEFOLD_FL)
1946
1947 #define F2FS_SETTABLE_FS_FL ( \
1948 FS_COMPR_FL | \
1949 FS_SYNC_FL | \
1950 FS_IMMUTABLE_FL | \
1951 FS_APPEND_FL | \
1952 FS_NODUMP_FL | \
1953 FS_NOATIME_FL | \
1954 FS_NOCOMP_FL | \
1955 FS_DIRSYNC_FL | \
1956 FS_PROJINHERIT_FL | \
1957 FS_CASEFOLD_FL)
1958
1959 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1960 static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1961 {
1962 u32 fsflags = 0;
1963 int i;
1964
1965 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1966 if (iflags & f2fs_fsflags_map[i].iflag)
1967 fsflags |= f2fs_fsflags_map[i].fsflag;
1968
1969 return fsflags;
1970 }
1971
1972 /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1973 static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1974 {
1975 u32 iflags = 0;
1976 int i;
1977
1978 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1979 if (fsflags & f2fs_fsflags_map[i].fsflag)
1980 iflags |= f2fs_fsflags_map[i].iflag;
1981
1982 return iflags;
1983 }
1984
1985 static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1986 {
1987 struct inode *inode = file_inode(filp);
1988 struct f2fs_inode_info *fi = F2FS_I(inode);
1989 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1990
1991 if (IS_ENCRYPTED(inode))
1992 fsflags |= FS_ENCRYPT_FL;
1993 if (IS_VERITY(inode))
1994 fsflags |= FS_VERITY_FL;
1995 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1996 fsflags |= FS_INLINE_DATA_FL;
1997 if (is_inode_flag_set(inode, FI_PIN_FILE))
1998 fsflags |= FS_NOCOW_FL;
1999
2000 fsflags &= F2FS_GETTABLE_FS_FL;
2001
2002 return put_user(fsflags, (int __user *)arg);
2003 }
2004
2005 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
2006 {
2007 struct inode *inode = file_inode(filp);
2008 struct f2fs_inode_info *fi = F2FS_I(inode);
2009 u32 fsflags, old_fsflags;
2010 u32 iflags;
2011 int ret;
2012
2013 if (!inode_owner_or_capable(inode))
2014 return -EACCES;
2015
2016 if (get_user(fsflags, (int __user *)arg))
2017 return -EFAULT;
2018
2019 if (fsflags & ~F2FS_GETTABLE_FS_FL)
2020 return -EOPNOTSUPP;
2021 fsflags &= F2FS_SETTABLE_FS_FL;
2022
2023 iflags = f2fs_fsflags_to_iflags(fsflags);
2024 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
2025 return -EOPNOTSUPP;
2026
2027 ret = mnt_want_write_file(filp);
2028 if (ret)
2029 return ret;
2030
2031 inode_lock(inode);
2032
2033 old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
2034 ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
2035 if (ret)
2036 goto out;
2037
2038 ret = f2fs_setflags_common(inode, iflags,
2039 f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
2040 out:
2041 inode_unlock(inode);
2042 mnt_drop_write_file(filp);
2043 return ret;
2044 }
2045
2046 static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
2047 {
2048 struct inode *inode = file_inode(filp);
2049
2050 return put_user(inode->i_generation, (int __user *)arg);
2051 }
2052
2053 static int f2fs_ioc_start_atomic_write(struct file *filp)
2054 {
2055 struct inode *inode = file_inode(filp);
2056 struct f2fs_inode_info *fi = F2FS_I(inode);
2057 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2058 int ret;
2059
2060 if (!inode_owner_or_capable(inode))
2061 return -EACCES;
2062
2063 if (!S_ISREG(inode->i_mode))
2064 return -EINVAL;
2065
2066 if (filp->f_flags & O_DIRECT)
2067 return -EINVAL;
2068
2069 ret = mnt_want_write_file(filp);
2070 if (ret)
2071 return ret;
2072
2073 inode_lock(inode);
2074
2075 if (!f2fs_disable_compressed_file(inode)) {
2076 ret = -EINVAL;
2077 goto out;
2078 }
2079
2080 if (f2fs_is_atomic_file(inode)) {
2081 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
2082 ret = -EINVAL;
2083 goto out;
2084 }
2085
2086 ret = f2fs_convert_inline_inode(inode);
2087 if (ret)
2088 goto out;
2089
2090 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2091
2092 /*
2093 * Should wait for end_io so that F2FS_WB_CP_DATA is counted
2094 * correctly by f2fs_is_atomic_file.
2095 */
2096 if (get_dirty_pages(inode))
2097 f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
2098 inode->i_ino, get_dirty_pages(inode));
2099 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
2100 if (ret) {
2101 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2102 goto out;
2103 }
2104
2105 spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
2106 if (list_empty(&fi->inmem_ilist))
2107 list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
2108 sbi->atomic_files++;
2109 spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
2110
2111 /* add the inode to inmem_list first, then mark it as an atomic file */
2112 set_inode_flag(inode, FI_ATOMIC_FILE);
2113 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2114 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
2115
2116 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2117 F2FS_I(inode)->inmem_task = current;
2118 stat_update_max_atomic_write(inode);
2119 out:
2120 inode_unlock(inode);
2121 mnt_drop_write_file(filp);
2122 return ret;
2123 }
2124
2125 static int f2fs_ioc_commit_atomic_write(struct file *filp)
2126 {
2127 struct inode *inode = file_inode(filp);
2128 int ret;
2129
2130 if (!inode_owner_or_capable(inode))
2131 return -EACCES;
2132
2133 ret = mnt_want_write_file(filp);
2134 if (ret)
2135 return ret;
2136
2137 f2fs_balance_fs(F2FS_I_SB(inode), true);
2138
2139 inode_lock(inode);
2140
2141 if (f2fs_is_volatile_file(inode)) {
2142 ret = -EINVAL;
2143 goto err_out;
2144 }
2145
2146 if (f2fs_is_atomic_file(inode)) {
2147 ret = f2fs_commit_inmem_pages(inode);
2148 if (ret)
2149 goto err_out;
2150
2151 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2152 if (!ret)
2153 f2fs_drop_inmem_pages(inode);
2154 } else {
2155 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
2156 }
2157 err_out:
2158 if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
2159 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2160 ret = -EINVAL;
2161 }
2162 inode_unlock(inode);
2163 mnt_drop_write_file(filp);
2164 return ret;
2165 }
2166
2167 static int f2fs_ioc_start_volatile_write(struct file *filp)
2168 {
2169 struct inode *inode = file_inode(filp);
2170 int ret;
2171
2172 if (!inode_owner_or_capable(inode))
2173 return -EACCES;
2174
2175 if (!S_ISREG(inode->i_mode))
2176 return -EINVAL;
2177
2178 ret = mnt_want_write_file(filp);
2179 if (ret)
2180 return ret;
2181
2182 inode_lock(inode);
2183
2184 if (f2fs_is_volatile_file(inode))
2185 goto out;
2186
2187 ret = f2fs_convert_inline_inode(inode);
2188 if (ret)
2189 goto out;
2190
2191 stat_inc_volatile_write(inode);
2192 stat_update_max_volatile_write(inode);
2193
2194 set_inode_flag(inode, FI_VOLATILE_FILE);
2195 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2196 out:
2197 inode_unlock(inode);
2198 mnt_drop_write_file(filp);
2199 return ret;
2200 }
2201
2202 static int f2fs_ioc_release_volatile_write(struct file *filp)
2203 {
2204 struct inode *inode = file_inode(filp);
2205 int ret;
2206
2207 if (!inode_owner_or_capable(inode))
2208 return -EACCES;
2209
2210 ret = mnt_want_write_file(filp);
2211 if (ret)
2212 return ret;
2213
2214 inode_lock(inode);
2215
2216 if (!f2fs_is_volatile_file(inode))
2217 goto out;
2218
2219 if (!f2fs_is_first_block_written(inode)) {
2220 ret = truncate_partial_data_page(inode, 0, true);
2221 goto out;
2222 }
2223
2224 ret = punch_hole(inode, 0, F2FS_BLKSIZE);
2225 out:
2226 inode_unlock(inode);
2227 mnt_drop_write_file(filp);
2228 return ret;
2229 }
2230
2231 static int f2fs_ioc_abort_volatile_write(struct file *filp)
2232 {
2233 struct inode *inode = file_inode(filp);
2234 int ret;
2235
2236 if (!inode_owner_or_capable(inode))
2237 return -EACCES;
2238
2239 ret = mnt_want_write_file(filp);
2240 if (ret)
2241 return ret;
2242
2243 inode_lock(inode);
2244
2245 if (f2fs_is_atomic_file(inode))
2246 f2fs_drop_inmem_pages(inode);
2247 if (f2fs_is_volatile_file(inode)) {
2248 clear_inode_flag(inode, FI_VOLATILE_FILE);
2249 stat_dec_volatile_write(inode);
2250 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
2251 }
2252
2253 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
2254
2255 inode_unlock(inode);
2256
2257 mnt_drop_write_file(filp);
2258 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2259 return ret;
2260 }
2261
2262 static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
2263 {
2264 struct inode *inode = file_inode(filp);
2265 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2266 struct super_block *sb = sbi->sb;
2267 __u32 in;
2268 int ret = 0;
2269
2270 if (!capable(CAP_SYS_ADMIN))
2271 return -EPERM;
2272
2273 if (get_user(in, (__u32 __user *)arg))
2274 return -EFAULT;
2275
2276 if (in != F2FS_GOING_DOWN_FULLSYNC) {
2277 ret = mnt_want_write_file(filp);
2278 if (ret) {
2279 if (ret == -EROFS) {
2280 ret = 0;
2281 f2fs_stop_checkpoint(sbi, false,
2282 STOP_CP_REASON_SHUTDOWN);
2283 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2284 trace_f2fs_shutdown(sbi, in, ret);
2285 }
2286 return ret;
2287 }
2288 }
2289
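/*
 * Apart from GOING_DOWN_NEED_FSCK, every mode stops checkpointing and
 * marks the filesystem as shut down; the modes differ only in how much
 * data is flushed first (freeze, checkpoint, meta writeback, or nothing).
 */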
2290 switch (in) {
2291 case F2FS_GOING_DOWN_FULLSYNC:
2292 ret = freeze_bdev(sb->s_bdev);
2293 if (ret)
2294 goto out;
2295 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2296 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2297 thaw_bdev(sb->s_bdev);
2298 break;
2299 case F2FS_GOING_DOWN_METASYNC:
2300 /* do checkpoint only */
2301 ret = f2fs_sync_fs(sb, 1);
2302 if (ret)
2303 goto out;
2304 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2305 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2306 break;
2307 case F2FS_GOING_DOWN_NOSYNC:
2308 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2309 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2310 break;
2311 case F2FS_GOING_DOWN_METAFLUSH:
2312 f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
2313 f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN);
2314 set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
2315 break;
2316 case F2FS_GOING_DOWN_NEED_FSCK:
2317 set_sbi_flag(sbi, SBI_NEED_FSCK);
2318 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
2319 set_sbi_flag(sbi, SBI_IS_DIRTY);
2320 /* do checkpoint only */
2321 ret = f2fs_sync_fs(sb, 1);
2322 goto out;
2323 default:
2324 ret = -EINVAL;
2325 goto out;
2326 }
2327
2328 f2fs_stop_gc_thread(sbi);
2329 f2fs_stop_discard_thread(sbi);
2330
2331 f2fs_drop_discard_cmd(sbi);
2332 clear_opt(sbi, DISCARD);
2333
2334 f2fs_update_time(sbi, REQ_TIME);
2335 out:
2336 if (in != F2FS_GOING_DOWN_FULLSYNC)
2337 mnt_drop_write_file(filp);
2338
2339 trace_f2fs_shutdown(sbi, in, ret);
2340
2341 return ret;
2342 }
2343
2344 static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
2345 {
2346 struct inode *inode = file_inode(filp);
2347 struct super_block *sb = inode->i_sb;
2348 struct request_queue *q = bdev_get_queue(sb->s_bdev);
2349 struct fstrim_range range;
2350 int ret;
2351
2352 if (!capable(CAP_SYS_ADMIN))
2353 return -EPERM;
2354
2355 if (!f2fs_hw_support_discard(F2FS_SB(sb)))
2356 return -EOPNOTSUPP;
2357
2358 if (copy_from_user(&range, (struct fstrim_range __user *)arg,
2359 sizeof(range)))
2360 return -EFAULT;
2361
2362 ret = mnt_want_write_file(filp);
2363 if (ret)
2364 return ret;
2365
2366 range.minlen = max((unsigned int)range.minlen,
2367 q->limits.discard_granularity);
2368 ret = f2fs_trim_fs(F2FS_SB(sb), &range);
2369 mnt_drop_write_file(filp);
2370 if (ret < 0)
2371 return ret;
2372
2373 if (copy_to_user((struct fstrim_range __user *)arg, &range,
2374 sizeof(range)))
2375 return -EFAULT;
2376 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2377 return 0;
2378 }
2379
2380 static bool uuid_is_nonzero(__u8 u[16])
2381 {
2382 int i;
2383
2384 for (i = 0; i < 16; i++)
2385 if (u[i])
2386 return true;
2387 return false;
2388 }
2389
2390 static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
2391 {
2392 struct inode *inode = file_inode(filp);
2393
2394 if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
2395 return -EOPNOTSUPP;
2396
2397 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
2398
2399 return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
2400 }
2401
2402 static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
2403 {
2404 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2405 return -EOPNOTSUPP;
2406 return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
2407 }
2408
2409 static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
2410 {
2411 struct inode *inode = file_inode(filp);
2412 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2413 int err;
2414
2415 if (!f2fs_sb_has_encrypt(sbi))
2416 return -EOPNOTSUPP;
2417
2418 err = mnt_want_write_file(filp);
2419 if (err)
2420 return err;
2421
2422 f2fs_down_write(&sbi->sb_lock);
2423
2424 if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
2425 goto got_it;
2426
2427 /* update superblock with uuid */
2428 generate_random_uuid(sbi->raw_super->encrypt_pw_salt);
2429
2430 err = f2fs_commit_super(sbi, false);
2431 if (err) {
2432 /* undo new data */
2433 memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
2434 goto out_err;
2435 }
2436 got_it:
2437 if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
2438 16))
2439 err = -EFAULT;
2440 out_err:
2441 f2fs_up_write(&sbi->sb_lock);
2442 mnt_drop_write_file(filp);
2443 return err;
2444 }
2445
2446 static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
2447 unsigned long arg)
2448 {
2449 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2450 return -EOPNOTSUPP;
2451
2452 return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
2453 }
2454
2455 static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
2456 {
2457 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2458 return -EOPNOTSUPP;
2459
2460 return fscrypt_ioctl_add_key(filp, (void __user *)arg);
2461 }
2462
2463 static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
2464 {
2465 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2466 return -EOPNOTSUPP;
2467
2468 return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
2469 }
2470
2471 static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
2472 unsigned long arg)
2473 {
2474 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2475 return -EOPNOTSUPP;
2476
2477 return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
2478 }
2479
2480 static int f2fs_ioc_get_encryption_key_status(struct file *filp,
2481 unsigned long arg)
2482 {
2483 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2484 return -EOPNOTSUPP;
2485
2486 return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
2487 }
2488
2489 static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
2490 {
2491 if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
2492 return -EOPNOTSUPP;
2493
2494 return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
2495 }
2496
2497 static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
2498 {
2499 struct inode *inode = file_inode(filp);
2500 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2501 __u32 sync;
2502 int ret;
2503
2504 if (!capable(CAP_SYS_ADMIN))
2505 return -EPERM;
2506
2507 if (get_user(sync, (__u32 __user *)arg))
2508 return -EFAULT;
2509
2510 if (f2fs_readonly(sbi->sb))
2511 return -EROFS;
2512
2513 ret = mnt_want_write_file(filp);
2514 if (ret)
2515 return ret;
2516
2517 if (!sync) {
2518 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2519 ret = -EBUSY;
2520 goto out;
2521 }
2522 } else {
2523 f2fs_down_write(&sbi->gc_lock);
2524 }
2525
2526 ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO);
2527 out:
2528 mnt_drop_write_file(filp);
2529 return ret;
2530 }
2531
2532 static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
2533 {
2534 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2535 u64 end;
2536 int ret;
2537
2538 if (!capable(CAP_SYS_ADMIN))
2539 return -EPERM;
2540 if (f2fs_readonly(sbi->sb))
2541 return -EROFS;
2542
2543 end = range->start + range->len;
2544 if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
2545 end >= MAX_BLKADDR(sbi))
2546 return -EINVAL;
2547
2548 ret = mnt_want_write_file(filp);
2549 if (ret)
2550 return ret;
2551
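/* GC the range one section at a time until range->start passes the end. */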
2552 do_more:
2553 if (!range->sync) {
2554 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2555 ret = -EBUSY;
2556 goto out;
2557 }
2558 } else {
2559 f2fs_down_write(&sbi->gc_lock);
2560 }
2561
2562 ret = f2fs_gc(sbi, range->sync, true, false,
2563 GET_SEGNO(sbi, range->start));
2564 if (ret) {
2565 if (ret == -EBUSY)
2566 ret = -EAGAIN;
2567 goto out;
2568 }
2569 range->start += BLKS_PER_SEC(sbi);
2570 if (range->start <= end)
2571 goto do_more;
2572 out:
2573 mnt_drop_write_file(filp);
2574 return ret;
2575 }
2576
2577 static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
2578 {
2579 struct f2fs_gc_range range;
2580
2581 if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
2582 sizeof(range)))
2583 return -EFAULT;
2584 return __f2fs_ioc_gc_range(filp, &range);
2585 }
2586
2587 static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2588 {
2589 struct inode *inode = file_inode(filp);
2590 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2591 int ret;
2592
2593 if (!capable(CAP_SYS_ADMIN))
2594 return -EPERM;
2595
2596 if (f2fs_readonly(sbi->sb))
2597 return -EROFS;
2598
2599 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2600 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2601 return -EINVAL;
2602 }
2603
2604 ret = mnt_want_write_file(filp);
2605 if (ret)
2606 return ret;
2607
2608 ret = f2fs_sync_fs(sbi->sb, 1);
2609
2610 mnt_drop_write_file(filp);
2611 return ret;
2612 }
2613
2614 static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2615 struct file *filp,
2616 struct f2fs_defragment *range)
2617 {
2618 struct inode *inode = file_inode(filp);
2619 struct f2fs_map_blocks map = { .m_next_extent = NULL,
2620 .m_seg_type = NO_CHECK_TYPE,
2621 .m_may_create = false };
2622 struct extent_info ei = {0, 0, 0};
2623 pgoff_t pg_start, pg_end, next_pgofs;
2624 unsigned int blk_per_seg = sbi->blocks_per_seg;
2625 unsigned int total = 0, sec_num;
2626 block_t blk_end = 0;
2627 bool fragmented = false;
2628 int err;
2629
2630 pg_start = range->start >> PAGE_SHIFT;
2631 pg_end = (range->start + range->len) >> PAGE_SHIFT;
2632
2633 f2fs_balance_fs(sbi, true);
2634
2635 inode_lock(inode);
2636
2637 /* if in-place-update policy is enabled, don't waste time here */
2638 set_inode_flag(inode, FI_OPU_WRITE);
2639 if (f2fs_should_update_inplace(inode, NULL)) {
2640 err = -EINVAL;
2641 goto out;
2642 }
2643
2644 /* writeback all dirty pages in the range */
2645 err = filemap_write_and_wait_range(inode->i_mapping, range->start,
2646 range->start + range->len - 1);
2647 if (err)
2648 goto out;
2649
2650 /*
2651 * look up mapping info in the extent cache; skip defragmenting if the
2652 * physical block addresses are contiguous.
2653 */
2654 if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
2655 if (ei.fofs + ei.len >= pg_end)
2656 goto out;
2657 }
2658
2659 map.m_lblk = pg_start;
2660 map.m_next_pgofs = &next_pgofs;
2661
2662 /*
2663 * look up mapping info in the dnode page cache; skip defragmenting if
2664 * all physical block addresses are contiguous, even if there are holes
2665 * in the logical blocks.
2666 */
2667 while (map.m_lblk < pg_end) {
2668 map.m_len = pg_end - map.m_lblk;
2669 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2670 if (err)
2671 goto out;
2672
2673 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2674 map.m_lblk = next_pgofs;
2675 continue;
2676 }
2677
2678 if (blk_end && blk_end != map.m_pblk)
2679 fragmented = true;
2680
2681 /* record the total count of blocks that we're going to move */
2682 total += map.m_len;
2683
2684 blk_end = map.m_pblk + map.m_len;
2685
2686 map.m_lblk += map.m_len;
2687 }
2688
2689 if (!fragmented) {
2690 total = 0;
2691 goto out;
2692 }
2693
2694 sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2695
2696 /*
2697 * make sure there are enough free sections for LFS allocation; this
2698 * avoids running defragmentation in SSR mode when free sections are
2699 * being consumed intensively
2700 */
2701 if (has_not_enough_free_secs(sbi, 0, sec_num)) {
2702 err = -EAGAIN;
2703 goto out;
2704 }
2705
2706 map.m_lblk = pg_start;
2707 map.m_len = pg_end - pg_start;
2708 total = 0;
2709
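/*
 * Second pass: redirty up to one segment's worth of mapped pages at a
 * time with FI_SKIP_WRITES set, then write them back so the blocks are
 * reallocated contiguously out of place.
 */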
2710 while (map.m_lblk < pg_end) {
2711 pgoff_t idx;
2712 int cnt = 0;
2713
2714 do_map:
2715 map.m_len = pg_end - map.m_lblk;
2716 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
2717 if (err)
2718 goto clear_out;
2719
2720 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
2721 map.m_lblk = next_pgofs;
2722 goto check;
2723 }
2724
2725 set_inode_flag(inode, FI_SKIP_WRITES);
2726
2727 idx = map.m_lblk;
2728 while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
2729 struct page *page;
2730
2731 page = f2fs_get_lock_data_page(inode, idx, true);
2732 if (IS_ERR(page)) {
2733 err = PTR_ERR(page);
2734 goto clear_out;
2735 }
2736
2737 set_page_dirty(page);
2738 f2fs_put_page(page, 1);
2739
2740 idx++;
2741 cnt++;
2742 total++;
2743 }
2744
2745 map.m_lblk = idx;
2746 check:
2747 if (map.m_lblk < pg_end && cnt < blk_per_seg)
2748 goto do_map;
2749
2750 clear_inode_flag(inode, FI_SKIP_WRITES);
2751
2752 err = filemap_fdatawrite(inode->i_mapping);
2753 if (err)
2754 goto out;
2755 }
2756 clear_out:
2757 clear_inode_flag(inode, FI_SKIP_WRITES);
2758 out:
2759 clear_inode_flag(inode, FI_OPU_WRITE);
2760 inode_unlock(inode);
2761 if (!err)
2762 range->len = (u64)total << PAGE_SHIFT;
2763 return err;
2764 }
2765
2766 static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
2767 {
2768 struct inode *inode = file_inode(filp);
2769 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2770 struct f2fs_defragment range;
2771 int err;
2772
2773 if (!capable(CAP_SYS_ADMIN))
2774 return -EPERM;
2775
2776 if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
2777 return -EINVAL;
2778
2779 if (f2fs_readonly(sbi->sb))
2780 return -EROFS;
2781
2782 if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
2783 sizeof(range)))
2784 return -EFAULT;
2785
2786 /* verify alignment of offset & size */
2787 if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
2788 return -EINVAL;
2789
2790 if (unlikely((range.start + range.len) >> PAGE_SHIFT >
2791 max_file_blocks(inode)))
2792 return -EINVAL;
2793
2794 err = mnt_want_write_file(filp);
2795 if (err)
2796 return err;
2797
2798 err = f2fs_defragment_range(sbi, filp, &range);
2799 mnt_drop_write_file(filp);
2800
2801 f2fs_update_time(sbi, REQ_TIME);
2802 if (err < 0)
2803 return err;
2804
2805 if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
2806 sizeof(range)))
2807 return -EFAULT;
2808
2809 return 0;
2810 }
2811
2812 static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
2813 struct file *file_out, loff_t pos_out, size_t len)
2814 {
2815 struct inode *src = file_inode(file_in);
2816 struct inode *dst = file_inode(file_out);
2817 struct f2fs_sb_info *sbi = F2FS_I_SB(src);
2818 size_t olen = len, dst_max_i_size = 0;
2819 size_t dst_osize;
2820 int ret;
2821
2822 if (file_in->f_path.mnt != file_out->f_path.mnt ||
2823 src->i_sb != dst->i_sb)
2824 return -EXDEV;
2825
2826 if (unlikely(f2fs_readonly(src->i_sb)))
2827 return -EROFS;
2828
2829 if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
2830 return -EINVAL;
2831
2832 if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
2833 return -EOPNOTSUPP;
2834
2835 if (pos_out < 0 || pos_in < 0)
2836 return -EINVAL;
2837
2838 if (src == dst) {
2839 if (pos_in == pos_out)
2840 return 0;
2841 if (pos_out > pos_in && pos_out < pos_in + len)
2842 return -EINVAL;
2843 }
2844
2845 inode_lock(src);
2846 if (src != dst) {
2847 ret = -EBUSY;
2848 if (!inode_trylock(dst))
2849 goto out;
2850 }
2851
2852 if (f2fs_compressed_file(src) || f2fs_compressed_file(dst)) {
2853 ret = -EOPNOTSUPP;
2854 goto out_unlock;
2855 }
2856
2857 ret = -EINVAL;
2858 if (pos_in + len > src->i_size || pos_in + len < pos_in)
2859 goto out_unlock;
2860 if (len == 0)
2861 olen = len = src->i_size - pos_in;
2862 if (pos_in + len == src->i_size)
2863 len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
2864 if (len == 0) {
2865 ret = 0;
2866 goto out_unlock;
2867 }
2868
2869 dst_osize = dst->i_size;
2870 if (pos_out + olen > dst->i_size)
2871 dst_max_i_size = pos_out + olen;
2872
2873 /* verify the end result is block aligned */
2874 if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
2875 !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
2876 !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
2877 goto out_unlock;
2878
2879 ret = f2fs_convert_inline_inode(src);
2880 if (ret)
2881 goto out_unlock;
2882
2883 ret = f2fs_convert_inline_inode(dst);
2884 if (ret)
2885 goto out_unlock;
2886
2887 /* write out all dirty pages from offset */
2888 ret = filemap_write_and_wait_range(src->i_mapping,
2889 pos_in, pos_in + len);
2890 if (ret)
2891 goto out_unlock;
2892
2893 ret = filemap_write_and_wait_range(dst->i_mapping,
2894 pos_out, pos_out + len);
2895 if (ret)
2896 goto out_unlock;
2897
2898 f2fs_balance_fs(sbi, true);
2899
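/* Block GC on both inodes while their block addresses are exchanged. */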
2900 f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2901 if (src != dst) {
2902 ret = -EBUSY;
2903 if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
2904 goto out_src;
2905 }
2906
2907 f2fs_lock_op(sbi);
2908 ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
2909 pos_out >> F2FS_BLKSIZE_BITS,
2910 len >> F2FS_BLKSIZE_BITS, false);
2911
2912 if (!ret) {
2913 if (dst_max_i_size)
2914 f2fs_i_size_write(dst, dst_max_i_size);
2915 else if (dst_osize != dst->i_size)
2916 f2fs_i_size_write(dst, dst_osize);
2917 }
2918 f2fs_unlock_op(sbi);
2919
2920 if (src != dst)
2921 f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
2922 out_src:
2923 f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
2924 out_unlock:
2925 if (src != dst)
2926 inode_unlock(dst);
2927 out:
2928 inode_unlock(src);
2929 return ret;
2930 }
2931
2932 static int __f2fs_ioc_move_range(struct file *filp,
2933 struct f2fs_move_range *range)
2934 {
2935 struct fd dst;
2936 int err;
2937
2938 if (!(filp->f_mode & FMODE_READ) ||
2939 !(filp->f_mode & FMODE_WRITE))
2940 return -EBADF;
2941
2942 dst = fdget(range->dst_fd);
2943 if (!dst.file)
2944 return -EBADF;
2945
2946 if (!(dst.file->f_mode & FMODE_WRITE)) {
2947 err = -EBADF;
2948 goto err_out;
2949 }
2950
2951 err = mnt_want_write_file(filp);
2952 if (err)
2953 goto err_out;
2954
2955 err = f2fs_move_file_range(filp, range->pos_in, dst.file,
2956 range->pos_out, range->len);
2957
2958 mnt_drop_write_file(filp);
2959 err_out:
2960 fdput(dst);
2961 return err;
2962 }
2963
2964 static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
2965 {
2966 struct f2fs_move_range range;
2967
2968 if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
2969 sizeof(range)))
2970 return -EFAULT;
2971 return __f2fs_ioc_move_range(filp, &range);
2972 }
2973
2974 static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2975 {
2976 struct inode *inode = file_inode(filp);
2977 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2978 struct sit_info *sm = SIT_I(sbi);
2979 unsigned int start_segno = 0, end_segno = 0;
2980 unsigned int dev_start_segno = 0, dev_end_segno = 0;
2981 struct f2fs_flush_device range;
2982 int ret;
2983
2984 if (!capable(CAP_SYS_ADMIN))
2985 return -EPERM;
2986
2987 if (f2fs_readonly(sbi->sb))
2988 return -EROFS;
2989
2990 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2991 return -EINVAL;
2992
2993 if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
2994 sizeof(range)))
2995 return -EFAULT;
2996
2997 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2998 __is_large_section(sbi)) {
2999 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
3000 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
3001 return -EINVAL;
3002 }
3003
3004 ret = mnt_want_write_file(filp);
3005 if (ret)
3006 return ret;
3007
3008 if (range.dev_num != 0)
3009 dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
3010 dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);
3011
3012 start_segno = sm->last_victim[FLUSH_DEVICE];
3013 if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
3014 start_segno = dev_start_segno;
3015 end_segno = min(start_segno + range.segments, dev_end_segno);
3016
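/*
 * Steer the next allocation and victim selection past this device's
 * segments, then GC each segment in the range so its valid blocks are
 * migrated elsewhere.
 */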
3017 while (start_segno < end_segno) {
3018 if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
3019 ret = -EBUSY;
3020 goto out;
3021 }
3022 sm->last_victim[GC_CB] = end_segno + 1;
3023 sm->last_victim[GC_GREEDY] = end_segno + 1;
3024 sm->last_victim[ALLOC_NEXT] = end_segno + 1;
3025 ret = f2fs_gc(sbi, true, true, true, start_segno);
3026 if (ret == -EAGAIN)
3027 ret = 0;
3028 else if (ret < 0)
3029 break;
3030 start_segno++;
3031 }
3032 out:
3033 mnt_drop_write_file(filp);
3034 return ret;
3035 }
3036
3037 static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
3038 {
3039 struct inode *inode = file_inode(filp);
3040 u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);
3041
3042 /* Always advertise atomic write support so SQLite on Android can rely on it. */
3043 sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;
3044
3045 return put_user(sb_feature, (u32 __user *)arg);
3046 }
3047
3048 #ifdef CONFIG_QUOTA
3049 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3050 {
3051 struct dquot *transfer_to[MAXQUOTAS] = {};
3052 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3053 struct super_block *sb = sbi->sb;
3054 int err;
3055
3056 transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
3057 if (IS_ERR(transfer_to[PRJQUOTA]))
3058 return PTR_ERR(transfer_to[PRJQUOTA]);
3059
3060 err = __dquot_transfer(inode, transfer_to);
3061 if (err)
3062 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3063 dqput(transfer_to[PRJQUOTA]);
3064 return err;
3065 }
3066
3067 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3068 {
3069 struct inode *inode = file_inode(filp);
3070 struct f2fs_inode_info *fi = F2FS_I(inode);
3071 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3072 struct f2fs_inode *ri = NULL;
3073 kprojid_t kprojid;
3074 int err;
3075
3076 if (!f2fs_sb_has_project_quota(sbi)) {
3077 if (projid != F2FS_DEF_PROJID)
3078 return -EOPNOTSUPP;
3079 else
3080 return 0;
3081 }
3082
3083 if (!f2fs_has_extra_attr(inode))
3084 return -EOPNOTSUPP;
3085
3086 kprojid = make_kprojid(&init_user_ns, (projid_t)projid);
3087
3088 if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
3089 return 0;
3090
3091 err = -EPERM;
3092 /* Is it quota file? Do not allow user to mess with it */
3093 if (IS_NOQUOTA(inode))
3094 return err;
3095
3096 if (!F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
3097 return -EOVERFLOW;
3098
3099 err = f2fs_dquot_initialize(inode);
3100 if (err)
3101 return err;
3102
3103 f2fs_lock_op(sbi);
3104 err = f2fs_transfer_project_quota(inode, kprojid);
3105 if (err)
3106 goto out_unlock;
3107
3108 F2FS_I(inode)->i_projid = kprojid;
3109 inode->i_ctime = current_time(inode);
3110 f2fs_mark_inode_dirty_sync(inode, true);
3111 out_unlock:
3112 f2fs_unlock_op(sbi);
3113 return err;
3114 }
3115 #else
3116 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
3117 {
3118 return 0;
3119 }
3120
3121 static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3122 {
3123 if (projid != F2FS_DEF_PROJID)
3124 return -EOPNOTSUPP;
3125 return 0;
3126 }
3127 #endif
3128
3129 /* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
3130
3131 /*
3132 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
3133 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
3134 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
3135 */
3136
3137 static const struct {
3138 u32 iflag;
3139 u32 xflag;
3140 } f2fs_xflags_map[] = {
3141 { F2FS_SYNC_FL, FS_XFLAG_SYNC },
3142 { F2FS_IMMUTABLE_FL, FS_XFLAG_IMMUTABLE },
3143 { F2FS_APPEND_FL, FS_XFLAG_APPEND },
3144 { F2FS_NODUMP_FL, FS_XFLAG_NODUMP },
3145 { F2FS_NOATIME_FL, FS_XFLAG_NOATIME },
3146 { F2FS_PROJINHERIT_FL, FS_XFLAG_PROJINHERIT },
3147 };
3148
3149 #define F2FS_SUPPORTED_XFLAGS ( \
3150 FS_XFLAG_SYNC | \
3151 FS_XFLAG_IMMUTABLE | \
3152 FS_XFLAG_APPEND | \
3153 FS_XFLAG_NODUMP | \
3154 FS_XFLAG_NOATIME | \
3155 FS_XFLAG_PROJINHERIT)
3156
3157 /* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
3158 static inline u32 f2fs_iflags_to_xflags(u32 iflags)
3159 {
3160 u32 xflags = 0;
3161 int i;
3162
3163 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3164 if (iflags & f2fs_xflags_map[i].iflag)
3165 xflags |= f2fs_xflags_map[i].xflag;
3166
3167 return xflags;
3168 }
3169
3170 /* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
3171 static inline u32 f2fs_xflags_to_iflags(u32 xflags)
3172 {
3173 u32 iflags = 0;
3174 int i;
3175
3176 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3177 if (xflags & f2fs_xflags_map[i].xflag)
3178 iflags |= f2fs_xflags_map[i].iflag;
3179
3180 return iflags;
3181 }
3182
3183 static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
3184 {
3185 struct f2fs_inode_info *fi = F2FS_I(inode);
3186
3187 simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
3188
3189 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
3190 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
3191 }
3192
3193 static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
3194 {
3195 struct inode *inode = file_inode(filp);
3196 struct fsxattr fa;
3197
3198 f2fs_fill_fsxattr(inode, &fa);
3199
3200 if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
3201 return -EFAULT;
3202 return 0;
3203 }
3204
3205 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
3206 {
3207 struct inode *inode = file_inode(filp);
3208 struct fsxattr fa, old_fa;
3209 u32 iflags;
3210 int err;
3211
3212 if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
3213 return -EFAULT;
3214
3215 /* Make sure caller has proper permission */
3216 if (!inode_owner_or_capable(inode))
3217 return -EACCES;
3218
3219 if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
3220 return -EOPNOTSUPP;
3221
3222 iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
3223 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
3224 return -EOPNOTSUPP;
3225
3226 err = mnt_want_write_file(filp);
3227 if (err)
3228 return err;
3229
3230 inode_lock(inode);
3231
3232 f2fs_fill_fsxattr(inode, &old_fa);
3233 err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
3234 if (err)
3235 goto out;
3236
3237 err = f2fs_setflags_common(inode, iflags,
3238 f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
3239 if (err)
3240 goto out;
3241
3242 err = f2fs_ioc_setproject(filp, fa.fsx_projid);
3243 out:
3244 inode_unlock(inode);
3245 mnt_drop_write_file(filp);
3246 return err;
3247 }
3248
3249 int f2fs_pin_file_control(struct inode *inode, bool inc)
3250 {
3251 struct f2fs_inode_info *fi = F2FS_I(inode);
3252 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3253
3254 /* Use i_gc_failures of a normal file as a risk signal. */
3255 if (inc)
3256 f2fs_i_gc_failures_write(inode,
3257 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
3258
3259 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
3260 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3261 __func__, inode->i_ino,
3262 fi->i_gc_failures[GC_FAILURE_PIN]);
3263 clear_inode_flag(inode, FI_PIN_FILE);
3264 return -EAGAIN;
3265 }
3266 return 0;
3267 }
3268
3269 static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
3270 {
3271 struct inode *inode = file_inode(filp);
3272 __u32 pin;
3273 int ret = 0;
3274
3275 if (get_user(pin, (__u32 __user *)arg))
3276 return -EFAULT;
3277
3278 if (!S_ISREG(inode->i_mode))
3279 return -EINVAL;
3280
3281 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3282 return -EROFS;
3283
3284 ret = mnt_want_write_file(filp);
3285 if (ret)
3286 return ret;
3287
3288 inode_lock(inode);
3289
3290 if (!pin) {
3291 clear_inode_flag(inode, FI_PIN_FILE);
3292 f2fs_i_gc_failures_write(inode, 0);
3293 goto done;
3294 }
3295
3296 if (f2fs_should_update_outplace(inode, NULL)) {
3297 ret = -EINVAL;
3298 goto out;
3299 }
3300
3301 if (f2fs_pin_file_control(inode, false)) {
3302 ret = -EAGAIN;
3303 goto out;
3304 }
3305
3306 ret = f2fs_convert_inline_inode(inode);
3307 if (ret)
3308 goto out;
3309
3310 if (!f2fs_disable_compressed_file(inode)) {
3311 ret = -EOPNOTSUPP;
3312 goto out;
3313 }
3314
3315 set_inode_flag(inode, FI_PIN_FILE);
3316 ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3317 done:
3318 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3319 out:
3320 inode_unlock(inode);
3321 mnt_drop_write_file(filp);
3322 return ret;
3323 }
3324
3325 static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3326 {
3327 struct inode *inode = file_inode(filp);
3328 __u32 pin = 0;
3329
3330 if (is_inode_flag_set(inode, FI_PIN_FILE))
3331 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
3332 return put_user(pin, (u32 __user *)arg);
3333 }
3334
3335 int f2fs_precache_extents(struct inode *inode)
3336 {
3337 struct f2fs_inode_info *fi = F2FS_I(inode);
3338 struct f2fs_map_blocks map;
3339 pgoff_t m_next_extent;
3340 loff_t end;
3341 int err;
3342
3343 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3344 return -EOPNOTSUPP;
3345
3346 map.m_lblk = 0;
3347 map.m_pblk = 0;
3348 map.m_next_pgofs = NULL;
3349 map.m_next_extent = &m_next_extent;
3350 map.m_seg_type = NO_CHECK_TYPE;
3351 map.m_may_create = false;
3352 end = max_file_blocks(inode);
3353
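/* Map the whole file in precache mode so the extent cache is populated up front. */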
3354 while (map.m_lblk < end) {
3355 map.m_len = end - map.m_lblk;
3356
3357 f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
3358 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
3359 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
3360 if (err)
3361 return err;
3362
3363 map.m_lblk = m_next_extent;
3364 }
3365
3366 return 0;
3367 }
3368
3369 static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
3370 {
3371 return f2fs_precache_extents(file_inode(filp));
3372 }
3373
3374 static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3375 {
3376 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3377 __u64 block_count;
3378
3379 if (!capable(CAP_SYS_ADMIN))
3380 return -EPERM;
3381
3382 if (f2fs_readonly(sbi->sb))
3383 return -EROFS;
3384
3385 if (copy_from_user(&block_count, (void __user *)arg,
3386 sizeof(block_count)))
3387 return -EFAULT;
3388
3389 return f2fs_resize_fs(filp, block_count);
3390 }
3391
3392 static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3393 {
3394 struct inode *inode = file_inode(filp);
3395
3396 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3397
3398 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3399 f2fs_warn(F2FS_I_SB(inode),
3400 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem",
3401 inode->i_ino);
3402 return -EOPNOTSUPP;
3403 }
3404
3405 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3406 }
3407
3408 static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
3409 {
3410 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3411 return -EOPNOTSUPP;
3412
3413 return fsverity_ioctl_measure(filp, (void __user *)arg);
3414 }
3415
3416 static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg)
3417 {
3418 if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
3419 return -EOPNOTSUPP;
3420
3421 return fsverity_ioctl_read_metadata(filp, (const void __user *)arg);
3422 }
3423
3424 static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
3425 {
3426 struct inode *inode = file_inode(filp);
3427 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3428 char *vbuf;
3429 int count;
3430 int err = 0;
3431
3432 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3433 if (!vbuf)
3434 return -ENOMEM;
3435
3436 f2fs_down_read(&sbi->sb_lock);
3437 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3438 ARRAY_SIZE(sbi->raw_super->volume_name),
3439 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3440 f2fs_up_read(&sbi->sb_lock);
3441
3442 if (copy_to_user((char __user *)arg, vbuf,
3443 min(FSLABEL_MAX, count)))
3444 err = -EFAULT;
3445
3446 kfree(vbuf);
3447 return err;
3448 }
3449
3450 static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
3451 {
3452 struct inode *inode = file_inode(filp);
3453 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3454 char *vbuf;
3455 int err = 0;
3456
3457 if (!capable(CAP_SYS_ADMIN))
3458 return -EPERM;
3459
3460 vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
3461 if (IS_ERR(vbuf))
3462 return PTR_ERR(vbuf);
3463
3464 err = mnt_want_write_file(filp);
3465 if (err)
3466 goto out;
3467
3468 f2fs_down_write(&sbi->sb_lock);
3469
3470 memset(sbi->raw_super->volume_name, 0,
3471 sizeof(sbi->raw_super->volume_name));
3472 utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
3473 sbi->raw_super->volume_name,
3474 ARRAY_SIZE(sbi->raw_super->volume_name));
3475
3476 err = f2fs_commit_super(sbi, false);
3477
3478 f2fs_up_write(&sbi->sb_lock);
3479
3480 mnt_drop_write_file(filp);
3481 out:
3482 kfree(vbuf);
3483 return err;
3484 }
3485
3486 static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3487 {
3488 struct inode *inode = file_inode(filp);
3489 __u64 blocks;
3490
3491 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3492 return -EOPNOTSUPP;
3493
3494 if (!f2fs_compressed_file(inode))
3495 return -EINVAL;
3496
3497 blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
3498 return put_user(blocks, (u64 __user *)arg);
3499 }
3500
3501 static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3502 {
3503 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3504 unsigned int released_blocks = 0;
3505 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3506 block_t blkaddr;
3507 int i;
3508
3509 for (i = 0; i < count; i++) {
3510 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3511 dn->ofs_in_node + i);
3512
3513 if (!__is_valid_data_blkaddr(blkaddr))
3514 continue;
3515 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3516 DATA_GENERIC_ENHANCE)))
3517 return -EFSCORRUPTED;
3518 }
3519
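/*
 * Walk the range cluster by cluster: skip clusters that are not
 * compressed (first slot != COMPRESS_ADDR), turn reserved NEW_ADDR
 * slots back into holes, and return the freed block count via
 * released_blocks.
 */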
3520 while (count) {
3521 int compr_blocks = 0;
3522
3523 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3524 blkaddr = f2fs_data_blkaddr(dn);
3525
3526 if (i == 0) {
3527 if (blkaddr == COMPRESS_ADDR)
3528 continue;
3529 dn->ofs_in_node += cluster_size;
3530 goto next;
3531 }
3532
3533 if (__is_valid_data_blkaddr(blkaddr))
3534 compr_blocks++;
3535
3536 if (blkaddr != NEW_ADDR)
3537 continue;
3538
3539 dn->data_blkaddr = NULL_ADDR;
3540 f2fs_set_data_blkaddr(dn);
3541 }
3542
3543 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
3544 dec_valid_block_count(sbi, dn->inode,
3545 cluster_size - compr_blocks);
3546
3547 released_blocks += cluster_size - compr_blocks;
3548 next:
3549 count -= cluster_size;
3550 }
3551
3552 return released_blocks;
3553 }
3554
3555 static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
3556 {
3557 struct inode *inode = file_inode(filp);
3558 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3559 pgoff_t page_idx = 0, last_idx;
3560 unsigned int released_blocks = 0;
3561 int ret;
3562 int writecount;
3563
3564 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3565 return -EOPNOTSUPP;
3566
3567 if (!f2fs_compressed_file(inode))
3568 return -EINVAL;
3569
3570 if (f2fs_readonly(sbi->sb))
3571 return -EROFS;
3572
3573 ret = mnt_want_write_file(filp);
3574 if (ret)
3575 return ret;
3576
3577 f2fs_balance_fs(F2FS_I_SB(inode), true);
3578
3579 inode_lock(inode);
3580
3581 writecount = atomic_read(&inode->i_writecount);
3582 if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
3583 (!(filp->f_mode & FMODE_WRITE) && writecount)) {
3584 ret = -EBUSY;
3585 goto out;
3586 }
3587
3588 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3589 ret = -EINVAL;
3590 goto out;
3591 }
3592
3593 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
3594 if (ret)
3595 goto out;
3596
3597 set_inode_flag(inode, FI_COMPRESS_RELEASED);
3598 inode->i_ctime = current_time(inode);
3599 f2fs_mark_inode_dirty_sync(inode, true);
3600
3601 if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
3602 goto out;
3603
3604 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3605 f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
3606
3607 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3608
3609 while (page_idx < last_idx) {
3610 struct dnode_of_data dn;
3611 pgoff_t end_offset, count;
3612
3613 set_new_dnode(&dn, inode, NULL, NULL, 0);
3614 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3615 if (ret) {
3616 if (ret == -ENOENT) {
3617 page_idx = f2fs_get_next_page_offset(&dn,
3618 page_idx);
3619 ret = 0;
3620 continue;
3621 }
3622 break;
3623 }
3624
3625 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3626 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3627 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3628
3629 ret = release_compress_blocks(&dn, count);
3630
3631 f2fs_put_dnode(&dn);
3632
3633 if (ret < 0)
3634 break;
3635
3636 page_idx += count;
3637 released_blocks += ret;
3638 }
3639
3640 f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
3641 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3642 out:
3643 inode_unlock(inode);
3644
3645 mnt_drop_write_file(filp);
3646
3647 if (ret >= 0) {
3648 ret = put_user(released_blocks, (u64 __user *)arg);
3649 } else if (released_blocks &&
3650 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3651 set_sbi_flag(sbi, SBI_NEED_FSCK);
3652 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3653 "iblocks=%llu, released=%u, compr_blocks=%u, "
3654 "run fsck to fix.",
3655 __func__, inode->i_ino, inode->i_blocks,
3656 released_blocks,
3657 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3658 }
3659
3660 return ret;
3661 }
3662
3663 static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
3664 {
3665 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
3666 unsigned int reserved_blocks = 0;
3667 int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
3668 block_t blkaddr;
3669 int i;
3670
3671 for (i = 0; i < count; i++) {
3672 blkaddr = data_blkaddr(dn->inode, dn->node_page,
3673 dn->ofs_in_node + i);
3674
3675 if (!__is_valid_data_blkaddr(blkaddr))
3676 continue;
3677 if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
3678 DATA_GENERIC_ENHANCE)))
3679 return -EFSCORRUPTED;
3680 }
3681
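/*
 * Walk the range cluster by cluster: for each released cluster, turn the
 * hole slots back into NEW_ADDR reservations and charge them to the
 * inode's block count again; bail out with -ENOSPC if the full
 * reservation cannot be made.
 */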
3682 while (count) {
3683 int compr_blocks = 0;
3684 blkcnt_t reserved;
3685 int ret;
3686
3687 for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
3688 blkaddr = f2fs_data_blkaddr(dn);
3689
3690 if (i == 0) {
3691 if (blkaddr == COMPRESS_ADDR)
3692 continue;
3693 dn->ofs_in_node += cluster_size;
3694 goto next;
3695 }
3696
3697 if (__is_valid_data_blkaddr(blkaddr)) {
3698 compr_blocks++;
3699 continue;
3700 }
3701
3702 dn->data_blkaddr = NEW_ADDR;
3703 f2fs_set_data_blkaddr(dn);
3704 }
3705
3706 reserved = cluster_size - compr_blocks;
3707 ret = inc_valid_block_count(sbi, dn->inode, &reserved);
3708 if (ret)
3709 return ret;
3710
3711 if (reserved != cluster_size - compr_blocks)
3712 return -ENOSPC;
3713
3714 f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
3715
3716 reserved_blocks += reserved;
3717 next:
3718 count -= cluster_size;
3719 }
3720
3721 return reserved_blocks;
3722 }
3723
3724 static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3725 {
3726 struct inode *inode = file_inode(filp);
3727 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3728 pgoff_t page_idx = 0, last_idx;
3729 unsigned int reserved_blocks = 0;
3730 int ret;
3731
3732 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3733 return -EOPNOTSUPP;
3734
3735 if (!f2fs_compressed_file(inode))
3736 return -EINVAL;
3737
3738 if (f2fs_readonly(sbi->sb))
3739 return -EROFS;
3740
3741 ret = mnt_want_write_file(filp);
3742 if (ret)
3743 return ret;
3744
3745 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
3746 goto out;
3747
3748 f2fs_balance_fs(F2FS_I_SB(inode), true);
3749
3750 inode_lock(inode);
3751
3752 if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
3753 ret = -EINVAL;
3754 goto unlock_inode;
3755 }
3756
3757 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3758 f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
3759
3760 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3761
3762 while (page_idx < last_idx) {
3763 struct dnode_of_data dn;
3764 pgoff_t end_offset, count;
3765
3766 set_new_dnode(&dn, inode, NULL, NULL, 0);
3767 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3768 if (ret) {
3769 if (ret == -ENOENT) {
3770 page_idx = f2fs_get_next_page_offset(&dn,
3771 page_idx);
3772 ret = 0;
3773 continue;
3774 }
3775 break;
3776 }
3777
3778 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3779 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
3780 count = round_up(count, F2FS_I(inode)->i_cluster_size);
3781
3782 ret = reserve_compress_blocks(&dn, count);
3783
3784 f2fs_put_dnode(&dn);
3785
3786 if (ret < 0)
3787 break;
3788
3789 page_idx += count;
3790 reserved_blocks += ret;
3791 }
3792
3793 f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
3794 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3795
3796 if (ret >= 0) {
3797 clear_inode_flag(inode, FI_COMPRESS_RELEASED);
3798 inode->i_ctime = current_time(inode);
3799 f2fs_mark_inode_dirty_sync(inode, true);
3800 }
3801 unlock_inode:
3802 inode_unlock(inode);
3803 out:
3804 mnt_drop_write_file(filp);
3805
3806 if (ret >= 0) {
3807 ret = put_user(reserved_blocks, (u64 __user *)arg);
3808 } else if (reserved_blocks &&
3809 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
3810 set_sbi_flag(sbi, SBI_NEED_FSCK);
3811 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
3812 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
3813 "run fsck to fix.",
3814 __func__, inode->i_ino, inode->i_blocks,
3815 reserved_blocks,
3816 atomic_read(&F2FS_I(inode)->i_compr_blocks));
3817 }
3818
3819 return ret;
3820 }
3821
3822 static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3823 pgoff_t off, block_t block, block_t len, u32 flags)
3824 {
3825 struct request_queue *q = bdev_get_queue(bdev);
3826 sector_t sector = SECTOR_FROM_BLOCK(block);
3827 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3828 int ret = 0;
3829
3830 if (!q)
3831 return -ENXIO;
3832
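/*
 * Discard the range first (securely if the device supports it), then
 * zero it out if requested; encrypted files go through fscrypt so the
 * zeroes are written as properly encrypted blocks.
 */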
3833 if (flags & F2FS_TRIM_FILE_DISCARD)
3834 ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3835 blk_queue_secure_erase(q) ?
3836 BLKDEV_DISCARD_SECURE : 0);
3837
3838 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3839 if (IS_ENCRYPTED(inode))
3840 ret = fscrypt_zeroout_range(inode, off, block, len);
3841 else
3842 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3843 GFP_NOFS, 0);
3844 }
3845
3846 return ret;
3847 }
3848
3849 static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
3850 {
3851 struct inode *inode = file_inode(filp);
3852 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3853 struct address_space *mapping = inode->i_mapping;
3854 struct block_device *prev_bdev = NULL;
3855 struct f2fs_sectrim_range range;
3856 pgoff_t index, pg_end, prev_index = 0;
3857 block_t prev_block = 0, len = 0;
3858 loff_t end_addr;
3859 bool to_end = false;
3860 int ret = 0;
3861
3862 if (!(filp->f_mode & FMODE_WRITE))
3863 return -EBADF;
3864
3865 if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
3866 sizeof(range)))
3867 return -EFAULT;
3868
3869 if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
3870 !S_ISREG(inode->i_mode))
3871 return -EINVAL;
3872
3873 if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
3874 !f2fs_hw_support_discard(sbi)) ||
3875 ((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
3876 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
3877 return -EOPNOTSUPP;
3878
3879 file_start_write(filp);
3880 inode_lock(inode);
3881
3882 if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
3883 range.start >= inode->i_size) {
3884 ret = -EINVAL;
3885 goto err;
3886 }
3887
3888 if (range.len == 0)
3889 goto err;
3890
3891 if (inode->i_size - range.start > range.len) {
3892 end_addr = range.start + range.len;
3893 } else {
3894 end_addr = range.len == (u64)-1 ?
3895 sbi->sb->s_maxbytes : inode->i_size;
3896 to_end = true;
3897 }
3898
3899 if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
3900 (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
3901 ret = -EINVAL;
3902 goto err;
3903 }
3904
3905 index = F2FS_BYTES_TO_BLK(range.start);
3906 pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);
3907
3908 ret = f2fs_convert_inline_inode(inode);
3909 if (ret)
3910 goto err;
3911
3912 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3913 f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
3914
3915 ret = filemap_write_and_wait_range(mapping, range.start,
3916 to_end ? LLONG_MAX : end_addr - 1);
3917 if (ret)
3918 goto out;
3919
3920 truncate_inode_pages_range(mapping, range.start,
3921 to_end ? -1 : end_addr - 1);
3922
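/*
 * Walk the dnode blocks and merge runs that are logically and physically
 * contiguous on the same device, so each run is erased with a single
 * request.
 */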
3923 while (index < pg_end) {
3924 struct dnode_of_data dn;
3925 pgoff_t end_offset, count;
3926 int i;
3927
3928 set_new_dnode(&dn, inode, NULL, NULL, 0);
3929 ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3930 if (ret) {
3931 if (ret == -ENOENT) {
3932 index = f2fs_get_next_page_offset(&dn, index);
3933 continue;
3934 }
3935 goto out;
3936 }
3937
3938 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3939 count = min(end_offset - dn.ofs_in_node, pg_end - index);
3940 for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
3941 struct block_device *cur_bdev;
3942 block_t blkaddr = f2fs_data_blkaddr(&dn);
3943
3944 if (!__is_valid_data_blkaddr(blkaddr))
3945 continue;
3946
3947 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3948 DATA_GENERIC_ENHANCE)) {
3949 ret = -EFSCORRUPTED;
3950 f2fs_put_dnode(&dn);
3951 goto out;
3952 }
3953
3954 cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
3955 if (f2fs_is_multi_device(sbi)) {
3956 int di = f2fs_target_device_index(sbi, blkaddr);
3957
3958 blkaddr -= FDEV(di).start_blk;
3959 }
3960
3961 if (len) {
3962 if (prev_bdev == cur_bdev &&
3963 index == prev_index + len &&
3964 blkaddr == prev_block + len) {
3965 len++;
3966 } else {
3967 ret = f2fs_secure_erase(prev_bdev,
3968 inode, prev_index, prev_block,
3969 len, range.flags);
3970 if (ret) {
3971 f2fs_put_dnode(&dn);
3972 goto out;
3973 }
3974
3975 len = 0;
3976 }
3977 }
3978
3979 if (!len) {
3980 prev_bdev = cur_bdev;
3981 prev_index = index;
3982 prev_block = blkaddr;
3983 len = 1;
3984 }
3985 }
3986
3987 f2fs_put_dnode(&dn);
3988
3989 if (fatal_signal_pending(current)) {
3990 ret = -EINTR;
3991 goto out;
3992 }
3993 cond_resched();
3994 }
3995
3996 if (len)
3997 ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
3998 prev_block, len, range.flags);
3999 out:
4000 f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
4001 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4002 err:
4003 inode_unlock(inode);
4004 file_end_write(filp);
4005
4006 return ret;
4007 }
4008
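/*
 * F2FS_IOC_GET_COMPRESS_OPTION handler: copy the inode's compression
 * algorithm and log2 cluster size to userspace in a struct f2fs_comp_option.
 */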
4009 static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
4010 {
4011 struct inode *inode = file_inode(filp);
4012 struct f2fs_comp_option option;
4013
4014 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
4015 return -EOPNOTSUPP;
4016
4017 inode_lock_shared(inode);
4018
4019 if (!f2fs_compressed_file(inode)) {
4020 inode_unlock_shared(inode);
4021 return -ENODATA;
4022 }
4023
4024 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
4025 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
4026
4027 inode_unlock_shared(inode);
4028
4029 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
4030 sizeof(option)))
4031 return -EFAULT;
4032
4033 return 0;
4034 }
4035
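/*
 * F2FS_IOC_SET_COMPRESS_OPTION handler: update the per-inode compression
 * algorithm and cluster size from a struct f2fs_comp_option.  Only permitted
 * on an empty (i_size == 0), non-mmapped compressed file with no dirty
 * pages; the algorithm may be set even if the running kernel cannot use it,
 * in which case a warning is logged.
 */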
4036 static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
4037 {
4038 struct inode *inode = file_inode(filp);
4039 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4040 struct f2fs_comp_option option;
4041 int ret = 0;
4042
4043 if (!f2fs_sb_has_compression(sbi))
4044 return -EOPNOTSUPP;
4045
4046 if (!(filp->f_mode & FMODE_WRITE))
4047 return -EBADF;
4048
4049 if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
4050 sizeof(option)))
4051 return -EFAULT;
4052
4053 if (!f2fs_compressed_file(inode) ||
4054 option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
4055 option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
4056 option.algorithm >= COMPRESS_MAX)
4057 return -EINVAL;
4058
4059 file_start_write(filp);
4060 inode_lock(inode);
4061
4062 if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
4063 ret = -EBUSY;
4064 goto out;
4065 }
4066
4067 if (inode->i_size != 0) {
4068 ret = -EFBIG;
4069 goto out;
4070 }
4071
4072 F2FS_I(inode)->i_compress_algorithm = option.algorithm;
4073 F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
4074 F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
4075 f2fs_mark_inode_dirty_sync(inode, true);
4076
4077 if (!f2fs_is_compress_backend_ready(inode))
4078 f2fs_warn(sbi, "compression algorithm is successfully set, "
4079 "but current kernel doesn't support this algorithm.");
4080 out:
4081 inode_unlock(inode);
4082 file_end_write(filp);
4083
4084 return ret;
4085 }
4086
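/*
 * Pull @len pages starting at @page_idx into the page cache and mark them
 * dirty again, so the next writeback pass rewrites them.  Used by the
 * compress/decompress ioctls below to push clusters back through the write
 * path in their new on-disk form.
 */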
4087 static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
4088 {
4089 DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx);
4090 struct address_space *mapping = inode->i_mapping;
4091 struct page *page;
4092 pgoff_t redirty_idx = page_idx;
4093 int i, page_len = 0, ret = 0;
4094
4095 page_cache_ra_unbounded(&ractl, len, 0);
4096
4097 for (i = 0; i < len; i++, page_idx++) {
4098 page = read_cache_page(mapping, page_idx, NULL, NULL);
4099 if (IS_ERR(page)) {
4100 ret = PTR_ERR(page);
4101 break;
4102 }
4103 page_len++;
4104 }
4105
4106 for (i = 0; i < page_len; i++, redirty_idx++) {
4107 page = find_lock_page(mapping, redirty_idx);
4108 if (!page) {
4109 ret = -ENOMEM;
4110 break;
4111 }
4112 set_page_dirty(page);
4113 f2fs_put_page(page, 1);
4114 f2fs_put_page(page, 0);
4115 }
4116
4117 return ret;
4118 }
4119
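/*
 * F2FS_IOC_DECOMPRESS_FILE handler (compress_mode=user only): walk the file
 * cluster by cluster, redirty the cached pages via redirty_blocks(), and let
 * writeback store the data uncompressed, flushing whenever a segment's worth
 * of dirty pages has accumulated.
 */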
4120 static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
4121 {
4122 struct inode *inode = file_inode(filp);
4123 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4124 struct f2fs_inode_info *fi = F2FS_I(inode);
4125 pgoff_t page_idx = 0, last_idx;
4126 unsigned int blk_per_seg = sbi->blocks_per_seg;
4127 int cluster_size = F2FS_I(inode)->i_cluster_size;
4128 int count, ret;
4129
4130 if (!f2fs_sb_has_compression(sbi) ||
4131 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4132 return -EOPNOTSUPP;
4133
4134 if (!(filp->f_mode & FMODE_WRITE))
4135 return -EBADF;
4136
4137 if (!f2fs_compressed_file(inode))
4138 return -EINVAL;
4139
4140 f2fs_balance_fs(F2FS_I_SB(inode), true);
4141
4142 file_start_write(filp);
4143 inode_lock(inode);
4144
4145 if (!f2fs_is_compress_backend_ready(inode)) {
4146 ret = -EOPNOTSUPP;
4147 goto out;
4148 }
4149
4150 if (f2fs_is_mmap_file(inode)) {
4151 ret = -EBUSY;
4152 goto out;
4153 }
4154
4155 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4156 if (ret)
4157 goto out;
4158
4159 if (!atomic_read(&fi->i_compr_blocks))
4160 goto out;
4161
4162 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4163
4164 count = last_idx - page_idx;
4165 while (count) {
4166 int len = min(cluster_size, count);
4167
4168 ret = redirty_blocks(inode, page_idx, len);
4169 if (ret < 0)
4170 break;
4171
4172 if (get_dirty_pages(inode) >= blk_per_seg)
4173 filemap_fdatawrite(inode->i_mapping);
4174
4175 count -= len;
4176 page_idx += len;
4177 }
4178
4179 if (!ret)
4180 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4181 LLONG_MAX);
4182
4183 if (ret)
4184 f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.",
4185 __func__, ret);
4186 out:
4187 inode_unlock(inode);
4188 file_end_write(filp);
4189
4190 return ret;
4191 }
4192
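/*
 * F2FS_IOC_COMPRESS_FILE handler: the counterpart of the ioctl above.  With
 * FI_ENABLE_COMPRESS set for the duration, redirty the file's clusters so
 * writeback stores them in compressed form, then clear the flag again.
 */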
4193 static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4194 {
4195 struct inode *inode = file_inode(filp);
4196 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4197 pgoff_t page_idx = 0, last_idx;
4198 unsigned int blk_per_seg = sbi->blocks_per_seg;
4199 int cluster_size = F2FS_I(inode)->i_cluster_size;
4200 int count, ret;
4201
4202 if (!f2fs_sb_has_compression(sbi) ||
4203 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4204 return -EOPNOTSUPP;
4205
4206 if (!(filp->f_mode & FMODE_WRITE))
4207 return -EBADF;
4208
4209 if (!f2fs_compressed_file(inode))
4210 return -EINVAL;
4211
4212 f2fs_balance_fs(F2FS_I_SB(inode), true);
4213
4214 file_start_write(filp);
4215 inode_lock(inode);
4216
4217 if (!f2fs_is_compress_backend_ready(inode)) {
4218 ret = -EOPNOTSUPP;
4219 goto out;
4220 }
4221
4222 if (f2fs_is_mmap_file(inode)) {
4223 ret = -EBUSY;
4224 goto out;
4225 }
4226
4227 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4228 if (ret)
4229 goto out;
4230
4231 set_inode_flag(inode, FI_ENABLE_COMPRESS);
4232
4233 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4234
4235 count = last_idx - page_idx;
4236 while (count) {
4237 int len = min(cluster_size, count);
4238
4239 ret = redirty_blocks(inode, page_idx, len);
4240 if (ret < 0)
4241 break;
4242
4243 if (get_dirty_pages(inode) >= blk_per_seg)
4244 filemap_fdatawrite(inode->i_mapping);
4245
4246 count -= len;
4247 page_idx += len;
4248 }
4249
4250 if (!ret)
4251 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4252 LLONG_MAX);
4253
4254 clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4255
4256 if (ret)
4257 f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). Please delete the file.",
4258 __func__, ret);
4259 out:
4260 inode_unlock(inode);
4261 file_end_write(filp);
4262
4263 return ret;
4264 }
4265
4266 static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4267 {
4268 switch (cmd) {
4269 case FS_IOC_GETFLAGS:
4270 return f2fs_ioc_getflags(filp, arg);
4271 case FS_IOC_SETFLAGS:
4272 return f2fs_ioc_setflags(filp, arg);
4273 case FS_IOC_GETVERSION:
4274 return f2fs_ioc_getversion(filp, arg);
4275 case F2FS_IOC_START_ATOMIC_WRITE:
4276 return f2fs_ioc_start_atomic_write(filp);
4277 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4278 return f2fs_ioc_commit_atomic_write(filp);
4279 case F2FS_IOC_START_VOLATILE_WRITE:
4280 return f2fs_ioc_start_volatile_write(filp);
4281 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4282 return f2fs_ioc_release_volatile_write(filp);
4283 case F2FS_IOC_ABORT_VOLATILE_WRITE:
4284 return f2fs_ioc_abort_volatile_write(filp);
4285 case F2FS_IOC_SHUTDOWN:
4286 return f2fs_ioc_shutdown(filp, arg);
4287 case FITRIM:
4288 return f2fs_ioc_fitrim(filp, arg);
4289 case FS_IOC_SET_ENCRYPTION_POLICY:
4290 return f2fs_ioc_set_encryption_policy(filp, arg);
4291 case FS_IOC_GET_ENCRYPTION_POLICY:
4292 return f2fs_ioc_get_encryption_policy(filp, arg);
4293 case FS_IOC_GET_ENCRYPTION_PWSALT:
4294 return f2fs_ioc_get_encryption_pwsalt(filp, arg);
4295 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4296 return f2fs_ioc_get_encryption_policy_ex(filp, arg);
4297 case FS_IOC_ADD_ENCRYPTION_KEY:
4298 return f2fs_ioc_add_encryption_key(filp, arg);
4299 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4300 return f2fs_ioc_remove_encryption_key(filp, arg);
4301 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4302 return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
4303 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4304 return f2fs_ioc_get_encryption_key_status(filp, arg);
4305 case FS_IOC_GET_ENCRYPTION_NONCE:
4306 return f2fs_ioc_get_encryption_nonce(filp, arg);
4307 case F2FS_IOC_GARBAGE_COLLECT:
4308 return f2fs_ioc_gc(filp, arg);
4309 case F2FS_IOC_GARBAGE_COLLECT_RANGE:
4310 return f2fs_ioc_gc_range(filp, arg);
4311 case F2FS_IOC_WRITE_CHECKPOINT:
4312 return f2fs_ioc_write_checkpoint(filp, arg);
4313 case F2FS_IOC_DEFRAGMENT:
4314 return f2fs_ioc_defragment(filp, arg);
4315 case F2FS_IOC_MOVE_RANGE:
4316 return f2fs_ioc_move_range(filp, arg);
4317 case F2FS_IOC_FLUSH_DEVICE:
4318 return f2fs_ioc_flush_device(filp, arg);
4319 case F2FS_IOC_GET_FEATURES:
4320 return f2fs_ioc_get_features(filp, arg);
4321 case FS_IOC_FSGETXATTR:
4322 return f2fs_ioc_fsgetxattr(filp, arg);
4323 case FS_IOC_FSSETXATTR:
4324 return f2fs_ioc_fssetxattr(filp, arg);
4325 case F2FS_IOC_GET_PIN_FILE:
4326 return f2fs_ioc_get_pin_file(filp, arg);
4327 case F2FS_IOC_SET_PIN_FILE:
4328 return f2fs_ioc_set_pin_file(filp, arg);
4329 case F2FS_IOC_PRECACHE_EXTENTS:
4330 return f2fs_ioc_precache_extents(filp, arg);
4331 case F2FS_IOC_RESIZE_FS:
4332 return f2fs_ioc_resize_fs(filp, arg);
4333 case FS_IOC_ENABLE_VERITY:
4334 return f2fs_ioc_enable_verity(filp, arg);
4335 case FS_IOC_MEASURE_VERITY:
4336 return f2fs_ioc_measure_verity(filp, arg);
4337 case FS_IOC_READ_VERITY_METADATA:
4338 return f2fs_ioc_read_verity_metadata(filp, arg);
4339 case FS_IOC_GETFSLABEL:
4340 return f2fs_ioc_getfslabel(filp, arg);
4341 case FS_IOC_SETFSLABEL:
4342 return f2fs_ioc_setfslabel(filp, arg);
4343 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4344 return f2fs_get_compress_blocks(filp, arg);
4345 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4346 return f2fs_release_compress_blocks(filp, arg);
4347 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4348 return f2fs_reserve_compress_blocks(filp, arg);
4349 case F2FS_IOC_SEC_TRIM_FILE:
4350 return f2fs_sec_trim_file(filp, arg);
4351 case F2FS_IOC_GET_COMPRESS_OPTION:
4352 return f2fs_ioc_get_compress_option(filp, arg);
4353 case F2FS_IOC_SET_COMPRESS_OPTION:
4354 return f2fs_ioc_set_compress_option(filp, arg);
4355 case F2FS_IOC_DECOMPRESS_FILE:
4356 return f2fs_ioc_decompress_file(filp, arg);
4357 case F2FS_IOC_COMPRESS_FILE:
4358 return f2fs_ioc_compress_file(filp, arg);
4359 default:
4360 return -ENOTTY;
4361 }
4362 }
4363
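/*
 * Top-level ioctl entry point: bail out early on a checkpoint error or while
 * the filesystem is not checkpoint-ready, otherwise dispatch to
 * __f2fs_ioctl().
 */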
4364 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4365 {
4366 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4367 return -EIO;
4368 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4369 return -ENOSPC;
4370
4371 return __f2fs_ioctl(filp, cmd, arg);
4372 }
4373
4374 /*
4375 * Return %true if the given read or write request should use direct I/O, or
4376 * %false if it should use buffered I/O.
4377 */
4378 static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
4379 struct iov_iter *iter)
4380 {
4381 unsigned int align;
4382
4383 if (!(iocb->ki_flags & IOCB_DIRECT))
4384 return false;
4385
4386 if (f2fs_force_buffered_io(inode, iocb, iter))
4387 return false;
4388
4389 /*
4390 * Direct I/O not aligned to the disk's logical_block_size will be
4391 * attempted, but will fail with -EINVAL.
4392 *
4393 * f2fs additionally requires that direct I/O be aligned to the
4394 * filesystem block size, which is often a stricter requirement.
4395 * However, f2fs traditionally falls back to buffered I/O on requests
4396 * that are logical_block_size-aligned but not fs-block aligned.
4397 *
4398 * The below logic implements this behavior.
4399 */
4400 align = iocb->ki_pos | iov_iter_alignment(iter);
4401 if (!IS_ALIGNED(align, i_blocksize(inode)) &&
4402 IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev)))
4403 return false;
4404
4405 return true;
4406 }
4407
4408 static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error,
4409 unsigned int flags)
4410 {
4411 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4412
4413 dec_page_count(sbi, F2FS_DIO_READ);
4414 if (error)
4415 return error;
4416 f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, size);
4417 return 0;
4418 }
4419
4420 static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = {
4421 .end_io = f2fs_dio_read_end_io,
4422 };
4423
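/*
 * Direct read path: take i_gc_rwsem[READ] (trylock only for IOCB_NOWAIT) and
 * issue the read through iomap.  In-flight requests are tracked with the
 * F2FS_DIO_READ counter, which f2fs_dio_read_end_io() drops again.
 */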
4424 static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
4425 {
4426 struct file *file = iocb->ki_filp;
4427 struct inode *inode = file_inode(file);
4428 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4429 struct f2fs_inode_info *fi = F2FS_I(inode);
4430 const loff_t pos = iocb->ki_pos;
4431 const size_t count = iov_iter_count(to);
4432 struct iomap_dio *dio;
4433 ssize_t ret;
4434
4435 if (count == 0)
4436 return 0; /* skip atime update */
4437
4438 trace_f2fs_direct_IO_enter(inode, iocb, count, READ);
4439 if (trace_android_fs_dataread_start_enabled()) {
4440 char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
4441
4442 path = android_fstrace_get_pathname(pathbuf,
4443 MAX_TRACE_PATHBUF_LEN,
4444 inode);
4445 trace_android_fs_dataread_start(inode, pos,
4446 count, current->pid, path,
4447 current->comm);
4448 }
4449
4450 if (iocb->ki_flags & IOCB_NOWAIT) {
4451 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
4452 ret = -EAGAIN;
4453 goto out;
4454 }
4455 } else {
4456 f2fs_down_read(&fi->i_gc_rwsem[READ]);
4457 }
4458
4459 /*
4460 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4461 * the higher-level function iomap_dio_rw() in order to ensure that the
4462 * F2FS_DIO_READ counter will be decremented correctly in all cases.
4463 */
4464 inc_page_count(sbi, F2FS_DIO_READ);
4465 dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
4466 &f2fs_iomap_dio_read_ops, is_sync_kiocb(iocb));
4467 if (IS_ERR_OR_NULL(dio)) {
4468 ret = PTR_ERR_OR_ZERO(dio);
4469 if (ret != -EIOCBQUEUED)
4470 dec_page_count(sbi, F2FS_DIO_READ);
4471 } else {
4472 ret = iomap_dio_complete(dio);
4473 }
4474
4475 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4476
4477 file_accessed(file);
4478 out:
4479 if (trace_android_fs_dataread_start_enabled())
4480 trace_android_fs_dataread_end(inode, pos, count);
4481 trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret);
4482 return ret;
4483 }
4484
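/*
 * ->read_iter: route the request either to the iomap-based direct read path
 * or to the generic buffered read, accounting buffered bytes in the iostat
 * counters.
 */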
4485 static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
4486 {
4487 struct inode *inode = file_inode(iocb->ki_filp);
4488 ssize_t ret = 0;
4489
4490 if (!f2fs_is_compress_backend_ready(inode))
4491 return -EOPNOTSUPP;
4492
4493 if (f2fs_should_use_dio(inode, iocb, to))
4494 return f2fs_dio_read_iter(iocb, to);
4495
4496 ret = generic_file_buffered_read(iocb, to, ret);
4497 if (ret > 0)
4498 f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_READ_IO, ret);
4499 return ret;
4500 }
4501
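/*
 * Common pre-write checks: reject writes to immutable inodes and to files
 * whose compressed blocks have been released, then run generic_write_checks()
 * and file_modified().  Returns the number of bytes that may be written, or
 * a negative errno.
 */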
4502 static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
4503 {
4504 struct file *file = iocb->ki_filp;
4505 struct inode *inode = file_inode(file);
4506 ssize_t count;
4507 int err;
4508
4509 if (IS_IMMUTABLE(inode))
4510 return -EPERM;
4511
4512 if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
4513 return -EPERM;
4514
4515 count = generic_write_checks(iocb, from);
4516 if (count <= 0)
4517 return count;
4518
4519 err = file_modified(file);
4520 if (err)
4521 return err;
4522 return count;
4523 }
4524
4525 /*
4526 * Preallocate blocks for a write request, if it is possible and helpful to do
4527 * so. Returns a positive number if blocks may have been preallocated, 0 if no
4528 * blocks were preallocated, or a negative errno value if something went
4529 * seriously wrong. Also sets FI_PREALLOCATED_ALL on the inode if *all* the
4530 * requested blocks (not just some of them) have been allocated.
4531 */
4532 static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
4533 bool dio)
4534 {
4535 struct inode *inode = file_inode(iocb->ki_filp);
4536 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4537 const loff_t pos = iocb->ki_pos;
4538 const size_t count = iov_iter_count(iter);
4539 struct f2fs_map_blocks map = {};
4540 int flag;
4541 int ret;
4542
4543 /* If it will be an out-of-place direct write, don't bother. */
4544 if (dio && f2fs_lfs_mode(sbi))
4545 return 0;
4546 /*
4547 * Don't preallocate for a DIO write below i_size: per DIO_SKIP_HOLES,
4548 * such a write falls back to buffered IO if it meets any holes.
4549 */
4550 if (dio && i_size_read(inode) &&
4551 (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode))))
4552 return 0;
4553
4554 /* No-wait I/O can't allocate blocks. */
4555 if (iocb->ki_flags & IOCB_NOWAIT)
4556 return 0;
4557
4558 /* If it will be a short write, don't bother. */
4559 if (iov_iter_fault_in_readable(iter, count))
4560 return 0;
4561
4562 if (f2fs_has_inline_data(inode)) {
4563 /* If the data will fit inline, don't bother. */
4564 if (pos + count <= MAX_INLINE_DATA(inode))
4565 return 0;
4566 ret = f2fs_convert_inline_inode(inode);
4567 if (ret)
4568 return ret;
4569 }
4570
4571 /* Only preallocate blocks that will be fully overwritten; skip partial blocks at either end. */
4572 map.m_lblk = F2FS_BLK_ALIGN(pos);
4573 map.m_len = F2FS_BYTES_TO_BLK(pos + count);
4574 if (map.m_len > map.m_lblk)
4575 map.m_len -= map.m_lblk;
4576 else
4577 map.m_len = 0;
4578 map.m_may_create = true;
4579 if (dio) {
4580 map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
4581 flag = F2FS_GET_BLOCK_PRE_DIO;
4582 } else {
4583 map.m_seg_type = NO_CHECK_TYPE;
4584 flag = F2FS_GET_BLOCK_PRE_AIO;
4585 }
4586
4587 ret = f2fs_map_blocks(inode, &map, 1, flag);
4588 /* -ENOSPC|-EDQUOT are fine to report the number of allocated blocks. */
4589 if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0))
4590 return ret;
4591 if (ret == 0)
4592 set_inode_flag(inode, FI_PREALLOCATED_ALL);
4593 return map.m_len;
4594 }
4595
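/*
 * Buffered write path: hand the iov_iter to generic_perform_write() and
 * account the bytes written.  IOCB_NOWAIT is not supported here.
 */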
4596 static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
4597 struct iov_iter *from)
4598 {
4599 struct file *file = iocb->ki_filp;
4600 struct inode *inode = file_inode(file);
4601 ssize_t ret;
4602
4603 if (iocb->ki_flags & IOCB_NOWAIT)
4604 return -EOPNOTSUPP;
4605
4606 current->backing_dev_info = inode_to_bdi(inode);
4607 ret = generic_perform_write(file, from, iocb->ki_pos);
4608 current->backing_dev_info = NULL;
4609
4610 if (ret > 0) {
4611 iocb->ki_pos += ret;
4612 f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_IO, ret);
4613 }
4614 return ret;
4615 }
4616
4617 static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
4618 unsigned int flags)
4619 {
4620 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
4621
4622 dec_page_count(sbi, F2FS_DIO_WRITE);
4623 if (error)
4624 return error;
4625 f2fs_update_iostat(sbi, APP_DIRECT_IO, size);
4626 return 0;
4627 }
4628
4629 static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
4630 .end_io = f2fs_dio_write_end_io,
4631 };
4632
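/*
 * Direct write path: take i_gc_rwsem[WRITE] (and [READ] too when LFS mode
 * forces out-of-place updates), issue the write through iomap while tracking
 * it with the F2FS_DIO_WRITE counter, and fall back to a buffered write for
 * any remainder the direct write could not complete.
 */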
4633 static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
4634 bool *may_need_sync)
4635 {
4636 struct file *file = iocb->ki_filp;
4637 struct inode *inode = file_inode(file);
4638 struct f2fs_inode_info *fi = F2FS_I(inode);
4639 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4640 const bool do_opu = f2fs_lfs_mode(sbi);
4641 const int whint_mode = F2FS_OPTION(sbi).whint_mode;
4642 const loff_t pos = iocb->ki_pos;
4643 const ssize_t count = iov_iter_count(from);
4644 const enum rw_hint hint = iocb->ki_hint;
4645 unsigned int dio_flags;
4646 struct iomap_dio *dio;
4647 ssize_t ret;
4648
4649 trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE);
4650 if (trace_android_fs_datawrite_start_enabled()) {
4651 char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
4652
4653 path = android_fstrace_get_pathname(pathbuf,
4654 MAX_TRACE_PATHBUF_LEN,
4655 inode);
4656 trace_android_fs_datawrite_start(inode, pos, count,
4657 current->pid, path,
4658 current->comm);
4659 }
4660
4661 if (iocb->ki_flags & IOCB_NOWAIT) {
4662 /* f2fs_convert_inline_inode() and block allocation can block */
4663 if (f2fs_has_inline_data(inode) ||
4664 !f2fs_overwrite_io(inode, pos, count)) {
4665 ret = -EAGAIN;
4666 goto out;
4667 }
4668
4669 if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
4670 ret = -EAGAIN;
4671 goto out;
4672 }
4673 if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) {
4674 f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
4675 ret = -EAGAIN;
4676 goto out;
4677 }
4678 } else {
4679 ret = f2fs_convert_inline_inode(inode);
4680 if (ret)
4681 goto out;
4682
4683 f2fs_down_read(&fi->i_gc_rwsem[WRITE]);
4684 if (do_opu)
4685 f2fs_down_read(&fi->i_gc_rwsem[READ]);
4686 }
4687 if (whint_mode == WHINT_MODE_OFF)
4688 iocb->ki_hint = WRITE_LIFE_NOT_SET;
4689
4690 /*
4691 * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
4692 * the higher-level function iomap_dio_rw() in order to ensure that the
4693 * F2FS_DIO_WRITE counter will be decremented correctly in all cases.
4694 */
4695 inc_page_count(sbi, F2FS_DIO_WRITE);
4696 dio_flags = 0;
4697 if (pos + count > inode->i_size)
4698 dio_flags = 1; /* IOMAP_DIO_FORCE_WAIT */
4699 dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
4700 &f2fs_iomap_dio_write_ops,
4701 dio_flags || is_sync_kiocb(iocb));
4702 if (IS_ERR_OR_NULL(dio)) {
4703 ret = PTR_ERR_OR_ZERO(dio);
4704 if (ret == -ENOTBLK)
4705 ret = 0;
4706 if (ret != -EIOCBQUEUED)
4707 dec_page_count(sbi, F2FS_DIO_WRITE);
4708 } else {
4709 ret = iomap_dio_complete(dio);
4710 }
4711
4712 if (whint_mode == WHINT_MODE_OFF)
4713 iocb->ki_hint = hint;
4714 if (do_opu)
4715 f2fs_up_read(&fi->i_gc_rwsem[READ]);
4716 f2fs_up_read(&fi->i_gc_rwsem[WRITE]);
4717
4718 if (ret < 0)
4719 goto out;
4720 if (pos + ret > inode->i_size)
4721 f2fs_i_size_write(inode, pos + ret);
4722 if (!do_opu)
4723 set_inode_flag(inode, FI_UPDATE_WRITE);
4724
4725 if (iov_iter_count(from)) {
4726 ssize_t ret2;
4727 loff_t bufio_start_pos = iocb->ki_pos;
4728
4729 /*
4730 * The direct write was partial, so we need to fall back to a
4731 * buffered write for the remainder.
4732 */
4733
4734 ret2 = f2fs_buffered_write_iter(iocb, from);
4735 if (iov_iter_count(from))
4736 f2fs_write_failed(inode, iocb->ki_pos);
4737 if (ret2 < 0)
4738 goto out;
4739
4740 /*
4741 * Ensure that the pagecache pages are written to disk and
4742 * invalidated to preserve the expected O_DIRECT semantics.
4743 */
4744 if (ret2 > 0) {
4745 loff_t bufio_end_pos = bufio_start_pos + ret2 - 1;
4746
4747 ret += ret2;
4748
4749 ret2 = filemap_write_and_wait_range(file->f_mapping,
4750 bufio_start_pos,
4751 bufio_end_pos);
4752 if (ret2 < 0)
4753 goto out;
4754 invalidate_mapping_pages(file->f_mapping,
4755 bufio_start_pos >> PAGE_SHIFT,
4756 bufio_end_pos >> PAGE_SHIFT);
4757 }
4758 } else {
4759 /* iomap_dio_rw() already handled the generic_write_sync(). */
4760 *may_need_sync = false;
4761 }
4762 out:
4763 if (trace_android_fs_datawrite_start_enabled())
4764 trace_android_fs_datawrite_end(inode, pos, count);
4765 trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret);
4766 return ret;
4767 }
4768
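/*
 * ->write_iter: decide between the direct and buffered write paths, possibly
 * preallocate blocks for the request, perform the write, and truncate any
 * preallocated blocks left beyond i_size afterwards.
 */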
4769 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4770 {
4771 struct inode *inode = file_inode(iocb->ki_filp);
4772 const loff_t orig_pos = iocb->ki_pos;
4773 const size_t orig_count = iov_iter_count(from);
4774 loff_t target_size;
4775 bool dio;
4776 bool may_need_sync = true;
4777 int preallocated;
4778 ssize_t ret;
4779
4780 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
4781 ret = -EIO;
4782 goto out;
4783 }
4784
4785 if (!f2fs_is_compress_backend_ready(inode)) {
4786 ret = -EOPNOTSUPP;
4787 goto out;
4788 }
4789
4790 if (iocb->ki_flags & IOCB_NOWAIT) {
4791 if (!inode_trylock(inode)) {
4792 ret = -EAGAIN;
4793 goto out;
4794 }
4795 } else {
4796 inode_lock(inode);
4797 }
4798
4799 ret = f2fs_write_checks(iocb, from);
4800 if (ret <= 0)
4801 goto out_unlock;
4802
4803 /* Determine whether we will do a direct write or a buffered write. */
4804 dio = f2fs_should_use_dio(inode, iocb, from);
4805
4806 /* Possibly preallocate the blocks for the write. */
4807 target_size = iocb->ki_pos + iov_iter_count(from);
4808 preallocated = f2fs_preallocate_blocks(iocb, from, dio);
4809 if (preallocated < 0)
4810 ret = preallocated;
4811 else
4812 /* Do the actual write. */
4813 ret = dio ?
4814 f2fs_dio_write_iter(iocb, from, &may_need_sync) :
4815 f2fs_buffered_write_iter(iocb, from);
4816
4817 /* Don't leave any preallocated blocks around past i_size. */
4818 if (preallocated && i_size_read(inode) < target_size) {
4819 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4820 f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
4821 if (!f2fs_truncate(inode))
4822 file_dont_truncate(inode);
4823 f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);
4824 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
4825 } else {
4826 file_dont_truncate(inode);
4827 }
4828
4829 clear_inode_flag(inode, FI_PREALLOCATED_ALL);
4830 out_unlock:
4831 inode_unlock(inode);
4832 out:
4833 trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
4834 if (ret > 0 && may_need_sync)
4835 ret = generic_write_sync(iocb, ret);
4836 return ret;
4837 }
4838
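/*
 * ->fadvise: for POSIX_FADV_SEQUENTIAL, scale this file's readahead window by
 * the seq_file_ra_mul tunable and clear FMODE_RANDOM instead of using the
 * generic handling.  For POSIX_FADV_DONTNEED on a compressed file with the
 * compress_cache option enabled, also invalidate the cached compressed pages.
 */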
4839 static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
4840 int advice)
4841 {
4842 struct address_space *mapping;
4843 struct backing_dev_info *bdi;
4844 struct inode *inode = file_inode(filp);
4845 int err;
4846
4847 if (advice == POSIX_FADV_SEQUENTIAL) {
4848 if (S_ISFIFO(inode->i_mode))
4849 return -ESPIPE;
4850
4851 mapping = filp->f_mapping;
4852 if (!mapping || len < 0)
4853 return -EINVAL;
4854
4855 bdi = inode_to_bdi(mapping->host);
4856 filp->f_ra.ra_pages = bdi->ra_pages *
4857 F2FS_I_SB(inode)->seq_file_ra_mul;
4858 spin_lock(&filp->f_lock);
4859 filp->f_mode &= ~FMODE_RANDOM;
4860 spin_unlock(&filp->f_lock);
4861 return 0;
4862 }
4863
4864 err = generic_fadvise(filp, offset, len, advice);
4865 if (!err && advice == POSIX_FADV_DONTNEED &&
4866 test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
4867 f2fs_compressed_file(inode))
4868 f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);
4869
4870 return err;
4871 }
4872
4873 #ifdef CONFIG_COMPAT
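/*
 * 32-bit compat ioctl support: the gc_range and move_range arguments contain
 * u64 members, so mirror them with compat_u64 layouts, copy the fields in
 * individually, and forward to the common __f2fs_ioc_gc_range() and
 * __f2fs_ioc_move_range() helpers.
 */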
4874 struct compat_f2fs_gc_range {
4875 u32 sync;
4876 compat_u64 start;
4877 compat_u64 len;
4878 };
4879 #define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
4880 struct compat_f2fs_gc_range)
4881
4882 static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4883 {
4884 struct compat_f2fs_gc_range __user *urange;
4885 struct f2fs_gc_range range;
4886 int err;
4887
4888 urange = compat_ptr(arg);
4889 err = get_user(range.sync, &urange->sync);
4890 err |= get_user(range.start, &urange->start);
4891 err |= get_user(range.len, &urange->len);
4892 if (err)
4893 return -EFAULT;
4894
4895 return __f2fs_ioc_gc_range(file, &range);
4896 }
4897
4898 struct compat_f2fs_move_range {
4899 u32 dst_fd;
4900 compat_u64 pos_in;
4901 compat_u64 pos_out;
4902 compat_u64 len;
4903 };
4904 #define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
4905 struct compat_f2fs_move_range)
4906
4907 static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4908 {
4909 struct compat_f2fs_move_range __user *urange;
4910 struct f2fs_move_range range;
4911 int err;
4912
4913 urange = compat_ptr(arg);
4914 err = get_user(range.dst_fd, &urange->dst_fd);
4915 err |= get_user(range.pos_in, &urange->pos_in);
4916 err |= get_user(range.pos_out, &urange->pos_out);
4917 err |= get_user(range.len, &urange->len);
4918 if (err)
4919 return -EFAULT;
4920
4921 return __f2fs_ioc_move_range(file, &range);
4922 }
4923
4924 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4925 {
4926 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
4927 return -EIO;
4928 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
4929 return -ENOSPC;
4930
4931 switch (cmd) {
4932 case FS_IOC32_GETFLAGS:
4933 cmd = FS_IOC_GETFLAGS;
4934 break;
4935 case FS_IOC32_SETFLAGS:
4936 cmd = FS_IOC_SETFLAGS;
4937 break;
4938 case FS_IOC32_GETVERSION:
4939 cmd = FS_IOC_GETVERSION;
4940 break;
4941 case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
4942 return f2fs_compat_ioc_gc_range(file, arg);
4943 case F2FS_IOC32_MOVE_RANGE:
4944 return f2fs_compat_ioc_move_range(file, arg);
4945 case F2FS_IOC_START_ATOMIC_WRITE:
4946 case F2FS_IOC_COMMIT_ATOMIC_WRITE:
4947 case F2FS_IOC_START_VOLATILE_WRITE:
4948 case F2FS_IOC_RELEASE_VOLATILE_WRITE:
4949 case F2FS_IOC_ABORT_VOLATILE_WRITE:
4950 case F2FS_IOC_SHUTDOWN:
4951 case FITRIM:
4952 case FS_IOC_SET_ENCRYPTION_POLICY:
4953 case FS_IOC_GET_ENCRYPTION_PWSALT:
4954 case FS_IOC_GET_ENCRYPTION_POLICY:
4955 case FS_IOC_GET_ENCRYPTION_POLICY_EX:
4956 case FS_IOC_ADD_ENCRYPTION_KEY:
4957 case FS_IOC_REMOVE_ENCRYPTION_KEY:
4958 case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
4959 case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
4960 case FS_IOC_GET_ENCRYPTION_NONCE:
4961 case F2FS_IOC_GARBAGE_COLLECT:
4962 case F2FS_IOC_WRITE_CHECKPOINT:
4963 case F2FS_IOC_DEFRAGMENT:
4964 case F2FS_IOC_FLUSH_DEVICE:
4965 case F2FS_IOC_GET_FEATURES:
4966 case FS_IOC_FSGETXATTR:
4967 case FS_IOC_FSSETXATTR:
4968 case F2FS_IOC_GET_PIN_FILE:
4969 case F2FS_IOC_SET_PIN_FILE:
4970 case F2FS_IOC_PRECACHE_EXTENTS:
4971 case F2FS_IOC_RESIZE_FS:
4972 case FS_IOC_ENABLE_VERITY:
4973 case FS_IOC_MEASURE_VERITY:
4974 case FS_IOC_READ_VERITY_METADATA:
4975 case FS_IOC_GETFSLABEL:
4976 case FS_IOC_SETFSLABEL:
4977 case F2FS_IOC_GET_COMPRESS_BLOCKS:
4978 case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
4979 case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
4980 case F2FS_IOC_SEC_TRIM_FILE:
4981 case F2FS_IOC_GET_COMPRESS_OPTION:
4982 case F2FS_IOC_SET_COMPRESS_OPTION:
4983 case F2FS_IOC_DECOMPRESS_FILE:
4984 case F2FS_IOC_COMPRESS_FILE:
4985 break;
4986 default:
4987 return -ENOIOCTLCMD;
4988 }
4989 return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
4990 }
4991 #endif
4992
4993 const struct file_operations f2fs_file_operations = {
4994 .llseek = f2fs_llseek,
4995 .read_iter = f2fs_file_read_iter,
4996 .write_iter = f2fs_file_write_iter,
4997 .open = f2fs_file_open,
4998 .release = f2fs_release_file,
4999 .mmap = f2fs_file_mmap,
5000 .flush = f2fs_file_flush,
5001 .fsync = f2fs_sync_file,
5002 .fallocate = f2fs_fallocate,
5003 .unlocked_ioctl = f2fs_ioctl,
5004 #ifdef CONFIG_COMPAT
5005 .compat_ioctl = f2fs_compat_ioctl,
5006 #endif
5007 .splice_read = generic_file_splice_read,
5008 .splice_write = iter_file_splice_write,
5009 .fadvise = f2fs_file_fadvise,
5010 };
5011