// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static struct kmem_cache *bio_entry_slab;
static mempool_t *bio_post_read_ctx_pool;
static struct bio_set f2fs_bioset;

#define	F2FS_BIO_POOL_SIZE	NR_CURSEG_TYPE

int __init f2fs_init_bioset(void)
{
	if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
					0, BIOSET_NEED_BVECS))
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_bioset(void)
{
	bioset_exit(&f2fs_bioset);
}

static inline struct bio *__f2fs_bio_alloc(gfp_t gfp_mask,
						unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, &f2fs_bioset);
}

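/*
 * Allocate a bio for @npages pages. With @noio set, allocation uses
 * GFP_NOIO and is expected not to fail; otherwise FAULT_ALLOC_BIO fault
 * injection may make this return NULL.
 */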
struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio)
{
	if (noio) {
		/* No failure on bio allocation */
		return __f2fs_bio_alloc(GFP_NOIO, npages);
	}

	if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
		f2fs_show_injection_info(sbi, FAULT_ALLOC_BIO);
		return NULL;
	}

	return __f2fs_bio_alloc(GFP_KERNEL, npages);
}

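/*
 * Return true if writeback of @page must be guaranteed by the next
 * checkpoint: meta/node pages, directory data, atomic or quota file
 * data, and pages marked as cold data. Compressed pages are excluded.
 */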
static bool __is_cp_guaranteed(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct f2fs_sb_info *sbi;

	if (!mapping)
		return false;

	if (f2fs_is_compressed_page(page))
		return false;

	inode = mapping->host;
	sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_NODE_INO(sbi) ||
			S_ISDIR(inode->i_mode) ||
			(S_ISREG(inode->i_mode) &&
			(f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
			is_cold_data(page))
		return true;
	return false;
}

static enum count_type __read_io_type(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);

	if (mapping) {
		struct inode *inode = mapping->host;
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		if (inode->i_ino == F2FS_META_INO(sbi))
			return F2FS_RD_META;

		if (inode->i_ino == F2FS_NODE_INO(sbi))
			return F2FS_RD_NODE;
	}
	return F2FS_RD_DATA;
}

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_DECRYPT,
	STEP_DECOMPRESS_NOWQ,	/* handle normal cluster data inplace */
	STEP_DECOMPRESS,	/* handle compressed cluster data in workqueue */
	STEP_VERITY,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct f2fs_sb_info *sbi;
	struct work_struct work;
	unsigned int enabled_steps;
};

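/*
 * Complete the data pages of a read bio: hand compressed pages to the
 * decompressor, then mark each remaining page uptodate, or clear its
 * state for a later re-read on error, drop the in-flight read count,
 * and unlock it.
 */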
static void __read_end_io(struct bio *bio, bool compr, bool verity)
{
	struct page *page;
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		page = bv->bv_page;

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (compr && f2fs_is_compressed_page(page)) {
			f2fs_decompress_pages(bio, page, verity);
			continue;
		}
		if (verity)
			continue;
#endif

		/* PG_error was set if any post_read step failed */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}
}

static void f2fs_release_read_bio(struct bio *bio);
static void __f2fs_read_end_io(struct bio *bio, bool compr, bool verity)
{
	if (!compr)
		__read_end_io(bio, false, verity);
	f2fs_release_read_bio(bio);
}

static void f2fs_decompress_bio(struct bio *bio, bool verity)
{
	__read_end_io(bio, true, verity);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

static void f2fs_decrypt_work(struct bio_post_read_ctx *ctx)
{
	fscrypt_decrypt_bio(ctx->bio);
}

static void f2fs_decompress_work(struct bio_post_read_ctx *ctx)
{
	f2fs_decompress_bio(ctx->bio, ctx->enabled_steps & (1 << STEP_VERITY));
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
static void f2fs_verify_pages(struct page **rpages, unsigned int cluster_size)
{
	f2fs_decompress_end_io(rpages, cluster_size, false, true);
}

static void f2fs_verify_bio(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;
		struct decompress_io_ctx *dic;

		dic = (struct decompress_io_ctx *)page_private(page);

		if (dic) {
			if (atomic_dec_return(&dic->verity_pages))
				continue;
			f2fs_verify_pages(dic->rpages,
						dic->cluster_size);
			f2fs_free_dic(dic);
			continue;
		}

		if (bio->bi_status || PageError(page))
			goto clear_uptodate;

		if (fsverity_verify_page(page)) {
			SetPageUptodate(page);
			goto unlock;
		}
clear_uptodate:
		ClearPageUptodate(page);
		ClearPageError(page);
unlock:
		dec_page_count(F2FS_P_SB(page), __read_io_type(page));
		unlock_page(page);
	}
}
#endif

static void f2fs_verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned int enabled_steps = ctx->enabled_steps;
#endif

	/*
	 * fsverity_verify_bio() may call readpages() again, and while verity
	 * will be disabled for this, decryption may still be needed, resulting
	 * in another bio_post_read_ctx being allocated. So to prevent
	 * deadlocks we need to release the current ctx to the mempool first.
	 * This assumes that verity is the last post-read step.
	 */
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/* previous step is decompression */
	if (enabled_steps & (1 << STEP_DECOMPRESS)) {
		f2fs_verify_bio(bio);
		f2fs_release_read_bio(bio);
		return;
	}
#endif

	fsverity_verify_bio(bio);
	__f2fs_read_end_io(bio, false, false);
}

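/*
 * Workqueue entry for the post-read pipeline: decrypt first, then
 * decompress, and finally hand off to the fs-verity workqueue if
 * verification is enabled; otherwise finish the read here.
 */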
static void f2fs_post_read_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	if (ctx->enabled_steps & (1 << STEP_DECRYPT))
		f2fs_decrypt_work(ctx);

	if (ctx->enabled_steps & (1 << STEP_DECOMPRESS))
		f2fs_decompress_work(ctx);

	if (ctx->enabled_steps & (1 << STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verity_work);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}

	__f2fs_read_end_io(ctx->bio,
		ctx->enabled_steps & (1 << STEP_DECOMPRESS), false);
}

static void f2fs_enqueue_post_read_work(struct f2fs_sb_info *sbi,
						struct work_struct *work)
{
	queue_work(sbi->post_read_wq, work);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */

	if (ctx->enabled_steps & (1 << STEP_DECRYPT) ||
		ctx->enabled_steps & (1 << STEP_DECOMPRESS)) {
		INIT_WORK(&ctx->work, f2fs_post_read_work);
		f2fs_enqueue_post_read_work(ctx->sbi, &ctx->work);
		return;
	}

	if (ctx->enabled_steps & (1 << STEP_VERITY)) {
		INIT_WORK(&ctx->work, f2fs_verity_work);
		fsverity_enqueue_verify_work(&ctx->work);
		return;
	}

	__f2fs_read_end_io(ctx->bio, false, false);
}

static bool f2fs_bio_post_read_required(struct bio *bio)
{
	return bio->bi_private;
}

static void f2fs_read_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));

	if (time_to_inject(sbi, FAULT_READ_IO)) {
		f2fs_show_injection_info(sbi, FAULT_READ_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	if (f2fs_bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		bio_post_read_processing(ctx);
		return;
	}

	__f2fs_read_end_io(bio, false, false);
}

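/*
 * Write completion: release dummy alignment pages and encryption bounce
 * pages, propagate errors to the mapping (stopping checkpoints for
 * checkpointed data), and wake up waiters once all F2FS_WB_CP_DATA
 * writeback has drained.
 */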
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (time_to_inject(sbi, FAULT_WRITE_IO)) {
		f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
		bio->bi_status = BLK_STS_IOERR;
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		enum count_type type = WB_DATA_TYPE(page);

		if (IS_DUMMY_WRITTEN_PAGE(page)) {
			set_page_private(page, (unsigned long)NULL);
			ClearPagePrivate(page);
			unlock_page(page);
			mempool_free(page, sbi->write_io_dummy);

			if (unlikely(bio->bi_status))
				f2fs_stop_checkpoint(sbi, true);
			continue;
		}

		fscrypt_finalize_bounce_page(&page);

#ifdef CONFIG_F2FS_FS_COMPRESSION
		if (f2fs_is_compressed_page(page)) {
			f2fs_compress_write_end_io(bio, page);
			continue;
		}
#endif

		if (unlikely(bio->bi_status)) {
			mapping_set_error(page->mapping, -EIO);
			if (type == F2FS_WB_CP_DATA)
				f2fs_stop_checkpoint(sbi, true);
		}

		f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
					page->index != nid_of_node(page));

		dec_page_count(sbi, type);
		if (f2fs_in_warm_node_list(sbi, page))
			f2fs_del_fsync_node_entry(sbi, page);
		clear_cold_data(page);
		end_page_writeback(page);
	}
	if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
				wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

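/*
 * Map @blk_addr to its backing block device. On a multi-device volume
 * the address is rebased to the device that contains it; when @bio is
 * given, its target device and start sector are set accordingly.
 */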
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	int i;

	if (f2fs_is_multi_device(sbi)) {
		for (i = 0; i < sbi->s_ndevs; i++) {
			if (FDEV(i).start_blk <= blk_addr &&
			    FDEV(i).end_blk >= blk_addr) {
				blk_addr -= FDEV(i).start_blk;
				bdev = FDEV(i).bdev;
				break;
			}
		}
	}
	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	}
	return bdev;
}

int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 0; i < sbi->s_ndevs; i++)
		if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
			return i;
	return 0;
}

/*
 * Return true if the bio's bdev is the same as the target device of
 * @blk_addr.
 */
static bool __same_bdev(struct f2fs_sb_info *sbi,
				block_t blk_addr, struct bio *bio)
{
	struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);

	return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
}

static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct bio *bio;

	bio = f2fs_bio_alloc(sbi, npages, true);

	f2fs_target_device(sbi, fio->new_blkaddr, bio);
	if (is_read_io(fio->op)) {
		bio->bi_end_io = f2fs_read_end_io;
		bio->bi_private = NULL;
	} else {
		bio->bi_end_io = f2fs_write_end_io;
		bio->bi_private = sbi;
		bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
						fio->type, fio->temp);
	}
	if (fio->io_wbc)
		wbc_init_bio(fio->io_wbc, bio);

	return bio;
}

static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
				  pgoff_t first_idx,
				  const struct f2fs_io_info *fio,
				  gfp_t gfp_mask)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (!fio || !fio->encrypted_page)
		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
}

static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
				     pgoff_t next_idx,
				     const struct f2fs_io_info *fio)
{
	/*
	 * The f2fs garbage collector sets ->encrypted_page when it wants to
	 * read/write raw data without encryption.
	 */
	if (fio && fio->encrypted_page)
		return !bio_has_crypt_ctx(bio);

	return fscrypt_mergeable_bio(bio, inode, next_idx);
}

static inline void __submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	if (!is_read_io(bio_op(bio))) {
		unsigned int start;

		if (type != DATA && type != NODE)
			goto submit_io;

		if (f2fs_lfs_mode(sbi) && current->plug)
			blk_finish_plug(current->plug);

		if (!F2FS_IO_ALIGNED(sbi))
			goto submit_io;

		start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
		start %= F2FS_IO_SIZE(sbi);

		if (start == 0)
			goto submit_io;

		/* fill dummy pages */
		for (; start < F2FS_IO_SIZE(sbi); start++) {
			struct page *page =
				mempool_alloc(sbi->write_io_dummy,
					      GFP_NOIO | __GFP_NOFAIL);
			f2fs_bug_on(sbi, !page);

			zero_user_segment(page, 0, PAGE_SIZE);
			SetPagePrivate(page);
			set_page_private(page, DUMMY_WRITTEN_PAGE);
			lock_page(page);
			if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
				f2fs_bug_on(sbi, 1);
		}
		/*
		 * In the NODE case, we lose the next block address chain.
		 * So, we need to do a checkpoint in f2fs_sync_file.
		 */
		if (type == NODE)
			set_sbi_flag(sbi, SBI_NEED_CP);
	}
submit_io:
	if (is_read_io(bio_op(bio)))
		trace_f2fs_submit_read_bio(sbi->sb, type, bio);
	else
		trace_f2fs_submit_write_bio(sbi->sb, type, bio);
	submit_bio(bio);
}

void f2fs_submit_bio(struct f2fs_sb_info *sbi,
				struct bio *bio, enum page_type type)
{
	__submit_bio(sbi, bio, type);
}

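/*
 * Apply the per-temperature REQ_META/REQ_FUA bits configured via
 * sbi->data_io_flag or sbi->node_io_flag to this fio's op_flags; the
 * bit layout is documented in the function body.
 */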
static void __attach_io_flag(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
	unsigned int io_flag, fua_flag, meta_flag;

	if (fio->type == DATA)
		io_flag = sbi->data_io_flag;
	else if (fio->type == NODE)
		io_flag = sbi->node_io_flag;
	else
		return;

	fua_flag = io_flag & temp_mask;
	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

	/*
	 * data/node io flag bits per temp:
	 *      REQ_META     |      REQ_FUA      |
	 *    5 |    4 |   3 |    2 |    1 |   0 |
	 * Cold | Warm | Hot | Cold | Warm | Hot |
	 */
	if ((1 << fio->temp) & meta_flag)
		fio->op_flags |= REQ_META;
	if ((1 << fio->temp) & fua_flag)
		fio->op_flags |= REQ_FUA;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	__attach_io_flag(fio);
	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
		trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
	else
		trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);

	__submit_bio(io->sbi, io->bio, fio->type);
	io->bio = NULL;
}

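/*
 * Check whether @bio already carries a page of interest: any page of
 * @inode, the exact @page, or a node page belonging to @ino. Bounce and
 * compress control pages are translated back to their pagecache pages
 * before comparing.
 */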
static bool __has_merged_page(struct bio *bio, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (!bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *target = bvec->bv_page;

		if (fscrypt_is_bounce_page(target)) {
			target = fscrypt_pagecache_page(target);
			if (IS_ERR(target))
				continue;
		}
		if (f2fs_is_compressed_page(target)) {
			target = f2fs_compress_control_page(target);
			if (IS_ERR(target))
				continue;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.op = REQ_OP_WRITE;
		io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
		if (!test_opt(sbi, NOBARRIER))
			io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force) {
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			down_read(&io->io_rwsem);
			ret = __has_merged_page(io->bio, inode, page, ino);
			up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
		if (type >= META)
			break;
	}
}

void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
{
	__submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
}

void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type)
{
	__submit_merged_write_cond(sbi, inode, page, ino, type, false);
}

void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_write(sbi, DATA);
	f2fs_submit_merged_write(sbi, NODE);
	f2fs_submit_merged_write(sbi, META);
}

/*
 * Fill the locked page with data located in the block address.
 * A caller needs to unlock the page on failure.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			fio->is_por ? META_POR : (__is_meta_io(fio) ?
			META_GENERIC : DATA_GENERIC_ENHANCE)))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio, 1);

	f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
			       fio->page->index, fio, GFP_NOIO);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);

	__attach_io_flag(fio);
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
			__read_io_type(page) : WB_DATA_TYPE(fio->page));

	__submit_bio(fio->sbi, bio, fio->type);
	return 0;
}

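/*
 * A page can only be merged into an existing bio if it is physically
 * contiguous with the bio's last block and lives on the same block
 * device.
 */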
static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
				block_t last_blkaddr, block_t cur_blkaddr)
{
	if (last_blkaddr + 1 != cur_blkaddr)
		return false;
	return __same_bdev(sbi, cur_blkaddr, bio);
}

static bool io_type_is_mergeable(struct f2fs_bio_info *io,
						struct f2fs_io_info *fio)
{
	if (io->fio.op != fio->op)
		return false;
	return io->fio.op_flags == fio->op_flags;
}

static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
					struct f2fs_bio_info *io,
					struct f2fs_io_info *fio,
					block_t last_blkaddr,
					block_t cur_blkaddr)
{
	if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
		unsigned int filled_blocks =
				F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
		unsigned int io_size = F2FS_IO_SIZE(sbi);
		unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;

		/* IOs in the bio are aligned but there are not enough vectors left */
		if (!(filled_blocks % io_size) && left_vecs < io_size)
			return false;
	}
	if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
		return false;
	return io_type_is_mergeable(io, fio);
}

static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
				struct page *page, enum temp_type temp)
{
	struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
	struct bio_entry *be;

	be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
	be->bio = bio;
	bio_get(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
		f2fs_bug_on(sbi, 1);

	down_write(&io->bio_list_lock);
	list_add_tail(&be->list, &io->bio_list);
	up_write(&io->bio_list_lock);
}

static void del_bio_entry(struct bio_entry *be)
{
	list_del(&be->list);
	kmem_cache_free(bio_entry_slab, be);
}

static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
							struct page *page)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum temp_type temp;
	bool found = false;
	int ret = -EAGAIN;

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (be->bio != *bio)
				continue;

			found = true;

			f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
							    *fio->last_block,
							    fio->new_blkaddr));
			if (f2fs_crypt_mergeable_bio(*bio,
					fio->page->mapping->host,
					fio->page->index, fio) &&
			    bio_add_page(*bio, page, PAGE_SIZE, 0) ==
					PAGE_SIZE) {
				ret = 0;
				break;
			}

			/* page can't be merged into bio; submit the bio */
			del_bio_entry(be);
			__submit_bio(sbi, *bio, DATA);
			break;
		}
		up_write(&io->bio_list_lock);
	}

	if (ret) {
		bio_put(*bio);
		*bio = NULL;
	}

	return ret;
}

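/*
 * Flush a cached in-place-update bio: either the specific bio passed in
 * via @bio, or whichever cached bio contains @page. The matching bio is
 * removed from the per-temperature bio list and submitted.
 */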
void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
					struct bio **bio, struct page *page)
{
	enum temp_type temp;
	bool found = false;
	struct bio *target = bio ? *bio : NULL;

	f2fs_bug_on(sbi, !target && !page);

	for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
		struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
		struct list_head *head = &io->bio_list;
		struct bio_entry *be;

		if (list_empty(head))
			continue;

		down_read(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found)
				break;
		}
		up_read(&io->bio_list_lock);

		if (!found)
			continue;

		found = false;

		down_write(&io->bio_list_lock);
		list_for_each_entry(be, head, list) {
			if (target)
				found = (target == be->bio);
			else
				found = __has_merged_page(be->bio, NULL,
								page, 0);
			if (found) {
				target = be->bio;
				del_bio_entry(be);
				break;
			}
		}
		up_write(&io->bio_list_lock);
	}

	if (found)
		__submit_bio(sbi, target, DATA);
	if (bio && *bio) {
		bio_put(*bio);
		*bio = NULL;
	}
}

int f2fs_merge_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio = *fio->bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
			__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
		return -EFSCORRUPTED;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
						fio->new_blkaddr))
		f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
	if (!bio) {
		bio = __bio_alloc(fio, BIO_MAX_PAGES);
		__attach_io_flag(fio);
		f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
				       fio->page->index, fio, GFP_NOIO);
		bio_set_op_attrs(bio, fio->op, fio->op_flags);

		add_bio_entry(fio->sbi, bio, page, fio->temp);
	} else {
		if (add_ipu_page(fio, &bio, page))
			goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);

	inc_page_count(fio->sbi, WB_DATA_TYPE(page));

	*fio->last_block = fio->new_blkaddr;
	*fio->bio = bio;

	return 0;
}

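/*
 * Merge @fio into the per-type write bio held in sbi->write_io, opening
 * a new bio when the page cannot be merged (different op flags,
 * non-contiguous block, or incompatible crypto context). With
 * fio->in_list set, entries are pulled from io->io_list until it is
 * empty.
 */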
void f2fs_submit_page_write(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
	struct page *bio_page;

	f2fs_bug_on(sbi, is_read_io(fio->op));

	down_write(&io->io_rwsem);
next:
	if (fio->in_list) {
		spin_lock(&io->io_lock);
		if (list_empty(&io->io_list)) {
			spin_unlock(&io->io_lock);
			goto out;
		}
		fio = list_first_entry(&io->io_list,
						struct f2fs_io_info, list);
		list_del(&fio->list);
		spin_unlock(&io->io_lock);
	}

	verify_fio_blkaddr(fio);

	if (fio->encrypted_page)
		bio_page = fio->encrypted_page;
	else if (fio->compressed_page)
		bio_page = fio->compressed_page;
	else
		bio_page = fio->page;

	/* set submitted = true as a return value */
	fio->submitted = true;

	inc_page_count(sbi, WB_DATA_TYPE(bio_page));

	if (io->bio &&
	    (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
			      fio->new_blkaddr) ||
	     !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
				       bio_page->index, fio)))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		if (F2FS_IO_ALIGNED(sbi) &&
				(fio->type == DATA || fio->type == NODE) &&
				fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
			dec_page_count(sbi, WB_DATA_TYPE(bio_page));
			fio->retry = true;
			goto skip;
		}
		io->bio = __bio_alloc(fio, BIO_MAX_PAGES);
		f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
				       bio_page->index, fio, GFP_NOIO);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	if (fio->io_wbc)
		wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	trace_f2fs_submit_page_write(fio->page, fio);
skip:
	if (fio->in_list)
		goto next;
out:
	if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				!f2fs_is_checkpoint_ready(sbi))
		__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) && (idx <
		DIV_ROUND_UP(fsverity_get_verified_data_size(inode), PAGE_SIZE));
}

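/*
 * Build a read bio targeting @blkaddr and attach a bio_post_read_ctx
 * whenever fs-layer decryption, decompression, or fs-verity work will
 * be needed once the read completes.
 */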
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
				      unsigned nr_pages, unsigned op_flag,
				      pgoff_t first_idx, bool for_write,
				      bool for_verity)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;
	struct bio_post_read_ctx *ctx;
	unsigned int post_read_steps = 0;

	bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES),
			     for_write);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);

	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, op_flag);

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;
	if (f2fs_compressed_file(inode))
		post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ;
	if (for_verity && f2fs_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
		ctx->bio = bio;
		ctx->sbi = sbi;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}

	return bio;
}

static void f2fs_release_read_bio(struct bio *bio)
{
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

/* This can handle encryption stuffs */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
				 block_t blkaddr, int op_flags, bool for_write)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct bio *bio;

	bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
				 page->index, for_write, true);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, blkaddr);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	ClearPageError(page);
	inc_page_count(sbi, F2FS_RD_DATA);
	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	__submit_bio(sbi, bio, DATA);
	return 0;
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
}

void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	f2fs_set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	int err;

	if (!count)
		return 0;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = f2fs_data_blkaddr(dn);

		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
			count--;
		}
	}

	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
	return 0;
}

/* Should keep dn->ofs_in_node unchanged */
int f2fs_reserve_new_block(struct dnode_of_data *dn)
{
	unsigned int ofs_in_node = dn->ofs_in_node;
	int ret;

	ret = f2fs_reserve_new_blocks(dn, 1);
	dn->ofs_in_node = ofs_in_node;
	return ret;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = f2fs_reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei = {0, 0, 0};
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}

struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
						int op_flags, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	int err;

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ)) {
			err = -EFSCORRUPTED;
			goto put_err;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
	if (dn.data_blkaddr != NEW_ADDR &&
			!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
						dn.data_blkaddr,
						DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * See f2fs_add_link -> f2fs_get_new_data_page ->
	 * f2fs_init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
						op_flags, for_write);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = f2fs_get_read_data_page(inode, index, 0, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * If it tries to access a hole, return an error, because the callers
 * (functions in dir.c and GC) should be able to know whether this page
 * exists or not.
 */
struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = f2fs_get_read_data_page(inode, index, 0, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *f2fs_get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_SIZE);
		if (!PageUptodate(page))
			SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = f2fs_get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return page;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_SHIFT))
		f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
	return page;
}

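/*
 * Allocate a new on-disk block for @dn in a segment of @seg_type,
 * invalidate any stale copy of the old block in META_MAPPING, and
 * record the new address in the node page and extent cache.
 */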
static int __allocate_data_block(struct dnode_of_data *dn, int seg_type,
							int contig_level)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	block_t old_blkaddr;
	blkcnt_t count = 1;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

	dn->data_blkaddr = f2fs_data_blkaddr(dn);
	if (dn->data_blkaddr != NULL_ADDR)
		goto alloc;

	if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
		return err;

alloc:
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	old_blkaddr = dn->data_blkaddr;
	f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
					&sum, seg_type, NULL, contig_level);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		invalidate_mapping_pages(META_MAPPING(sbi),
					old_blkaddr, old_blkaddr);
	f2fs_update_data_blkaddr(dn, dn->data_blkaddr);

	/*
	 * i_size will be updated by direct_IO. Otherwise, we'll get stale
	 * data from unwritten block via dio_read.
	 */
	return 0;
}

int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	int flag;
	int err = 0;
	bool direct_io = iocb->ki_flags & IOCB_DIRECT;

	map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
	map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
	if (map.m_len > map.m_lblk)
		map.m_len -= map.m_lblk;
	else
		map.m_len = 0;

	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = true;

	if (direct_io) {
		map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
		flag = f2fs_force_buffered_io(inode, iocb, from) ?
					F2FS_GET_BLOCK_PRE_AIO :
					F2FS_GET_BLOCK_PRE_DIO;
		goto map_blocks;
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (f2fs_has_inline_data(inode))
		return err;

	flag = F2FS_GET_BLOCK_PRE_AIO;

map_blocks:
	err = f2fs_map_blocks(inode, &map, 1, flag);
	if (map.m_len > 0 && err == -ENOSPC) {
		if (!direct_io)
			set_inode_flag(inode, FI_NO_PREALLOC);
		err = 0;
	}
	return err;
}

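/*
 * Take or release the lock that guards block mapping: node_change in
 * read mode for batched AIO preallocation, f2fs_lock_op() for
 * everything else.
 */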
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
	if (flag == F2FS_GET_BLOCK_PRE_AIO) {
		if (lock)
			down_read(&sbi->node_change);
		else
			up_read(&sbi->node_change);
	} else {
		if (lock)
			f2fs_lock_op(sbi);
		else
			f2fs_unlock_op(sbi);
	}
}

/*
 * f2fs_map_blocks() tries to find or build a mapping relationship which
 * maps contiguous logical blocks to physical blocks, and returns such
 * info via the f2fs_map_blocks structure.
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
	blkcnt_t prealloc;
	struct extent_info ei = {0, 0, 0};
	block_t blkaddr;
	unsigned int start_pgofs;
	int contig_level = SEQ_NONE;

#ifdef CONFIG_F2FS_GRADING_SSR
	contig_level = check_io_seq(maxblocks);
#endif

	if (!maxblocks)
		return 0;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create)
			goto next_dnode;

		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + map->m_len;

		/* for hardware encryption, and to avoid potential issues in the future */
		if (flag == F2FS_GET_BLOCK_DIO)
			f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);
		goto out;
	}

next_dnode:
	if (map->m_may_create)
		f2fs_do_map_lock(sbi, flag, true);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (flag == F2FS_GET_BLOCK_BMAP)
			map->m_pblk = 0;

		if (err == -ENOENT) {
			/*
			 * There is one exceptional case that read_node_page()
			 * may return -ENOENT when the filesystem has been
			 * shut down or hit cp_error, so force to convert the
			 * error number to EIO for such a case.
			 */
			if (map->m_may_create &&
				(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
				f2fs_cp_error(sbi))) {
				err = -EIO;
				goto unlock_out;
			}

			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					f2fs_get_next_page_offset(&dn, pgofs);
			if (map->m_next_extent)
				*map->m_next_extent =
					f2fs_get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	start_pgofs = pgofs;
	prealloc = 0;
	last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = f2fs_data_blkaddr(&dn);

	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
		err = -EFSCORRUPTED;
		goto sync_out;
	}

	if (__is_valid_data_blkaddr(blkaddr)) {
		/* use out-of-place update for direct IO under LFS mode */
		if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create) {
			err = __allocate_data_block(&dn, map->m_seg_type,
							contig_level);
			if (err)
				goto sync_out;
			blkaddr = dn.data_blkaddr;
			set_inode_flag(inode, FI_APPEND_WRITE);
		}
	} else {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR) {
					prealloc++;
					last_ofs_in_node = dn.ofs_in_node;
				}
			} else {
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
					flag != F2FS_GET_BLOCK_DIO);
				err = __allocate_data_block(&dn,
							map->m_seg_type,
							contig_level);
				if (!err)
					set_inode_flag(inode, FI_APPEND_WRITE);
			}
			if (err)
				goto sync_out;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_BMAP) {
				map->m_pblk = 0;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRECACHE)
				goto sync_out;
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP) {
				/* for defragment case */
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
				goto sync_out;
			}
		}
	}

	if (flag == F2FS_GET_BLOCK_PRE_AIO)
		goto skip;

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

skip:
	dn.ofs_in_node++;
	pgofs++;

	/* preallocate blocks in batch for one dnode page */
	if (flag == F2FS_GET_BLOCK_PRE_AIO &&
			(pgofs == end || dn.ofs_in_node == end_offset)) {

		dn.ofs_in_node = ofs_in_node;
		err = f2fs_reserve_new_blocks(&dn, prealloc);
		if (err)
			goto sync_out;

		map->m_len += dn.ofs_in_node - ofs_in_node;
		if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
			err = -ENOSPC;
			goto sync_out;
		}
		dn.ofs_in_node = end_offset;
	}

	if (pgofs >= end)
		goto sync_out;
	else if (dn.ofs_in_node < end_offset)
		goto next_block;

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
	}

	f2fs_put_dnode(&dn);

	if (map->m_may_create) {
		f2fs_do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
	goto next_dnode;

sync_out:

	/* for hardware encryption, and to avoid potential issues in the future */
	if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
		f2fs_wait_on_block_writeback_range(inode,
						map->m_pblk, map->m_len);

	if (flag == F2FS_GET_BLOCK_PRECACHE) {
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

			f2fs_update_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
		if (map->m_next_extent)
			*map->m_next_extent = pgofs + 1;
	}
	f2fs_put_dnode(&dn);
unlock_out:
	if (map->m_may_create) {
		f2fs_do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
{
	struct f2fs_map_blocks map;
	block_t last_lblk;
	int err;

	if (pos + len > i_size_read(inode))
		return false;

	map.m_lblk = F2FS_BYTES_TO_BLK(pos);
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	last_lblk = F2FS_BLK_ALIGN(pos + len);

	while (map.m_lblk < last_lblk) {
		map.m_len = last_lblk - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err || map.m_len == 0)
			return false;
		map.m_lblk += map.m_len;
	}
	return true;
}

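/*
 * buffer_head-based wrapper around f2fs_map_blocks() for the
 * get_block_t interface: translate the bh request into an
 * f2fs_map_blocks call and copy the result back into the bh.
 */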
__get_data_block(struct inode * inode,sector_t iblock,struct buffer_head * bh,int create,int flag,pgoff_t * next_pgofs,int seg_type,bool may_write)1774 static int __get_data_block(struct inode *inode, sector_t iblock,
1775 struct buffer_head *bh, int create, int flag,
1776 pgoff_t *next_pgofs, int seg_type, bool may_write)
1777 {
1778 struct f2fs_map_blocks map;
1779 int err;
1780
1781 map.m_lblk = iblock;
1782 map.m_len = bh->b_size >> inode->i_blkbits;
1783 map.m_next_pgofs = next_pgofs;
1784 map.m_next_extent = NULL;
1785 map.m_seg_type = seg_type;
1786 map.m_may_create = may_write;
1787
1788 err = f2fs_map_blocks(inode, &map, create, flag);
1789 if (!err) {
1790 map_bh(bh, inode->i_sb, map.m_pblk);
1791 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1792 bh->b_size = (u64)map.m_len << inode->i_blkbits;
1793 }
1794 return err;
1795 }
1796
get_data_block(struct inode * inode,sector_t iblock,struct buffer_head * bh_result,int create,int flag,pgoff_t * next_pgofs)1797 static int get_data_block(struct inode *inode, sector_t iblock,
1798 struct buffer_head *bh_result, int create, int flag,
1799 pgoff_t *next_pgofs)
1800 {
1801 return __get_data_block(inode, iblock, bh_result, create,
1802 flag, next_pgofs,
1803 NO_CHECK_TYPE, create);
1804 }
1805
get_data_block_dio_write(struct inode * inode,sector_t iblock,struct buffer_head * bh_result,int create)1806 static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1807 struct buffer_head *bh_result, int create)
1808 {
1809 return __get_data_block(inode, iblock, bh_result, create,
1810 F2FS_GET_BLOCK_DIO, NULL,
1811 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1812 IS_SWAPFILE(inode) ? false : true);
1813 }
1814
get_data_block_dio(struct inode * inode,sector_t iblock,struct buffer_head * bh_result,int create)1815 static int get_data_block_dio(struct inode *inode, sector_t iblock,
1816 struct buffer_head *bh_result, int create)
1817 {
1818 return __get_data_block(inode, iblock, bh_result, create,
1819 F2FS_GET_BLOCK_DIO, NULL,
1820 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1821 false);
1822 }
1823
get_data_block_bmap(struct inode * inode,sector_t iblock,struct buffer_head * bh_result,int create)1824 static int get_data_block_bmap(struct inode *inode, sector_t iblock,
1825 struct buffer_head *bh_result, int create)
1826 {
1827 return __get_data_block(inode, iblock, bh_result, create,
1828 F2FS_GET_BLOCK_BMAP, NULL,
1829 NO_CHECK_TYPE, create);
1830 }
1831
logical_to_blk(struct inode * inode,loff_t offset)1832 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
1833 {
1834 return (offset >> inode->i_blkbits);
1835 }
1836
blk_to_logical(struct inode * inode,sector_t blk)1837 static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
1838 {
1839 return (blk << inode->i_blkbits);
1840 }
1841
f2fs_xattr_fiemap(struct inode * inode,struct fiemap_extent_info * fieinfo)1842 static int f2fs_xattr_fiemap(struct inode *inode,
1843 struct fiemap_extent_info *fieinfo)
1844 {
1845 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1846 struct page *page;
1847 struct node_info ni;
1848 __u64 phys = 0, len;
1849 __u32 flags;
1850 nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1851 int err = 0;
1852
1853 if (f2fs_has_inline_xattr(inode)) {
1854 int offset;
1855
1856 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1857 inode->i_ino, false);
1858 if (!page)
1859 return -ENOMEM;
1860
1861 err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
1862 if (err) {
1863 f2fs_put_page(page, 1);
1864 return err;
1865 }
1866
1867 phys = (__u64)blk_to_logical(inode, ni.blk_addr);
1868 offset = offsetof(struct f2fs_inode, i_addr) +
1869 sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1870 get_inline_xattr_addrs(inode));
1871
1872 phys += offset;
1873 len = inline_xattr_size(inode);
1874
1875 f2fs_put_page(page, 1);
1876
1877 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1878
1879 if (!xnid)
1880 flags |= FIEMAP_EXTENT_LAST;
1881
1882 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1883 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1884 if (err)
1885 return err;
1886 }
1887
1888 if (xnid) {
1889 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1890 if (!page)
1891 return -ENOMEM;
1892
1893 err = f2fs_get_node_info(sbi, xnid, &ni);
1894 if (err) {
1895 f2fs_put_page(page, 1);
1896 return err;
1897 }
1898
1899 phys = (__u64)blk_to_logical(inode, ni.blk_addr);
1900 len = inode->i_sb->s_blocksize;
1901
1902 f2fs_put_page(page, 1);
1903
1904 flags = FIEMAP_EXTENT_LAST;
1905 }
1906
1907 if (phys) {
1908 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1909 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1910 }
1911
1912 return (err < 0 ? err : 0);
1913 }
1914
1915 static loff_t max_inode_blocks(struct inode *inode)
1916 {
1917 loff_t result = ADDRS_PER_INODE(inode);
1918 loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1919
1920 /* two direct node blocks */
1921 result += (leaf_count * 2);
1922
1923 /* two indirect node blocks */
1924 leaf_count *= NIDS_PER_BLOCK;
1925 result += (leaf_count * 2);
1926
1927 /* one double indirect node block */
1928 leaf_count *= NIDS_PER_BLOCK;
1929 result += leaf_count;
1930
1931 return result;
1932 }
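
/*
 * A worked instance of the geometry above, assuming the classic
 * on-disk constants (ADDRS_PER_INODE == 923, ADDRS_PER_BLOCK ==
 * NIDS_PER_BLOCK == 1018; the real values shrink when inline xattrs
 * or extra inode attributes are enabled):
 *
 *	923			pointers in the inode itself
 *	+ 2 * 1018		two direct node blocks
 *	+ 2 * 1018 * 1018	two indirect node blocks
 *	+ 1018 * 1018 * 1018	one double indirect node block
 *	= 1,057,053,439 blocks	(~3.94 TiB with 4KiB blocks)
 */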
1933
1934 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1935 u64 start, u64 len)
1936 {
1937 struct buffer_head map_bh;
1938 sector_t start_blk, last_blk;
1939 pgoff_t next_pgofs;
1940 u64 logical = 0, phys = 0, size = 0;
1941 u32 flags = 0;
1942 int ret = 0;
1943 bool compr_cluster = false;
1944 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
1945
1946 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1947 ret = f2fs_precache_extents(inode);
1948 if (ret)
1949 return ret;
1950 }
1951
1952 ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
1953 if (ret)
1954 return ret;
1955
1956 inode_lock(inode);
1957
1958 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1959 ret = f2fs_xattr_fiemap(inode, fieinfo);
1960 goto out;
1961 }
1962
1963 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
1964 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1965 if (ret != -EAGAIN)
1966 goto out;
1967 }
1968
1969 if (logical_to_blk(inode, len) == 0)
1970 len = blk_to_logical(inode, 1);
1971
1972 start_blk = logical_to_blk(inode, start);
1973 last_blk = logical_to_blk(inode, start + len - 1);
1974
1975 next:
1976 memset(&map_bh, 0, sizeof(struct buffer_head));
1977 map_bh.b_size = len;
1978
1979 if (compr_cluster)
1980 map_bh.b_size = blk_to_logical(inode, cluster_size - 1);
1981
1982 ret = get_data_block(inode, start_blk, &map_bh, 0,
1983 F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
1984 if (ret)
1985 goto out;
1986
1987 /* HOLE */
1988 if (!buffer_mapped(&map_bh)) {
1989 start_blk = next_pgofs;
1990
1991 if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
1992 max_inode_blocks(inode)))
1993 goto prep_next;
1994
1995 flags |= FIEMAP_EXTENT_LAST;
1996 }
1997
1998 if (size) {
1999 if (IS_ENCRYPTED(inode))
2000 flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
2001
2002 ret = fiemap_fill_next_extent(fieinfo, logical,
2003 phys, size, flags);
2004 trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
2005 if (ret)
2006 goto out;
2007 size = 0;
2008 }
2009
2010 if (start_blk > last_blk)
2011 goto out;
2012
2013 if (compr_cluster) {
2014 compr_cluster = false;
2015
2016
2017 logical = blk_to_logical(inode, start_blk - 1);
2018 phys = blk_to_logical(inode, map_bh.b_blocknr);
2019 size = blk_to_logical(inode, cluster_size);
2020
2021 flags |= FIEMAP_EXTENT_ENCODED;
2022
2023 start_blk += cluster_size - 1;
2024
2025 if (start_blk > last_blk)
2026 goto out;
2027
2028 goto prep_next;
2029 }
2030
2031 if (map_bh.b_blocknr == COMPRESS_ADDR) {
2032 compr_cluster = true;
2033 start_blk++;
2034 goto prep_next;
2035 }
2036
2037 logical = blk_to_logical(inode, start_blk);
2038 phys = blk_to_logical(inode, map_bh.b_blocknr);
2039 size = map_bh.b_size;
2040 flags = 0;
2041 if (buffer_unwritten(&map_bh))
2042 flags = FIEMAP_EXTENT_UNWRITTEN;
2043
2044 start_blk += logical_to_blk(inode, size);
2045
2046 prep_next:
2047 cond_resched();
2048 if (fatal_signal_pending(current))
2049 ret = -EINTR;
2050 else
2051 goto next;
2052 out:
2053 if (ret == 1)
2054 ret = 0;
2055
2056 inode_unlock(inode);
2057 return ret;
2058 }
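
/*
 * f2fs_fiemap() backs the FS_IOC_FIEMAP ioctl. A minimal userspace
 * sketch that exercises the path above (illustrative only; error
 * handling elided; needs <linux/fs.h> and <linux/fiemap.h>):
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				8 * sizeof(struct fiemap_extent));
 *	fm->fm_length = FIEMAP_MAX_OFFSET;
 *	fm->fm_extent_count = 8;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);	(extents land in fm->fm_extents[])
 *
 * Compressed clusters are reported with FIEMAP_EXTENT_ENCODED, inline
 * xattr data (FIEMAP_FLAG_XATTR) with FIEMAP_EXTENT_DATA_INLINE |
 * FIEMAP_EXTENT_NOT_ALIGNED, and encrypted extents with
 * FIEMAP_EXTENT_DATA_ENCRYPTED.
 */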
2059
2060 static inline loff_t f2fs_readpage_limit(struct inode *inode)
2061 {
2062 if (IS_ENABLED(CONFIG_FS_VERITY) &&
2063 (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
2064 return inode->i_sb->s_maxbytes;
2065
2066 return i_size_read(inode);
2067 }
2068
2069 static int f2fs_read_single_page(struct inode *inode, struct page *page,
2070 unsigned nr_pages,
2071 struct f2fs_map_blocks *map,
2072 struct bio **bio_ret,
2073 sector_t *last_block_in_bio,
2074 bool is_readahead)
2075 {
2076 struct bio *bio = *bio_ret;
2077 const unsigned blkbits = inode->i_blkbits;
2078 const unsigned blocksize = 1 << blkbits;
2079 sector_t block_in_file;
2080 sector_t last_block;
2081 sector_t last_block_in_file;
2082 sector_t block_nr;
2083 int ret = 0;
2084
2085 block_in_file = (sector_t)page_index(page);
2086 last_block = block_in_file + nr_pages;
2087 last_block_in_file = (f2fs_readpage_limit(inode) + blocksize - 1) >>
2088 blkbits;
2089 if (last_block > last_block_in_file)
2090 last_block = last_block_in_file;
2091
2092 /* just zero out the page which is beyond EOF */
2093 if (block_in_file >= last_block)
2094 goto zero_out;
2095 /*
2096 * Map blocks using the previous result first.
2097 */
2098 if ((map->m_flags & F2FS_MAP_MAPPED) &&
2099 block_in_file > map->m_lblk &&
2100 block_in_file < (map->m_lblk + map->m_len))
2101 goto got_it;
2102
2103 /*
2104 * Then do more f2fs_map_blocks() calls until we are
2105 * done with this page.
2106 */
2107 map->m_lblk = block_in_file;
2108 map->m_len = last_block - block_in_file;
2109
2110 ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
2111 if (ret)
2112 goto out;
2113 got_it:
2114 if ((map->m_flags & F2FS_MAP_MAPPED)) {
2115 block_nr = map->m_pblk + block_in_file - map->m_lblk;
2116 SetPageMappedToDisk(page);
2117
2118 if (!PageUptodate(page) && (!PageSwapCache(page) &&
2119 !cleancache_get_page(page))) {
2120 SetPageUptodate(page);
2121 goto confused;
2122 }
2123
2124 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2125 DATA_GENERIC_ENHANCE_READ)) {
2126 ret = -EFSCORRUPTED;
2127 goto out;
2128 }
2129 } else {
2130 zero_out:
2131 zero_user_segment(page, 0, PAGE_SIZE);
2132 if (f2fs_need_verity(inode, page->index) &&
2133 !fsverity_verify_page(page)) {
2134 ret = -EIO;
2135 goto out;
2136 }
2137 if (!PageUptodate(page))
2138 SetPageUptodate(page);
2139 unlock_page(page);
2140 goto out;
2141 }
2142
2143 /*
2144 * This page will go to BIO. Do we need to send this
2145 * BIO off first?
2146 */
2147 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2148 *last_block_in_bio, block_nr) ||
2149 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2150 submit_and_realloc:
2151 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2152 bio = NULL;
2153 }
2154 if (bio == NULL) {
2155 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2156 is_readahead ? REQ_RAHEAD : 0, page->index,
2157 false, true);
2158 if (IS_ERR(bio)) {
2159 ret = PTR_ERR(bio);
2160 bio = NULL;
2161 goto out;
2162 }
2163 }
2164
2165 /*
2166 * If the page is under writeback, we need to wait for
2167 * its completion to see the correct decrypted data.
2168 */
2169 f2fs_wait_on_block_writeback(inode, block_nr);
2170
2171 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2172 goto submit_and_realloc;
2173
2174 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2175 f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
2176 ClearPageError(page);
2177 *last_block_in_bio = block_nr;
2178 goto out;
2179 confused:
2180 if (bio) {
2181 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2182 bio = NULL;
2183 }
2184 unlock_page(page);
2185 out:
2186 *bio_ret = bio;
2187 return ret;
2188 }
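
/*
 * Bio build-up pattern used above: the first mapped page allocates a
 * read bio sized for the remaining readahead window, and every further
 * page is appended with bio_add_page() as long as it is physically
 * contiguous and crypto-mergeable with the bio's tail; any mismatch,
 * or a full bio, takes the submit_and_realloc path. One bio therefore
 * covers exactly one contiguous, uniformly-encrypted extent.
 */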
2189
2190 #ifdef CONFIG_F2FS_FS_COMPRESSION
2191 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2192 unsigned nr_pages, sector_t *last_block_in_bio,
2193 bool is_readahead, bool for_write)
2194 {
2195 struct dnode_of_data dn;
2196 struct inode *inode = cc->inode;
2197 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2198 struct bio *bio = *bio_ret;
2199 unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2200 sector_t last_block_in_file;
2201 const unsigned blkbits = inode->i_blkbits;
2202 const unsigned blocksize = 1 << blkbits;
2203 struct decompress_io_ctx *dic = NULL;
2204 struct bio_post_read_ctx *ctx;
2205 bool for_verity = false;
2206 int i;
2207 int ret = 0;
2208
2209 f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2210
2211 last_block_in_file = (f2fs_readpage_limit(inode) +
2212 blocksize - 1) >> blkbits;
2213
2214 /* get rid of pages beyond EOF */
2215 for (i = 0; i < cc->cluster_size; i++) {
2216 struct page *page = cc->rpages[i];
2217
2218 if (!page)
2219 continue;
2220 if ((sector_t)page->index >= last_block_in_file) {
2221 zero_user_segment(page, 0, PAGE_SIZE);
2222 if (!PageUptodate(page))
2223 SetPageUptodate(page);
2224 } else if (!PageUptodate(page)) {
2225 continue;
2226 }
2227 unlock_page(page);
2228 if (for_write)
2229 put_page(page);
2230 cc->rpages[i] = NULL;
2231 cc->nr_rpages--;
2232 }
2233
2234 /* we are done since all pages are beyond EOF */
2235 if (f2fs_cluster_is_empty(cc))
2236 goto out;
2237
2238 set_new_dnode(&dn, inode, NULL, NULL, 0);
2239 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2240 if (ret)
2241 goto out;
2242
2243 f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
2244
2245 for (i = 1; i < cc->cluster_size; i++) {
2246 block_t blkaddr;
2247
2248 blkaddr = data_blkaddr(dn.inode, dn.node_page,
2249 dn.ofs_in_node + i);
2250
2251 if (!__is_valid_data_blkaddr(blkaddr))
2252 break;
2253
2254 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2255 ret = -EFAULT;
2256 goto out_put_dnode;
2257 }
2258 cc->nr_cpages++;
2259 }
2260
2261 /* nothing to decompress */
2262 if (cc->nr_cpages == 0) {
2263 ret = 0;
2264 goto out_put_dnode;
2265 }
2266
2267 dic = f2fs_alloc_dic(cc);
2268 if (IS_ERR(dic)) {
2269 ret = PTR_ERR(dic);
2270 goto out_put_dnode;
2271 }
2272
2273 /*
2274 * It's possible to enable fsverity on the fly when handling a cluster,
2275 * which requires complicated error handling. Instead of adding more
2276 * complexity, let's give a rule where end_io post-processes fsverity
2277 * per cluster. To do that, we need to submit the bio if the previous
2278 * bio set a different post-process policy.
2279 */
2280 if (fsverity_active(cc->inode)) {
2281 atomic_set(&dic->verity_pages, cc->nr_cpages);
2282 for_verity = true;
2283
2284 if (bio) {
2285 ctx = bio->bi_private;
2286 if (!(ctx->enabled_steps & (1 << STEP_VERITY))) {
2287 __submit_bio(sbi, bio, DATA);
2288 bio = NULL;
2289 }
2290 }
2291 }
2292
2293 for (i = 0; i < dic->nr_cpages; i++) {
2294 struct page *page = dic->cpages[i];
2295 block_t blkaddr;
2296
2297 blkaddr = data_blkaddr(dn.inode, dn.node_page,
2298 dn.ofs_in_node + i + 1);
2299
2300 if (bio && (!page_is_mergeable(sbi, bio,
2301 *last_block_in_bio, blkaddr) ||
2302 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2303 submit_and_realloc:
2304 __submit_bio(sbi, bio, DATA);
2305 bio = NULL;
2306 }
2307
2308 if (!bio) {
2309 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2310 is_readahead ? REQ_RAHEAD : 0,
2311 page->index, for_write, for_verity);
2312 if (IS_ERR(bio)) {
2313 unsigned int remained = dic->nr_cpages - i;
2314 bool release = false;
2315
2316 ret = PTR_ERR(bio);
2317 dic->failed = true;
2318
2319 if (for_verity) {
2320 if (!atomic_sub_return(remained,
2321 &dic->verity_pages))
2322 release = true;
2323 } else {
2324 if (!atomic_sub_return(remained,
2325 &dic->pending_pages))
2326 release = true;
2327 }
2328
2329 if (release) {
2330 f2fs_decompress_end_io(dic->rpages,
2331 cc->cluster_size, true,
2332 false);
2333 f2fs_free_dic(dic);
2334 }
2335
2336 f2fs_put_dnode(&dn);
2337 *bio_ret = NULL;
2338 return ret;
2339 }
2340 }
2341
2342 f2fs_wait_on_block_writeback(inode, blkaddr);
2343
2344 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2345 goto submit_and_realloc;
2346
2347 /* tag STEP_DECOMPRESS to handle IO in wq */
2348 ctx = bio->bi_private;
2349 if (!(ctx->enabled_steps & (1 << STEP_DECOMPRESS)))
2350 ctx->enabled_steps |= 1 << STEP_DECOMPRESS;
2351
2352 inc_page_count(sbi, F2FS_RD_DATA);
2353 f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
2354 f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
2355 ClearPageError(page);
2356 *last_block_in_bio = blkaddr;
2357 }
2358
2359 f2fs_put_dnode(&dn);
2360
2361 *bio_ret = bio;
2362 return 0;
2363
2364 out_put_dnode:
2365 f2fs_put_dnode(&dn);
2366 out:
2367 f2fs_decompress_end_io(cc->rpages, cc->cluster_size, true, false);
2368 *bio_ret = bio;
2369 return ret;
2370 }
2371 #endif
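
/*
 * Cluster layout assumed by f2fs_read_multi_pages() (e.g. with
 * log_cluster_size == 2, a four-block cluster): block address slot 0
 * of the cluster holds the COMPRESS_ADDR marker, and the compressed
 * payload occupies the following slots, i.e.
 *
 *	blkaddr(cpage[i]) == data_blkaddr(dn.ofs_in_node + i + 1)
 *
 * which is why the valid-block scan starts at i == 1 and the cpage
 * loop reads slot i + 1.
 */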
2372
2373 /*
2374 * This function was originally taken from fs/mpage.c, and customized for f2fs.
2375 * The major change is that f2fs uses block_size == page_size by default.
2376 *
2377 * Note that the aops->readpages() function is ONLY used for read-ahead. If
2378 * this function ever deviates from doing just read-ahead, it should either
2379 * use ->readpage() or do the necessary surgery to decouple ->readpages()
2380 * from read-ahead.
2381 */
2382 static int f2fs_mpage_readpages(struct inode *inode,
2383 struct readahead_control *rac, struct page *page)
2384 {
2385 struct bio *bio = NULL;
2386 sector_t last_block_in_bio = 0;
2387 struct f2fs_map_blocks map;
2388 #ifdef CONFIG_F2FS_FS_COMPRESSION
2389 struct compress_ctx cc = {
2390 .inode = inode,
2391 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2392 .cluster_size = F2FS_I(inode)->i_cluster_size,
2393 .cluster_idx = NULL_CLUSTER,
2394 .rpages = NULL,
2395 .cpages = NULL,
2396 .nr_rpages = 0,
2397 .nr_cpages = 0,
2398 };
2399 #endif
2400 unsigned nr_pages = rac ? readahead_count(rac) : 1;
2401 unsigned max_nr_pages = nr_pages;
2402 int ret = 0;
2403 bool drop_ra = false;
2404
2405 map.m_pblk = 0;
2406 map.m_lblk = 0;
2407 map.m_len = 0;
2408 map.m_flags = 0;
2409 map.m_next_pgofs = NULL;
2410 map.m_next_extent = NULL;
2411 map.m_seg_type = NO_CHECK_TYPE;
2412 map.m_may_create = false;
2413
2414 /*
2415 * Two readahead threads racing on the same address range can fragment
2416 * sequential read IOs, so let them avoid each other.
2417 */
2418 if (rac && readahead_count(rac)) {
2419 if (READ_ONCE(F2FS_I(inode)->ra_offset) == readahead_index(rac))
2420 drop_ra = true;
2421 else
2422 WRITE_ONCE(F2FS_I(inode)->ra_offset,
2423 readahead_index(rac));
2424 }
2425
2426 for (; nr_pages; nr_pages--) {
2427 if (rac) {
2428 page = readahead_page(rac);
2429 prefetchw(&page->flags);
2430 if (drop_ra) {
2431 f2fs_put_page(page, 1);
2432 continue;
2433 }
2434 }
2435
2436 #ifdef CONFIG_F2FS_FS_COMPRESSION
2437 if (f2fs_compressed_file(inode)) {
2438 /* there are remaining compressed pages, submit them */
2439 if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2440 ret = f2fs_read_multi_pages(&cc, &bio,
2441 max_nr_pages,
2442 &last_block_in_bio,
2443 rac != NULL, false);
2444 f2fs_destroy_compress_ctx(&cc, false);
2445 if (ret)
2446 goto set_error_page;
2447 }
2448 ret = f2fs_is_compressed_cluster(inode, page->index);
2449 if (ret < 0)
2450 goto set_error_page;
2451 else if (!ret)
2452 goto read_single_page;
2453
2454 ret = f2fs_init_compress_ctx(&cc);
2455 if (ret)
2456 goto set_error_page;
2457
2458 f2fs_compress_ctx_add_page(&cc, page);
2459
2460 goto next_page;
2461 }
2462 read_single_page:
2463 #endif
2464
2465 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
2466 &bio, &last_block_in_bio, rac);
2467 if (ret) {
2468 #ifdef CONFIG_F2FS_FS_COMPRESSION
2469 set_error_page:
2470 #endif
2471 SetPageError(page);
2472 zero_user_segment(page, 0, PAGE_SIZE);
2473 unlock_page(page);
2474 }
2475 #ifdef CONFIG_F2FS_FS_COMPRESSION
2476 next_page:
2477 #endif
2478 if (rac)
2479 put_page(page);
2480
2481 #ifdef CONFIG_F2FS_FS_COMPRESSION
2482 if (f2fs_compressed_file(inode)) {
2483 /* last page */
2484 if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2485 ret = f2fs_read_multi_pages(&cc, &bio,
2486 max_nr_pages,
2487 &last_block_in_bio,
2488 rac != NULL, false);
2489 f2fs_destroy_compress_ctx(&cc, false);
2490 }
2491 }
2492 #endif
2493 }
2494 if (bio)
2495 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2496
2497 if (rac && readahead_count(rac) && !drop_ra)
2498 WRITE_ONCE(F2FS_I(inode)->ra_offset, -1);
2499 return ret;
2500 }
2501
2502 static int f2fs_read_data_page(struct file *file, struct page *page)
2503 {
2504 struct inode *inode = page_file_mapping(page)->host;
2505 int ret = -EAGAIN;
2506
2507 trace_f2fs_readpage(page, DATA);
2508
2509 if (!f2fs_is_compress_backend_ready(inode)) {
2510 unlock_page(page);
2511 return -EOPNOTSUPP;
2512 }
2513
2514 /* If the file has inline data, try to read it directly */
2515 if (f2fs_has_inline_data(inode))
2516 ret = f2fs_read_inline_data(inode, page);
2517 if (ret == -EAGAIN)
2518 ret = f2fs_mpage_readpages(inode, NULL, page);
2519 return ret;
2520 }
2521
2522 static void f2fs_readahead(struct readahead_control *rac)
2523 {
2524 struct inode *inode = rac->mapping->host;
2525
2526 trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
2527
2528 if (!f2fs_is_compress_backend_ready(inode))
2529 return;
2530
2531 /* If the file has inline data, skip readpages */
2532 if (f2fs_has_inline_data(inode))
2533 return;
2534
2535 f2fs_mpage_readpages(inode, rac, NULL);
2536 }
2537
2538 int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
2539 {
2540 struct inode *inode = fio->page->mapping->host;
2541 struct page *mpage, *page;
2542 gfp_t gfp_flags = GFP_NOFS;
2543
2544 if (!f2fs_encrypted_file(inode))
2545 return 0;
2546
2547 page = fio->compressed_page ? fio->compressed_page : fio->page;
2548
2549 /* wait for GCed page writeback via META_MAPPING */
2550 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
2551
2552 if (fscrypt_inode_uses_inline_crypto(inode))
2553 return 0;
2554
2555 retry_encrypt:
2556 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2557 PAGE_SIZE, 0, gfp_flags);
2558 if (IS_ERR(fio->encrypted_page)) {
2559 /* flush pending IOs and wait for a while in the ENOMEM case */
2560 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2561 f2fs_flush_merged_writes(fio->sbi);
2562 congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2563 gfp_flags |= __GFP_NOFAIL;
2564 goto retry_encrypt;
2565 }
2566 return PTR_ERR(fio->encrypted_page);
2567 }
2568
2569 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2570 if (mpage) {
2571 if (PageUptodate(mpage))
2572 memcpy(page_address(mpage),
2573 page_address(fio->encrypted_page), PAGE_SIZE);
2574 f2fs_put_page(mpage, 1);
2575 }
2576 return 0;
2577 }
2578
2579 static inline bool check_inplace_update_policy(struct inode *inode,
2580 struct f2fs_io_info *fio)
2581 {
2582 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2583 unsigned int policy = SM_I(sbi)->ipu_policy;
2584
2585 if (policy & (0x1 << F2FS_IPU_FORCE))
2586 return true;
2587 if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
2588 return true;
2589 if (policy & (0x1 << F2FS_IPU_UTIL) &&
2590 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2591 return true;
2592 if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
2593 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2594 return true;
2595
2596 /*
2597 * IPU for rewriting async pages
2598 */
2599 if (policy & (0x1 << F2FS_IPU_ASYNC) &&
2600 fio && fio->op == REQ_OP_WRITE &&
2601 !(fio->op_flags & REQ_SYNC) &&
2602 !IS_ENCRYPTED(inode))
2603 return true;
2604
2605 /* this is only set during fdatasync */
2606 if (policy & (0x1 << F2FS_IPU_FSYNC) &&
2607 is_inode_flag_set(inode, FI_NEED_IPU))
2608 return true;
2609
2610 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2611 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2612 return true;
2613
2614 return false;
2615 }
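
/*
 * ipu_policy is a bitmask over the rules tested above: for example, a
 * policy of (0x1 << F2FS_IPU_UTIL) allows in-place updates only while
 * utilization(sbi) exceeds min_ipu_util, and OR-ing in
 * (0x1 << F2FS_IPU_FSYNC) additionally covers inodes flagged
 * FI_NEED_IPU during fdatasync. The mask is normally tuned through the
 * f2fs ipu_policy sysfs knob.
 */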
2616
2617 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2618 {
2619 if (f2fs_is_pinned_file(inode))
2620 return true;
2621
2622 /* if this is cold file, we should overwrite to avoid fragmentation */
2623 if (file_is_cold(inode))
2624 return true;
2625
2626 return check_inplace_update_policy(inode, fio);
2627 }
2628
2629 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2630 {
2631 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2632
2633 if (f2fs_lfs_mode(sbi))
2634 return true;
2635 if (S_ISDIR(inode->i_mode))
2636 return true;
2637 if (IS_NOQUOTA(inode))
2638 return true;
2639 if (f2fs_is_atomic_file(inode))
2640 return true;
2641 if (fio) {
2642 if (is_cold_data(fio->page))
2643 return true;
2644 if (IS_ATOMIC_WRITTEN_PAGE(fio->page))
2645 return true;
2646 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2647 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2648 return true;
2649 }
2650 return false;
2651 }
2652
2653 static inline bool need_inplace_update(struct f2fs_io_info *fio)
2654 {
2655 struct inode *inode = fio->page->mapping->host;
2656
2657 if (f2fs_should_update_outplace(inode, fio))
2658 return false;
2659
2660 return f2fs_should_update_inplace(inode, fio);
2661 }
2662
2663 int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2664 {
2665 struct page *page = fio->page;
2666 struct inode *inode = page->mapping->host;
2667 struct dnode_of_data dn;
2668 struct extent_info ei = {0,0,0};
2669 struct node_info ni;
2670 bool ipu_force = false;
2671 int err = 0;
2672
2673 set_new_dnode(&dn, inode, NULL, NULL, 0);
2674 if (need_inplace_update(fio) &&
2675 f2fs_lookup_extent_cache(inode, page->index, &ei)) {
2676 fio->old_blkaddr = ei.blk + page->index - ei.fofs;
2677
2678 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2679 DATA_GENERIC_ENHANCE))
2680 return -EFSCORRUPTED;
2681
2682 ipu_force = true;
2683 fio->need_lock = LOCK_DONE;
2684 goto got_it;
2685 }
2686
2687 /* avoid a deadlock between the page lock and f2fs_lock_op() */
2688 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2689 return -EAGAIN;
2690
2691 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
2692 if (err)
2693 goto out;
2694
2695 fio->old_blkaddr = dn.data_blkaddr;
2696
2697 /* This page is already truncated */
2698 if (fio->old_blkaddr == NULL_ADDR) {
2699 ClearPageUptodate(page);
2700 clear_cold_data(page);
2701 goto out_writepage;
2702 }
2703 got_it:
2704 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2705 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2706 DATA_GENERIC_ENHANCE)) {
2707 err = -EFSCORRUPTED;
2708 goto out_writepage;
2709 }
2710 /*
2711 * If the current allocation needs SSR,
2712 * it is better to do in-place writes for the updated data.
2713 */
2714 if (ipu_force ||
2715 (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2716 need_inplace_update(fio))) {
2717 err = f2fs_encrypt_one_page(fio);
2718 if (err)
2719 goto out_writepage;
2720
2721 set_page_writeback(page);
2722 ClearPageError(page);
2723 f2fs_put_dnode(&dn);
2724 if (fio->need_lock == LOCK_REQ)
2725 f2fs_unlock_op(fio->sbi);
2726 err = f2fs_inplace_write_data(fio);
2727 if (err) {
2728 if (fscrypt_inode_uses_fs_layer_crypto(inode))
2729 fscrypt_finalize_bounce_page(&fio->encrypted_page);
2730 if (PageWriteback(page))
2731 end_page_writeback(page);
2732 } else {
2733 set_inode_flag(inode, FI_UPDATE_WRITE);
2734 }
2735 trace_f2fs_do_write_data_page(fio->page, IPU);
2736 return err;
2737 }
2738
2739 if (fio->need_lock == LOCK_RETRY) {
2740 if (!f2fs_trylock_op(fio->sbi)) {
2741 err = -EAGAIN;
2742 goto out_writepage;
2743 }
2744 fio->need_lock = LOCK_REQ;
2745 }
2746
2747 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
2748 if (err)
2749 goto out_writepage;
2750
2751 fio->version = ni.version;
2752
2753 err = f2fs_encrypt_one_page(fio);
2754 if (err)
2755 goto out_writepage;
2756
2757 set_page_writeback(page);
2758 ClearPageError(page);
2759
2760 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2761 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2762
2763 /* LFS mode write path */
2764 f2fs_outplace_write_data(&dn, fio);
2765 trace_f2fs_do_write_data_page(page, OPU);
2766 set_inode_flag(inode, FI_APPEND_WRITE);
2767 if (page->index == 0)
2768 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
2769 out_writepage:
2770 f2fs_put_dnode(&dn);
2771 out:
2772 if (fio->need_lock == LOCK_REQ)
2773 f2fs_unlock_op(fio->sbi);
2774 return err;
2775 }
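
/*
 * Locking ladder seen above: callers start with LOCK_RETRY and escalate
 * to LOCK_REQ on -EAGAIN; in both states only f2fs_trylock_op() is
 * used, to avoid the deadlock between the page lock and f2fs_lock_op()
 * noted above. The extent-cache IPU fast path downgrades to LOCK_DONE,
 * since an in-place rewrite allocates no new block.
 */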
2776
2777 int f2fs_write_single_data_page(struct page *page, int *submitted,
2778 struct bio **bio,
2779 sector_t *last_block,
2780 struct writeback_control *wbc,
2781 enum iostat_type io_type,
2782 int compr_blocks,
2783 bool allow_balance)
2784 {
2785 struct inode *inode = page->mapping->host;
2786 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2787 loff_t i_size = i_size_read(inode);
2788 const pgoff_t end_index = ((unsigned long long)i_size)
2789 >> PAGE_SHIFT;
2790 loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
2791 unsigned offset = 0;
2792 bool need_balance_fs = false;
2793 int err = 0;
2794 struct f2fs_io_info fio = {
2795 .sbi = sbi,
2796 .ino = inode->i_ino,
2797 .type = DATA,
2798 .op = REQ_OP_WRITE,
2799 .op_flags = wbc_to_write_flags(wbc),
2800 .old_blkaddr = NULL_ADDR,
2801 .page = page,
2802 .encrypted_page = NULL,
2803 .submitted = false,
2804 .compr_blocks = compr_blocks,
2805 .need_lock = LOCK_RETRY,
2806 .post_read = f2fs_post_read_required(inode),
2807 .io_type = io_type,
2808 .io_wbc = wbc,
2809 .bio = bio,
2810 .last_block = last_block,
2811 };
2812
2813 trace_f2fs_writepage(page, DATA);
2814
2815 /* we should bypass data pages to let the kworker jobs proceed */
2816 if (unlikely(f2fs_cp_error(sbi))) {
2817 mapping_set_error(page->mapping, -EIO);
2818 /*
2819 * don't drop any dirty dentry pages, to keep the latest
2820 * directory structure.
2821 */
2822 if (S_ISDIR(inode->i_mode) &&
2823 !is_sbi_flag_set(sbi, SBI_IS_CLOSE))
2824 goto redirty_out;
2825 goto out;
2826 }
2827
2828 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2829 goto redirty_out;
2830
2831 if (page->index < end_index ||
2832 f2fs_verity_in_progress(inode) ||
2833 compr_blocks)
2834 goto write;
2835
2836 /*
2837 * If the offset is out-of-range of file size,
2838 * this page does not have to be written to disk.
2839 */
2840 offset = i_size & (PAGE_SIZE - 1);
2841 if ((page->index >= end_index + 1) || !offset)
2842 goto out;
2843
2844 zero_user_segment(page, offset, PAGE_SIZE);
2845 write:
2846 if (f2fs_is_drop_cache(inode))
2847 goto out;
2848 /* we should not write the 0'th page, which holds the journal header */
2849 if (f2fs_is_volatile_file(inode) && (!page->index ||
2850 (!wbc->for_reclaim &&
2851 f2fs_available_free_memory(sbi, BASE_CHECK))))
2852 goto redirty_out;
2853
2854 /* Dentry/quota blocks are controlled by checkpoint */
2855 if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
2856 /*
2857 * We need to wait for node_write to avoid block allocation during
2858 * checkpoint. This can only happen for quota writes, which can cause
2859 * the discard race condition below.
2860 */
2861 if (IS_NOQUOTA(inode))
2862 down_read(&sbi->node_write);
2863
2864 fio.need_lock = LOCK_DONE;
2865 err = f2fs_do_write_data_page(&fio);
2866
2867 if (IS_NOQUOTA(inode))
2868 up_read(&sbi->node_write);
2869
2870 goto done;
2871 }
2872
2873 if (!wbc->for_reclaim)
2874 need_balance_fs = true;
2875 else if (has_not_enough_free_secs(sbi, 0, 0))
2876 goto redirty_out;
2877 else
2878 set_inode_flag(inode, FI_HOT_DATA);
2879
2880 err = -EAGAIN;
2881 if (f2fs_has_inline_data(inode)) {
2882 err = f2fs_write_inline_data(inode, page);
2883 if (!err)
2884 goto out;
2885 }
2886
2887 if (err == -EAGAIN) {
2888 err = f2fs_do_write_data_page(&fio);
2889 if (err == -EAGAIN) {
2890 fio.need_lock = LOCK_REQ;
2891 err = f2fs_do_write_data_page(&fio);
2892 }
2893 }
2894
2895 if (err) {
2896 file_set_keep_isize(inode);
2897 } else {
2898 spin_lock(&F2FS_I(inode)->i_size_lock);
2899 if (F2FS_I(inode)->last_disk_size < psize)
2900 F2FS_I(inode)->last_disk_size = psize;
2901 spin_unlock(&F2FS_I(inode)->i_size_lock);
2902 }
2903
2904 done:
2905 if (err && err != -ENOENT)
2906 goto redirty_out;
2907
2908 out:
2909 inode_dec_dirty_pages(inode);
2910 if (err) {
2911 ClearPageUptodate(page);
2912 clear_cold_data(page);
2913 }
2914
2915 if (wbc->for_reclaim) {
2916 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2917 clear_inode_flag(inode, FI_HOT_DATA);
2918 f2fs_remove_dirty_inode(inode);
2919 submitted = NULL;
2920 }
2921 unlock_page(page);
2922 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2923 !F2FS_I(inode)->wb_task && allow_balance)
2924 f2fs_balance_fs(sbi, need_balance_fs);
2925
2926 if (unlikely(f2fs_cp_error(sbi))) {
2927 f2fs_submit_merged_write(sbi, DATA);
2928 if (bio && *bio)
2929 f2fs_submit_merged_ipu_write(sbi, bio, NULL);
2930 submitted = NULL;
2931 }
2932
2933 if (submitted)
2934 *submitted = fio.submitted ? 1 : 0;
2935
2936 return 0;
2937
2938 redirty_out:
2939 redirty_page_for_writepage(wbc, page);
2940 /*
2941 * pageout() in MM translates EAGAIN, so calls handle_write_error()
2942 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2943 * file_write_and_wait_range() will see EIO error, which is critical
2944 * to return value of fsync() followed by atomic_write failure to user.
2945 */
2946 if (!err || wbc->for_reclaim)
2947 return AOP_WRITEPAGE_ACTIVATE;
2948 unlock_page(page);
2949 return err;
2950 }
2951
2952 static int f2fs_write_data_page(struct page *page,
2953 struct writeback_control *wbc)
2954 {
2955 #ifdef CONFIG_F2FS_FS_COMPRESSION
2956 struct inode *inode = page->mapping->host;
2957
2958 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2959 goto out;
2960
2961 if (f2fs_compressed_file(inode)) {
2962 if (f2fs_is_compressed_cluster(inode, page->index)) {
2963 redirty_page_for_writepage(wbc, page);
2964 return AOP_WRITEPAGE_ACTIVATE;
2965 }
2966 }
2967 out:
2968 #endif
2969
2970 return f2fs_write_single_data_page(page, NULL, NULL, NULL,
2971 wbc, FS_DATA_IO, 0, true);
2972 }
2973
2974 /*
2975 * This function was copied from write_cache_pages in mm/page-writeback.c.
2976 * The major change is that it writes cold data pages separately from
2977 * warm/hot data pages.
2978 */
2979 static int f2fs_write_cache_pages(struct address_space *mapping,
2980 struct writeback_control *wbc,
2981 enum iostat_type io_type)
2982 {
2983 int ret = 0;
2984 int done = 0, retry = 0;
2985 struct pagevec pvec;
2986 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2987 struct bio *bio = NULL;
2988 sector_t last_block;
2989 #ifdef CONFIG_F2FS_FS_COMPRESSION
2990 struct inode *inode = mapping->host;
2991 struct compress_ctx cc = {
2992 .inode = inode,
2993 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2994 .cluster_size = F2FS_I(inode)->i_cluster_size,
2995 .cluster_idx = NULL_CLUSTER,
2996 .rpages = NULL,
2997 .nr_rpages = 0,
2998 .cpages = NULL,
2999 .rbuf = NULL,
3000 .cbuf = NULL,
3001 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
3002 .private = NULL,
3003 };
3004 #endif
3005 int nr_pages;
3006 pgoff_t index;
3007 pgoff_t end; /* Inclusive */
3008 pgoff_t done_index;
3009 int range_whole = 0;
3010 xa_mark_t tag;
3011 int nwritten = 0;
3012 int submitted = 0;
3013 int i;
3014
3015 pagevec_init(&pvec);
3016
3017 if (get_dirty_pages(mapping->host) <=
3018 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
3019 set_inode_flag(mapping->host, FI_HOT_DATA);
3020 else
3021 clear_inode_flag(mapping->host, FI_HOT_DATA);
3022
3023 if (wbc->range_cyclic) {
3024 index = mapping->writeback_index; /* prev offset */
3025 end = -1;
3026 } else {
3027 index = wbc->range_start >> PAGE_SHIFT;
3028 end = wbc->range_end >> PAGE_SHIFT;
3029 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
3030 range_whole = 1;
3031 }
3032 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3033 tag = PAGECACHE_TAG_TOWRITE;
3034 else
3035 tag = PAGECACHE_TAG_DIRTY;
3036 retry:
3037 retry = 0;
3038 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3039 tag_pages_for_writeback(mapping, index, end);
3040 done_index = index;
3041 while (!done && !retry && (index <= end)) {
3042 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
3043 tag);
3044 if (nr_pages == 0)
3045 break;
3046
3047 for (i = 0; i < nr_pages; i++) {
3048 struct page *page = pvec.pages[i];
3049 bool need_readd;
3050 readd:
3051 need_readd = false;
3052 #ifdef CONFIG_F2FS_FS_COMPRESSION
3053 if (f2fs_compressed_file(inode)) {
3054 ret = f2fs_init_compress_ctx(&cc);
3055 if (ret) {
3056 done = 1;
3057 break;
3058 }
3059
3060 if (!f2fs_cluster_can_merge_page(&cc,
3061 page->index)) {
3062 ret = f2fs_write_multi_pages(&cc,
3063 &submitted, wbc, io_type);
3064 if (!ret)
3065 need_readd = true;
3066 goto result;
3067 }
3068
3069 if (unlikely(f2fs_cp_error(sbi)))
3070 goto lock_page;
3071
3072 if (f2fs_cluster_is_empty(&cc)) {
3073 void *fsdata = NULL;
3074 struct page *pagep;
3075 int ret2;
3076
3077 ret2 = f2fs_prepare_compress_overwrite(
3078 inode, &pagep,
3079 page->index, &fsdata);
3080 if (ret2 < 0) {
3081 ret = ret2;
3082 done = 1;
3083 break;
3084 } else if (ret2 &&
3085 !f2fs_compress_write_end(inode,
3086 fsdata, page->index,
3087 1)) {
3088 retry = 1;
3089 break;
3090 }
3091 } else {
3092 goto lock_page;
3093 }
3094 }
3095 #endif
3096 /* give a priority to WB_SYNC threads */
3097 if (atomic_read(&sbi->wb_sync_req[DATA]) &&
3098 wbc->sync_mode == WB_SYNC_NONE) {
3099 done = 1;
3100 break;
3101 }
3102 #ifdef CONFIG_F2FS_FS_COMPRESSION
3103 lock_page:
3104 #endif
3105 done_index = page->index;
3106 retry_write:
3107 lock_page(page);
3108
3109 if (unlikely(page->mapping != mapping)) {
3110 continue_unlock:
3111 unlock_page(page);
3112 continue;
3113 }
3114
3115 if (!PageDirty(page)) {
3116 /* someone wrote it for us */
3117 goto continue_unlock;
3118 }
3119
3120 if (PageWriteback(page)) {
3121 if (wbc->sync_mode != WB_SYNC_NONE)
3122 f2fs_wait_on_page_writeback(page,
3123 DATA, true, true);
3124 else
3125 goto continue_unlock;
3126 }
3127
3128 if (!clear_page_dirty_for_io(page))
3129 goto continue_unlock;
3130
3131 #ifdef CONFIG_F2FS_FS_COMPRESSION
3132 if (f2fs_compressed_file(inode)) {
3133 get_page(page);
3134 f2fs_compress_ctx_add_page(&cc, page);
3135 continue;
3136 }
3137 #endif
3138 ret = f2fs_write_single_data_page(page, &submitted,
3139 &bio, &last_block, wbc, io_type,
3140 0, true);
3141 if (ret == AOP_WRITEPAGE_ACTIVATE)
3142 unlock_page(page);
3143 #ifdef CONFIG_F2FS_FS_COMPRESSION
3144 result:
3145 #endif
3146 nwritten += submitted;
3147 wbc->nr_to_write -= submitted;
3148
3149 if (unlikely(ret)) {
3150 /*
3151 * keep nr_to_write, since vfs uses this to
3152 * get # of written pages.
3153 */
3154 if (ret == AOP_WRITEPAGE_ACTIVATE) {
3155 ret = 0;
3156 goto next;
3157 } else if (ret == -EAGAIN) {
3158 ret = 0;
3159 if (wbc->sync_mode == WB_SYNC_ALL) {
3160 cond_resched();
3161 congestion_wait(BLK_RW_ASYNC,
3162 DEFAULT_IO_TIMEOUT);
3163 goto retry_write;
3164 }
3165 goto next;
3166 }
3167 done_index = page->index + 1;
3168 done = 1;
3169 break;
3170 }
3171
3172 if (wbc->nr_to_write <= 0 &&
3173 wbc->sync_mode == WB_SYNC_NONE) {
3174 done = 1;
3175 break;
3176 }
3177 next:
3178 if (need_readd)
3179 goto readd;
3180 }
3181 pagevec_release(&pvec);
3182 cond_resched();
3183 }
3184 #ifdef CONFIG_F2FS_FS_COMPRESSION
3185 /* flush the remaining pages in the compress cluster */
3186 if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3187 ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3188 nwritten += submitted;
3189 wbc->nr_to_write -= submitted;
3190 if (ret) {
3191 done = 1;
3192 retry = 0;
3193 }
3194 }
3195 if (f2fs_compressed_file(inode))
3196 f2fs_destroy_compress_ctx(&cc, false);
3197 #endif
3198 if (retry) {
3199 index = 0;
3200 end = -1;
3201 goto retry;
3202 }
3203 if (wbc->range_cyclic && !done)
3204 done_index = 0;
3205 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3206 mapping->writeback_index = done_index;
3207
3208 if (nwritten)
3209 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
3210 NULL, 0, DATA);
3211 /* submit cached bio of IPU write */
3212 if (bio)
3213 f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
3214
3215 return ret;
3216 }
3217
3218 static inline bool __should_serialize_io(struct inode *inode,
3219 struct writeback_control *wbc)
3220 {
3221 /* to avoid deadlock in path of data flush */
3222 if (F2FS_I(inode)->wb_task)
3223 return false;
3224
3225 if (!S_ISREG(inode->i_mode))
3226 return false;
3227 if (IS_NOQUOTA(inode))
3228 return false;
3229
3230 if (f2fs_compressed_file(inode))
3231 return true;
3232 if (wbc->sync_mode != WB_SYNC_ALL)
3233 return true;
3234 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3235 return true;
3236 return false;
3237 }
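
/*
 * When this returns true, __f2fs_write_data_pages() below takes the
 * global sbi->writepages mutex around f2fs_write_cache_pages(), so
 * concurrent heavy writers (compressed files, non-WB_SYNC_ALL syncs,
 * inodes with many dirty pages) do not interleave their block
 * allocations.
 */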
3238
3239 static int __f2fs_write_data_pages(struct address_space *mapping,
3240 struct writeback_control *wbc,
3241 enum iostat_type io_type)
3242 {
3243 struct inode *inode = mapping->host;
3244 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3245 struct blk_plug plug;
3246 int ret;
3247 bool locked = false;
3248
3249 /* deal with chardevs and other special files */
3250 if (!mapping->a_ops->writepage)
3251 return 0;
3252
3253 /* skip writing if there is no dirty page in this inode */
3254 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3255 return 0;
3256
3257 /* during POR, we don't need to trigger writepage at all. */
3258 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3259 goto skip_write;
3260
3261 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3262 wbc->sync_mode == WB_SYNC_NONE &&
3263 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
3264 f2fs_available_free_memory(sbi, DIRTY_DENTS))
3265 goto skip_write;
3266
3267 /* skip writing during file defragment */
3268 if (is_inode_flag_set(inode, FI_DO_DEFRAG))
3269 goto skip_write;
3270
3271 trace_f2fs_writepages(mapping->host, wbc, DATA);
3272
3273 /* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3274 if (wbc->sync_mode == WB_SYNC_ALL)
3275 atomic_inc(&sbi->wb_sync_req[DATA]);
3276 else if (atomic_read(&sbi->wb_sync_req[DATA])) {
3277 /* to avoid potential deadlock */
3278 if (current->plug)
3279 blk_finish_plug(current->plug);
3280 goto skip_write;
3281 }
3282
3283 if (__should_serialize_io(inode, wbc)) {
3284 mutex_lock(&sbi->writepages);
3285 locked = true;
3286 }
3287
3288 blk_start_plug(&plug);
3289 ret = f2fs_write_cache_pages(mapping, wbc, io_type);
3290 blk_finish_plug(&plug);
3291
3292 if (locked)
3293 mutex_unlock(&sbi->writepages);
3294
3295 if (wbc->sync_mode == WB_SYNC_ALL)
3296 atomic_dec(&sbi->wb_sync_req[DATA]);
3297 /*
3298 * if some pages were truncated, we cannot rely on mapping->host
3299 * to detect pending bios.
3300 */
3301
3302 f2fs_remove_dirty_inode(inode);
3303 return ret;
3304
3305 skip_write:
3306 wbc->pages_skipped += get_dirty_pages(inode);
3307 trace_f2fs_writepages(mapping->host, wbc, DATA);
3308 return 0;
3309 }
3310
3311 static int f2fs_write_data_pages(struct address_space *mapping,
3312 struct writeback_control *wbc)
3313 {
3314 struct inode *inode = mapping->host;
3315
3316 return __f2fs_write_data_pages(mapping, wbc,
3317 F2FS_I(inode)->cp_task == current ?
3318 FS_CP_DATA_IO : FS_DATA_IO);
3319 }
3320
3321 static void f2fs_write_failed(struct address_space *mapping, loff_t to)
3322 {
3323 struct inode *inode = mapping->host;
3324 loff_t i_size = i_size_read(inode);
3325
3326 if (IS_NOQUOTA(inode))
3327 return;
3328
3329 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3330 if (to > i_size && !f2fs_verity_in_progress(inode)) {
3331 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3332 down_write(&F2FS_I(inode)->i_mmap_sem);
3333
3334 truncate_pagecache(inode, i_size);
3335 f2fs_truncate_blocks(inode, i_size, true);
3336
3337 up_write(&F2FS_I(inode)->i_mmap_sem);
3338 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3339 }
3340 }
3341
3342 static int prepare_write_begin(struct f2fs_sb_info *sbi,
3343 struct page *page, loff_t pos, unsigned len,
3344 block_t *blk_addr, bool *node_changed)
3345 {
3346 struct inode *inode = page->mapping->host;
3347 pgoff_t index = page->index;
3348 struct dnode_of_data dn;
3349 struct page *ipage;
3350 bool locked = false;
3351 struct extent_info ei = {0,0,0};
3352 int err = 0;
3353 int flag;
3354
3355 /*
3356 * we already allocated all the blocks, so we don't need to get
3357 * the block addresses when there is no need to fill the page.
3358 */
3359 if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
3360 !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
3361 !f2fs_verity_in_progress(inode))
3362 return 0;
3363
3364 /* f2fs_lock_op avoids race between write CP and convert_inline_page */
3365 if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
3366 flag = F2FS_GET_BLOCK_DEFAULT;
3367 else
3368 flag = F2FS_GET_BLOCK_PRE_AIO;
3369
3370 if (f2fs_has_inline_data(inode) ||
3371 (pos & PAGE_MASK) >= i_size_read(inode)) {
3372 f2fs_do_map_lock(sbi, flag, true);
3373 locked = true;
3374 }
3375
3376 restart:
3377 /* check inline_data */
3378 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3379 if (IS_ERR(ipage)) {
3380 err = PTR_ERR(ipage);
3381 goto unlock_out;
3382 }
3383
3384 set_new_dnode(&dn, inode, ipage, ipage, 0);
3385
3386 if (f2fs_has_inline_data(inode)) {
3387 if (pos + len <= MAX_INLINE_DATA(inode)) {
3388 f2fs_do_read_inline_data(page, ipage);
3389 set_inode_flag(inode, FI_DATA_EXIST);
3390 if (inode->i_nlink)
3391 set_inline_node(ipage);
3392 } else {
3393 err = f2fs_convert_inline_page(&dn, page);
3394 if (err)
3395 goto out;
3396 if (dn.data_blkaddr == NULL_ADDR)
3397 err = f2fs_get_block(&dn, index);
3398 }
3399 } else if (locked) {
3400 err = f2fs_get_block(&dn, index);
3401 } else {
3402 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
3403 dn.data_blkaddr = ei.blk + index - ei.fofs;
3404 } else {
3405 /* hole case */
3406 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3407 if (err || dn.data_blkaddr == NULL_ADDR) {
3408 f2fs_put_dnode(&dn);
3409 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
3410 true);
3411 WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
3412 locked = true;
3413 goto restart;
3414 }
3415 }
3416 }
3417
3418 /* convert_inline_page can make node_changed */
3419 *blk_addr = dn.data_blkaddr;
3420 *node_changed = dn.node_changed;
3421 out:
3422 f2fs_put_dnode(&dn);
3423 unlock_out:
3424 if (locked)
3425 f2fs_do_map_lock(sbi, flag, false);
3426 return err;
3427 }
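
/*
 * Summary of the cases above: the target block address comes from one
 * of three places. Inline data is read (or converted) in place, a
 * caller that already took the map lock allocates via f2fs_get_block(),
 * and the unlocked fast path first consults the extent cache, falling
 * back to a dnode lookup and retrying under the PRE_AIO map lock when
 * it finds a hole.
 */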
3428
3429 static int f2fs_write_begin(struct file *file, struct address_space *mapping,
3430 loff_t pos, unsigned len, unsigned flags,
3431 struct page **pagep, void **fsdata)
3432 {
3433 struct inode *inode = mapping->host;
3434 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3435 struct page *page = NULL;
3436 pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
3437 bool need_balance = false, drop_atomic = false;
3438 block_t blkaddr = NULL_ADDR;
3439 int err = 0;
3440
3441 trace_f2fs_write_begin(inode, pos, len, flags);
3442
3443 if (!f2fs_is_checkpoint_ready(sbi)) {
3444 err = -ENOSPC;
3445 goto fail;
3446 }
3447
3448 if ((f2fs_is_atomic_file(inode) &&
3449 !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
3450 is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
3451 err = -ENOMEM;
3452 drop_atomic = true;
3453 goto fail;
3454 }
3455
3456 /*
3457 * We should check this at this moment to avoid deadlock on inode page
3458 * and #0 page. The locking rule for inline_data conversion should be:
3459 * lock_page(page #0) -> lock_page(inode_page)
3460 */
3461 if (index != 0) {
3462 err = f2fs_convert_inline_inode(inode);
3463 if (err)
3464 goto fail;
3465 }
3466
3467 #ifdef CONFIG_F2FS_FS_COMPRESSION
3468 if (f2fs_compressed_file(inode)) {
3469 int ret;
3470
3471 *fsdata = NULL;
3472
3473 if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
3474 goto repeat;
3475
3476 ret = f2fs_prepare_compress_overwrite(inode, pagep,
3477 index, fsdata);
3478 if (ret < 0) {
3479 err = ret;
3480 goto fail;
3481 } else if (ret) {
3482 return 0;
3483 }
3484 }
3485 #endif
3486
3487 repeat:
3488 /*
3489 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
3490 * wait_for_stable_page. We will wait for that below with our IO control.
3491 */
3492 page = f2fs_pagecache_get_page(mapping, index,
3493 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
3494 if (!page) {
3495 err = -ENOMEM;
3496 goto fail;
3497 }
3498
3499 /* TODO: cluster can be compressed due to race with .writepage */
3500
3501 *pagep = page;
3502
3503 err = prepare_write_begin(sbi, page, pos, len,
3504 &blkaddr, &need_balance);
3505 if (err)
3506 goto fail;
3507
3508 if (need_balance && !IS_NOQUOTA(inode) &&
3509 has_not_enough_free_secs(sbi, 0, 0)) {
3510 unlock_page(page);
3511 f2fs_balance_fs(sbi, true);
3512 lock_page(page);
3513 if (page->mapping != mapping) {
3514 /* The page got truncated from under us */
3515 f2fs_put_page(page, 1);
3516 goto repeat;
3517 }
3518 }
3519
3520 f2fs_wait_on_page_writeback(page, DATA, false, true);
3521
3522 if (len == PAGE_SIZE || PageUptodate(page))
3523 return 0;
3524
3525 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3526 !f2fs_verity_in_progress(inode)) {
3527 zero_user_segment(page, len, PAGE_SIZE);
3528 return 0;
3529 }
3530
3531 if (blkaddr == NEW_ADDR) {
3532 zero_user_segment(page, 0, PAGE_SIZE);
3533 SetPageUptodate(page);
3534 } else {
3535 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3536 DATA_GENERIC_ENHANCE_READ)) {
3537 err = -EFSCORRUPTED;
3538 goto fail;
3539 }
3540 err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
3541 if (err)
3542 goto fail;
3543
3544 lock_page(page);
3545 if (unlikely(page->mapping != mapping)) {
3546 f2fs_put_page(page, 1);
3547 goto repeat;
3548 }
3549 if (unlikely(!PageUptodate(page))) {
3550 err = -EIO;
3551 goto fail;
3552 }
3553 }
3554 return 0;
3555
3556 fail:
3557 f2fs_put_page(page, 1);
3558 f2fs_write_failed(mapping, pos + len);
3559 if (drop_atomic)
3560 f2fs_drop_inmem_pages_all(sbi, false);
3561 return err;
3562 }
3563
3564 static int f2fs_write_end(struct file *file,
3565 struct address_space *mapping,
3566 loff_t pos, unsigned len, unsigned copied,
3567 struct page *page, void *fsdata)
3568 {
3569 struct inode *inode = page->mapping->host;
3570
3571 trace_f2fs_write_end(inode, pos, len, copied);
3572
3573 /*
3574 * This should come from len == PAGE_SIZE, and we expect copied
3575 * to be PAGE_SIZE. Otherwise, we treat it as zero copied and
3576 * let generic_perform_write() try to copy data again through copied=0.
3577 */
3578 if (!PageUptodate(page)) {
3579 if (unlikely(copied != len))
3580 copied = 0;
3581 else
3582 SetPageUptodate(page);
3583 }
3584
3585 #ifdef CONFIG_F2FS_FS_COMPRESSION
3586 /* overwrite compressed file */
3587 if (f2fs_compressed_file(inode) && fsdata) {
3588 f2fs_compress_write_end(inode, fsdata, page->index, copied);
3589 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3590
3591 if (pos + copied > i_size_read(inode) &&
3592 !f2fs_verity_in_progress(inode))
3593 f2fs_i_size_write(inode, pos + copied);
3594 return copied;
3595 }
3596 #endif
3597
3598 if (!copied)
3599 goto unlock_out;
3600
3601 set_page_dirty(page);
3602
3603 if (pos + copied > i_size_read(inode) &&
3604 !f2fs_verity_in_progress(inode))
3605 f2fs_i_size_write(inode, pos + copied);
3606 unlock_out:
3607 f2fs_put_page(page, 1);
3608 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3609 return copied;
3610 }
3611
3612 static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
3613 loff_t offset)
3614 {
3615 unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
3616 unsigned blkbits = i_blkbits;
3617 unsigned blocksize_mask = (1 << blkbits) - 1;
3618 unsigned long align = offset | iov_iter_alignment(iter);
3619 struct block_device *bdev = inode->i_sb->s_bdev;
3620
3621 if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
3622 return 1;
3623
3624 if (align & blocksize_mask) {
3625 if (bdev)
3626 blkbits = blksize_bits(bdev_logical_block_size(bdev));
3627 blocksize_mask = (1 << blkbits) - 1;
3628 if (align & blocksize_mask)
3629 return -EINVAL;
3630 return 1;
3631 }
3632 return 0;
3633 }
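
/*
 * Return-value contract of check_direct_IO(), with a concrete case
 * (assuming 4KiB fs blocks over a 512B-logical-block bdev): a request
 * that is only 512B-aligned misses the fs-block mask but passes the
 * bdev recheck, so 1 is returned and f2fs_direct_IO() below falls back
 * to buffered I/O; a 100B-aligned request fails both masks and yields
 * -EINVAL; 0 means the DIO is fully fs-block aligned and may proceed.
 */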
3634
3635 static void f2fs_dio_end_io(struct bio *bio)
3636 {
3637 struct f2fs_private_dio *dio = bio->bi_private;
3638
3639 dec_page_count(F2FS_I_SB(dio->inode),
3640 dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3641
3642 bio->bi_private = dio->orig_private;
3643 bio->bi_end_io = dio->orig_end_io;
3644
3645 kfree(dio);
3646
3647 bio_endio(bio);
3648 }
3649
3650 static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
3651 loff_t file_offset)
3652 {
3653 struct f2fs_private_dio *dio;
3654 bool write = (bio_op(bio) == REQ_OP_WRITE);
3655
3656 dio = f2fs_kzalloc(F2FS_I_SB(inode),
3657 sizeof(struct f2fs_private_dio), GFP_NOFS);
3658 if (!dio)
3659 goto out;
3660
3661 dio->inode = inode;
3662 dio->orig_end_io = bio->bi_end_io;
3663 dio->orig_private = bio->bi_private;
3664 dio->write = write;
3665
3666 bio->bi_end_io = f2fs_dio_end_io;
3667 bio->bi_private = dio;
3668
3669 inc_page_count(F2FS_I_SB(inode),
3670 write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3671
3672 submit_bio(bio);
3673 return;
3674 out:
3675 bio->bi_status = BLK_STS_IOERR;
3676 bio_endio(bio);
3677 }
3678
3679 static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3680 {
3681 struct address_space *mapping = iocb->ki_filp->f_mapping;
3682 struct inode *inode = mapping->host;
3683 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3684 struct f2fs_inode_info *fi = F2FS_I(inode);
3685 size_t count = iov_iter_count(iter);
3686 loff_t offset = iocb->ki_pos;
3687 int rw = iov_iter_rw(iter);
3688 int err;
3689 enum rw_hint hint = iocb->ki_hint;
3690 int whint_mode = F2FS_OPTION(sbi).whint_mode;
3691 bool do_opu;
3692
3693 err = check_direct_IO(inode, iter, offset);
3694 if (err)
3695 return err < 0 ? err : 0;
3696
3697 if (f2fs_force_buffered_io(inode, iocb, iter))
3698 return 0;
3699
3700 do_opu = allow_outplace_dio(inode, iocb, iter);
3701
3702 trace_f2fs_direct_IO_enter(inode, offset, count, rw);
3703
3704 if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
3705 iocb->ki_hint = WRITE_LIFE_NOT_SET;
3706
3707 if (iocb->ki_flags & IOCB_NOWAIT) {
3708 if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
3709 iocb->ki_hint = hint;
3710 err = -EAGAIN;
3711 goto out;
3712 }
3713 if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
3714 up_read(&fi->i_gc_rwsem[rw]);
3715 iocb->ki_hint = hint;
3716 err = -EAGAIN;
3717 goto out;
3718 }
3719 } else {
3720 down_read(&fi->i_gc_rwsem[rw]);
3721 if (do_opu)
3722 down_read(&fi->i_gc_rwsem[READ]);
3723 }
3724
3725 err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3726 iter, rw == WRITE ? get_data_block_dio_write :
3727 get_data_block_dio, NULL, f2fs_dio_submit_bio,
3728 rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
3729 DIO_SKIP_HOLES);
3730
3731 if (do_opu)
3732 up_read(&fi->i_gc_rwsem[READ]);
3733
3734 up_read(&fi->i_gc_rwsem[rw]);
3735
3736 if (rw == WRITE) {
3737 if (whint_mode == WHINT_MODE_OFF)
3738 iocb->ki_hint = hint;
3739 if (err > 0) {
3740 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3741 err);
3742 if (!do_opu)
3743 set_inode_flag(inode, FI_UPDATE_WRITE);
3744 } else if (err == -EIOCBQUEUED) {
3745 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3746 count - iov_iter_count(iter));
3747 } else if (err < 0) {
3748 f2fs_write_failed(mapping, offset + count);
3749 }
3750 } else {
3751 if (err > 0)
3752 f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
3753 else if (err == -EIOCBQUEUED)
3754 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
3755 count - iov_iter_count(iter));
3756 }
3757
3758 out:
3759 trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
3760
3761 return err;
3762 }
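
/*
 * Lock ordering in f2fs_direct_IO() above: i_gc_rwsem[rw] is taken
 * first, then i_gc_rwsem[READ] as well for out-of-place (OPU) DIO
 * writes; IOCB_NOWAIT callers only trylock and bail out with -EAGAIN
 * rather than sleeping on either semaphore.
 */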
3763
3764 void f2fs_invalidate_page(struct page *page, unsigned int offset,
3765 unsigned int length)
3766 {
3767 struct inode *inode = page->mapping->host;
3768 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3769
3770 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
3771 (offset % PAGE_SIZE || length != PAGE_SIZE))
3772 return;
3773
3774 if (PageDirty(page)) {
3775 if (inode->i_ino == F2FS_META_INO(sbi)) {
3776 dec_page_count(sbi, F2FS_DIRTY_META);
3777 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
3778 dec_page_count(sbi, F2FS_DIRTY_NODES);
3779 } else {
3780 inode_dec_dirty_pages(inode);
3781 f2fs_remove_dirty_inode(inode);
3782 }
3783 }
3784
3785 clear_cold_data(page);
3786
3787 if (IS_ATOMIC_WRITTEN_PAGE(page))
3788 return f2fs_drop_inmem_page(inode, page);
3789
3790 f2fs_clear_page_private(page);
3791 }
3792
3793 int f2fs_release_page(struct page *page, gfp_t wait)
3794 {
3795 /* If this is a dirty page, keep PagePrivate */
3796 if (PageDirty(page))
3797 return 0;
3798
3799 /* This is an atomic-written page, keep Private */
3800 if (IS_ATOMIC_WRITTEN_PAGE(page))
3801 return 0;
3802
3803 clear_cold_data(page);
3804 f2fs_clear_page_private(page);
3805 return 1;
3806 }
3807
3808 static int f2fs_set_data_page_dirty(struct page *page)
3809 {
3810 struct inode *inode = page_file_mapping(page)->host;
3811
3812 trace_f2fs_set_page_dirty(page, DATA);
3813
3814 if (!PageUptodate(page))
3815 SetPageUptodate(page);
3816 if (PageSwapCache(page))
3817 return __set_page_dirty_nobuffers(page);
3818
3819 if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
3820 if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
3821 f2fs_register_inmem_page(inode, page);
3822 return 1;
3823 }
3824 /*
3825 * This page has already been registered, so we just
3826 * return here.
3827 */
3828 return 0;
3829 }
3830
3831 if (!PageDirty(page)) {
3832 __set_page_dirty_nobuffers(page);
3833 f2fs_update_dirty_page(inode, page);
3834 return 1;
3835 }
3836 return 0;
3837 }
3838
3839
3840 static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3841 {
3842 #ifdef CONFIG_F2FS_FS_COMPRESSION
3843 struct dnode_of_data dn;
3844 sector_t start_idx, blknr = 0;
3845 int ret;
3846
3847 start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3848
3849 set_new_dnode(&dn, inode, NULL, NULL, 0);
3850 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3851 if (ret)
3852 return 0;
3853
3854 if (dn.data_blkaddr != COMPRESS_ADDR) {
3855 dn.ofs_in_node += block - start_idx;
3856 blknr = f2fs_data_blkaddr(&dn);
3857 if (!__is_valid_data_blkaddr(blknr))
3858 blknr = 0;
3859 }
3860
3861 f2fs_put_dnode(&dn);
3862 return blknr;
3863 #else
3864 return 0;
3865 #endif
3866 }
3867
3868
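/*
 * ->bmap() hook, which backs the FIBMAP ioctl.  A userspace caller would
 * query it roughly like this (sketch, error handling omitted):
 *
 *	int blk = 0;			// logical block to translate
 *	ioctl(fd, FIBMAP, &blk);	// blk now holds the physical block
 *
 * Inline-data files have no block mapping, and dirty mappings are flushed
 * first so that delayed allocations are resolved before the lookup.
 */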
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct buffer_head tmp = {
		.b_size = i_blocksize(inode),
	};
	sector_t blknr = 0;

	if (f2fs_has_inline_data(inode))
		goto out;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	/* the block number must be within max_file_blocks */
	if (unlikely(block >= F2FS_I_SB(inode)->max_file_blocks))
		goto out;

	if (f2fs_compressed_file(inode)) {
		blknr = f2fs_bmap_compress(inode, block);
	} else {
		if (!get_data_block_bmap(inode, block, &tmp, 0))
			blknr = tmp.b_blocknr;
	}
out:
	trace_f2fs_bmap(inode, block, blknr);
	return blknr;
}

#ifdef CONFIG_MIGRATION
#include <linux/migrate.h>

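/*
 * ->migratepage() hook, used when the VM moves a page to a new physical
 * page, e.g. for memory compaction.  Beyond the generic mapping move,
 * f2fs must re-point any inmem_pages entry that still references the old
 * page and transfer the page's private data to the new page.
 */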
int f2fs_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	int rc, extra_count;
	struct f2fs_inode_info *fi = F2FS_I(mapping->host);
	bool atomic_written = IS_ATOMIC_WRITTEN_PAGE(page);

	BUG_ON(PageWriteback(page));

	/* migrating an atomic written page is safe with inmem_lock held */
	if (atomic_written) {
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		if (!mutex_trylock(&fi->inmem_lock))
			return -EAGAIN;
	}

	/* one extra reference was held for the atomic_write page */
	extra_count = atomic_written ? 1 : 0;
	rc = migrate_page_move_mapping(mapping, newpage,
				page, extra_count);
	if (rc != MIGRATEPAGE_SUCCESS) {
		if (atomic_written)
			mutex_unlock(&fi->inmem_lock);
		return rc;
	}

	if (atomic_written) {
		struct inmem_pages *cur;

		list_for_each_entry(cur, &fi->inmem_pages, list)
			if (cur->page == page) {
				cur->page = newpage;
				break;
			}
		mutex_unlock(&fi->inmem_lock);
		put_page(page);
		get_page(newpage);
	}

	if (PagePrivate(page)) {
		f2fs_set_page_private(newpage, page_private(page));
		f2fs_clear_page_private(page);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
#endif

#ifdef CONFIG_SWAP
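/*
 * Fast-path swapfile setup for the common PAGE_SIZE == F2FS_BLKSIZE case:
 * walk the file with get_data_block(F2FS_GET_BLOCK_FIEMAP) and register
 * each physically contiguous run as one swap extent via add_swap_extent().
 * Any hole fails the activation.
 */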
static int check_swap_activate_fast(struct swap_info_struct *sis,
				struct file *swap_file, sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	sector_t cur_lblock;
	sector_t last_lblock;
	sector_t pblock;
	sector_t lowest_pblock = -1;
	sector_t highest_pblock = 0;
	int nr_extents = 0;
	unsigned long nr_pblocks;
	unsigned long len;
	int ret;

	/*
	 * Map all the blocks into the extent list. This code doesn't try
	 * to be very smart.
	 */
	cur_lblock = 0;
	last_lblock = logical_to_blk(inode, i_size_read(inode));
	len = i_size_read(inode);

	while (cur_lblock <= last_lblock && cur_lblock < sis->max) {
		struct buffer_head map_bh;
		pgoff_t next_pgofs;

		cond_resched();

		memset(&map_bh, 0, sizeof(struct buffer_head));
		/* b_size is in bytes, while cur_lblock counts blocks */
		map_bh.b_size = len - blk_to_logical(inode, cur_lblock);

		ret = get_data_block(inode, cur_lblock, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
		if (ret)
			goto err_out;

		/* hole */
		if (!buffer_mapped(&map_bh))
			goto err_out;

		pblock = map_bh.b_blocknr;
		nr_pblocks = logical_to_blk(inode, map_bh.b_size);

		if (cur_lblock + nr_pblocks >= sis->max)
			nr_pblocks = sis->max - cur_lblock;

		if (cur_lblock) {	/* exclude the header page */
			if (pblock < lowest_pblock)
				lowest_pblock = pblock;
			if (pblock + nr_pblocks - 1 > highest_pblock)
				highest_pblock = pblock + nr_pblocks - 1;
		}

		/*
		 * We found a contiguous run of blocks; add it as one
		 * swap extent.
		 */
		ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		cur_lblock += nr_pblocks;
	}
	ret = nr_extents;
	*span = 1 + highest_pblock - lowest_pblock;
	if (cur_lblock == 0)
		cur_lblock = 1;	/* force Empty message */
	sis->max = cur_lblock;
	sis->pages = cur_lblock - 1;
	sis->highest_bit = cur_lblock - 1;
out:
	return ret;
err_out:
	pr_err("swapon: swapfile has holes\n");
	return -EINVAL;
}

/* Copied from generic_swapfile_activate() to check any holes */
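/*
 * This slow path probes the file one page at a time through bmap(),
 * requiring every page to be backed by PAGE_SIZE-aligned, physically
 * contiguous blocks; each such page becomes a one-page swap extent.
 */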
static int check_swap_activate(struct swap_info_struct *sis,
				struct file *swap_file, sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	if (PAGE_SIZE == F2FS_BLKSIZE)
		return check_swap_activate_fast(sis, swap_file, span);

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent list. This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;
		sector_t block = 0;
		int err = 0;

		cond_resched();

		block = probe_block;
		err = bmap(inode, &block);
		if (err || !block)
			goto bad_bmap;
		first_block = block;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {

			block = probe_block + block_in_page;
			err = bmap(inode, &block);

			if (err || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	return -EINVAL;
}

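/*
 * ->swap_activate() hook, called from swapon(2).  The file must be a
 * regular file on a writable, non-LFS-mode filesystem; inline data is
 * converted out and compression is disabled so that every block has a
 * stable physical address, then the file is pinned to keep GC from
 * relocating its blocks while it is in use as swap.
 */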
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	struct inode *inode = file_inode(file);
	int ret;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
		f2fs_err(F2FS_I_SB(inode),
			"Swapfile not supported in LFS mode");
		return -EINVAL;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	if (!f2fs_disable_compressed_file(inode))
		return -EINVAL;

	ret = check_swap_activate(sis, file, span);
	if (ret < 0)
		return ret;

	set_inode_flag(inode, FI_PIN_FILE);
	f2fs_precache_extents(inode);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}

static void f2fs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	clear_inode_flag(inode, FI_PIN_FILE);
}
#else
static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
				sector_t *span)
{
	return -EOPNOTSUPP;
}

static void f2fs_swap_deactivate(struct file *file)
{
}
#endif

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readahead	= f2fs_readahead,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
	.swap_activate	= f2fs_swap_activate,
	.swap_deactivate = f2fs_swap_deactivate,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

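/*
 * Clear PAGECACHE_TAG_DIRTY for a single page in its mapping's xarray,
 * under the xarray lock.  This only drops the xarray tag; the PageDirty
 * flag itself is handled separately by the callers.
 */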
void f2fs_clear_page_cache_dirty_tag(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	__xa_clear_mark(&mapping->i_pages, page_index(page),
						PAGECACHE_TAG_DIRTY);
	xa_unlock_irqrestore(&mapping->i_pages, flags);
}

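/*
 * The bio_post_read_ctx objects that drive the decrypt/decompress/verity
 * steps are served from a slab cache backed by a mempool, so that read
 * completion can always make forward progress under memory pressure.
 * A minimal sketch of the same pattern (names here are illustrative,
 * not part of f2fs):
 *
 *	cache = kmem_cache_create("ctx", sizeof(struct ctx), 0, 0, NULL);
 *	pool = mempool_create_slab_pool(128, cache);
 *	ctx = mempool_alloc(pool, GFP_NOFS);	// may dip into the reserve
 *	mempool_free(ctx, pool);
 */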
int __init f2fs_init_post_read_processing(void)
{
	bio_post_read_ctx_cache =
		kmem_cache_create("f2fs_bio_post_read_ctx",
				sizeof(struct bio_post_read_ctx), 0, 0, NULL);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void f2fs_destroy_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}

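/*
 * Per-superblock workqueue for post-read processing.  It is only needed
 * when at least one post-read step (encryption, verity or compression)
 * can be enabled on this filesystem; plain reads complete directly in
 * the bio end_io path without it.
 */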
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (!f2fs_sb_has_encrypt(sbi) &&
		!f2fs_sb_has_verity(sbi) &&
		!f2fs_sb_has_compression(sbi))
		return 0;

	sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!sbi->post_read_wq)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
{
	if (sbi->post_read_wq)
		destroy_workqueue(sbi->post_read_wq);
}

int __init f2fs_init_bio_entry_cache(void)
{
	bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
			sizeof(struct bio_entry));
	if (!bio_entry_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_bio_entry_cache(void)
{
	kmem_cache_destroy(bio_entry_slab);
}