Lines Matching refs:bio

83 	struct bio *bio;  member
89 static void __read_end_io(struct bio *bio) in __read_end_io() argument
95 bio_for_each_segment_all(bv, bio, iter_all) { in __read_end_io()
99 if (bio->bi_status || PageError(page)) { in __read_end_io()
109 if (bio->bi_private) in __read_end_io()
110 mempool_free(bio->bi_private, bio_post_read_ctx_pool); in __read_end_io()
111 bio_put(bio); in __read_end_io()
121 fscrypt_decrypt_bio(ctx->bio); in decrypt_work()
131 fsverity_verify_bio(ctx->bio); in verity_work()
161 __read_end_io(ctx->bio); in bio_post_read_processing()
165 static bool f2fs_bio_post_read_required(struct bio *bio) in f2fs_bio_post_read_required() argument
167 return bio->bi_private && !bio->bi_status; in f2fs_bio_post_read_required()
170 static void f2fs_read_end_io(struct bio *bio) in f2fs_read_end_io() argument
172 struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio)); in f2fs_read_end_io()
176 bio->bi_status = BLK_STS_IOERR; in f2fs_read_end_io()
179 if (f2fs_bio_post_read_required(bio)) { in f2fs_read_end_io()
180 struct bio_post_read_ctx *ctx = bio->bi_private; in f2fs_read_end_io()
187 __read_end_io(bio); in f2fs_read_end_io()
190 static void f2fs_write_end_io(struct bio *bio) in f2fs_write_end_io() argument
192 struct f2fs_sb_info *sbi = bio->bi_private; in f2fs_write_end_io()
198 bio->bi_status = BLK_STS_IOERR; in f2fs_write_end_io()
201 bio_for_each_segment_all(bvec, bio, iter_all) { in f2fs_write_end_io()
211 if (unlikely(bio->bi_status)) in f2fs_write_end_io()
218 if (unlikely(bio->bi_status)) { in f2fs_write_end_io()
237 bio_put(bio); in f2fs_write_end_io()
244 block_t blk_addr, struct bio *bio) in f2fs_target_device() argument
259 if (bio) { in f2fs_target_device()
260 bio_set_dev(bio, bdev); in f2fs_target_device()
261 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr); in f2fs_target_device()
280 block_t blk_addr, struct bio *bio) in __same_bdev() argument
283 return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno; in __same_bdev()
289 static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages) in __bio_alloc()
292 struct bio *bio; in __bio_alloc() local
294 bio = f2fs_bio_alloc(sbi, npages, true); in __bio_alloc()
296 f2fs_target_device(sbi, fio->new_blkaddr, bio); in __bio_alloc()
298 bio->bi_end_io = f2fs_read_end_io; in __bio_alloc()
299 bio->bi_private = NULL; in __bio_alloc()
301 bio->bi_end_io = f2fs_write_end_io; in __bio_alloc()
302 bio->bi_private = sbi; in __bio_alloc()
303 bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, in __bio_alloc()
307 wbc_init_bio(fio->io_wbc, bio); in __bio_alloc()
309 return bio; in __bio_alloc()
312 static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, in f2fs_set_bio_crypt_ctx() argument
322 fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask); in f2fs_set_bio_crypt_ctx()
324 bio_set_skip_dm_default_key(bio); in f2fs_set_bio_crypt_ctx()
327 static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode, in f2fs_crypt_mergeable_bio() argument
336 return !bio_has_crypt_ctx(bio) && in f2fs_crypt_mergeable_bio()
337 (bio_should_skip_dm_default_key(bio) == in f2fs_crypt_mergeable_bio()
340 return fscrypt_mergeable_bio(bio, inode, next_idx); in f2fs_crypt_mergeable_bio()
344 struct bio *bio, enum page_type type) in __submit_bio() argument
346 if (!is_read_io(bio_op(bio))) { in __submit_bio()
358 start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS; in __submit_bio()
375 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) in __submit_bio()
386 if (is_read_io(bio_op(bio))) in __submit_bio()
387 trace_f2fs_submit_read_bio(sbi->sb, type, bio); in __submit_bio()
389 trace_f2fs_submit_write_bio(sbi->sb, type, bio); in __submit_bio()
390 submit_bio(bio); in __submit_bio()
397 if (!io->bio) in __submit_merged_bio()
400 bio_set_op_attrs(io->bio, fio->op, fio->op_flags); in __submit_merged_bio()
403 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio); in __submit_merged_bio()
405 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio); in __submit_merged_bio()
407 __submit_bio(io->sbi, io->bio, fio->type); in __submit_merged_bio()
408 io->bio = NULL; in __submit_merged_bio()
411 static bool __has_merged_page(struct bio *bio, struct inode *inode, in __has_merged_page() argument
418 if (!bio) in __has_merged_page()
424 bio_for_each_segment_all(bvec, bio, iter_all) { in __has_merged_page()
474 ret = __has_merged_page(io->bio, inode, page, ino); in __submit_merged_write_cond()
511 struct bio *bio; in f2fs_submit_page_bio() local
524 bio = __bio_alloc(fio, 1); in f2fs_submit_page_bio()
526 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, in f2fs_submit_page_bio()
529 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { in f2fs_submit_page_bio()
530 bio_put(bio); in f2fs_submit_page_bio()
537 bio_set_op_attrs(bio, fio->op, fio->op_flags); in f2fs_submit_page_bio()
542 __submit_bio(fio->sbi, bio, fio->type); in f2fs_submit_page_bio()
546 static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio, in page_is_mergeable() argument
551 return __same_bdev(sbi, cur_blkaddr, bio); in page_is_mergeable()
562 static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio, in io_is_mergeable() argument
570 F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size); in io_is_mergeable()
572 unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt; in io_is_mergeable()
578 if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr)) in io_is_mergeable()
583 static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio, in add_bio_entry() argument
590 be->bio = bio; in add_bio_entry()
591 bio_get(bio); in add_bio_entry()
593 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) in add_bio_entry()
607 static int add_ipu_page(struct f2fs_sb_info *sbi, struct bio **bio, in add_ipu_page() argument
621 if (be->bio != *bio) in add_ipu_page()
626 if (bio_add_page(*bio, page, PAGE_SIZE, 0) == PAGE_SIZE) { in add_ipu_page()
633 __submit_bio(sbi, *bio, DATA); in add_ipu_page()
640 bio_put(*bio); in add_ipu_page()
641 *bio = NULL; in add_ipu_page()
648 struct bio **bio, struct page *page) in f2fs_submit_merged_ipu_write() argument
652 struct bio *target = bio ? *bio : NULL; in f2fs_submit_merged_ipu_write()
665 found = (target == be->bio); in f2fs_submit_merged_ipu_write()
667 found = __has_merged_page(be->bio, NULL, in f2fs_submit_merged_ipu_write()
682 found = (target == be->bio); in f2fs_submit_merged_ipu_write()
684 found = __has_merged_page(be->bio, NULL, in f2fs_submit_merged_ipu_write()
687 target = be->bio; in f2fs_submit_merged_ipu_write()
697 if (bio && *bio) { in f2fs_submit_merged_ipu_write()
698 bio_put(*bio); in f2fs_submit_merged_ipu_write()
699 *bio = NULL; in f2fs_submit_merged_ipu_write()
705 struct bio *bio = *fio->bio; in f2fs_merge_page_bio() local
716 if (bio && (!page_is_mergeable(fio->sbi, bio, *fio->last_block, in f2fs_merge_page_bio()
718 !f2fs_crypt_mergeable_bio(bio, fio->page->mapping->host, in f2fs_merge_page_bio()
720 f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL); in f2fs_merge_page_bio()
722 if (!bio) { in f2fs_merge_page_bio()
723 bio = __bio_alloc(fio, BIO_MAX_PAGES); in f2fs_merge_page_bio()
724 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, in f2fs_merge_page_bio()
727 bio_set_op_attrs(bio, fio->op, fio->op_flags); in f2fs_merge_page_bio()
729 add_bio_entry(fio->sbi, bio, page, fio->temp); in f2fs_merge_page_bio()
731 if (add_ipu_page(fio->sbi, &bio, page)) in f2fs_merge_page_bio()
741 *fio->bio = bio; in f2fs_merge_page_bio()
778 if (io->bio && in f2fs_submit_page_write()
779 (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio, in f2fs_submit_page_write()
781 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host, in f2fs_submit_page_write()
785 if (io->bio == NULL) { in f2fs_submit_page_write()
793 io->bio = __bio_alloc(fio, BIO_MAX_PAGES); in f2fs_submit_page_write()
794 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host, in f2fs_submit_page_write()
800 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) { in f2fs_submit_page_write()
828 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, in f2fs_grab_read_bio()
833 struct bio *bio; in f2fs_grab_read_bio() local
837 bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false); in f2fs_grab_read_bio()
838 if (!bio) in f2fs_grab_read_bio()
841 f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS); in f2fs_grab_read_bio()
843 f2fs_target_device(sbi, blkaddr, bio); in f2fs_grab_read_bio()
844 bio->bi_end_io = f2fs_read_end_io; in f2fs_grab_read_bio()
845 bio_set_op_attrs(bio, REQ_OP_READ, op_flag); in f2fs_grab_read_bio()
856 bio_put(bio); in f2fs_grab_read_bio()
859 ctx->bio = bio; in f2fs_grab_read_bio()
861 bio->bi_private = ctx; in f2fs_grab_read_bio()
864 return bio; in f2fs_grab_read_bio()
872 struct bio *bio; in f2fs_submit_page_read() local
874 bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0, page->index); in f2fs_submit_page_read()
875 if (IS_ERR(bio)) in f2fs_submit_page_read()
876 return PTR_ERR(bio); in f2fs_submit_page_read()
881 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { in f2fs_submit_page_read()
882 bio_put(bio); in f2fs_submit_page_read()
887 __submit_bio(sbi, bio, DATA); in f2fs_submit_page_read()
1809 struct bio **bio_ret, in f2fs_read_single_page()
1813 struct bio *bio = *bio_ret; in f2fs_read_single_page() local
1884 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio, in f2fs_read_single_page()
1886 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) { in f2fs_read_single_page()
1888 __submit_bio(F2FS_I_SB(inode), bio, DATA); in f2fs_read_single_page()
1889 bio = NULL; in f2fs_read_single_page()
1891 if (bio == NULL) { in f2fs_read_single_page()
1892 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages, in f2fs_read_single_page()
1894 if (IS_ERR(bio)) { in f2fs_read_single_page()
1895 ret = PTR_ERR(bio); in f2fs_read_single_page()
1896 bio = NULL; in f2fs_read_single_page()
1907 if (bio_add_page(bio, page, blocksize, 0) < blocksize) in f2fs_read_single_page()
1915 if (bio) { in f2fs_read_single_page()
1916 __submit_bio(F2FS_I_SB(inode), bio, DATA); in f2fs_read_single_page()
1917 bio = NULL; in f2fs_read_single_page()
1921 *bio_ret = bio; in f2fs_read_single_page()
1938 struct bio *bio = NULL; in f2fs_mpage_readpages() local
1965 ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio, in f2fs_mpage_readpages()
1977 if (bio) in f2fs_mpage_readpages()
1978 __submit_bio(F2FS_I_SB(inode), bio, DATA); in f2fs_mpage_readpages()
2250 struct bio **bio, in __write_data_page() argument
2277 .bio = bio, in __write_data_page()
2382 f2fs_submit_merged_ipu_write(sbi, bio, NULL); in __write_data_page()
2424 struct bio *bio = NULL; in f2fs_write_cache_pages() local
2512 ret = __write_data_page(page, &submitted, &bio, in f2fs_write_cache_pages()
2563 if (bio) in f2fs_write_cache_pages()
2564 f2fs_submit_merged_ipu_write(sbi, &bio, NULL); in f2fs_write_cache_pages()
2946 static void f2fs_dio_end_io(struct bio *bio) in f2fs_dio_end_io() argument
2948 struct f2fs_private_dio *dio = bio->bi_private; in f2fs_dio_end_io()
2953 bio->bi_private = dio->orig_private; in f2fs_dio_end_io()
2954 bio->bi_end_io = dio->orig_end_io; in f2fs_dio_end_io()
2958 bio_endio(bio); in f2fs_dio_end_io()
2961 static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode, in f2fs_dio_submit_bio() argument
2965 bool write = (bio_op(bio) == REQ_OP_WRITE); in f2fs_dio_submit_bio()
2973 dio->orig_end_io = bio->bi_end_io; in f2fs_dio_submit_bio()
2974 dio->orig_private = bio->bi_private; in f2fs_dio_submit_bio()
2977 bio->bi_end_io = f2fs_dio_end_io; in f2fs_dio_submit_bio()
2978 bio->bi_private = dio; in f2fs_dio_submit_bio()
2983 submit_bio(bio); in f2fs_dio_submit_bio()
2986 bio->bi_status = BLK_STS_IOERR; in f2fs_dio_submit_bio()
2987 bio_endio(bio); in f2fs_dio_submit_bio()