Lines matching refs: bio (cross-reference into the Linux block layer's bio split and merge code, block/blk-merge.c)
18 struct request *prev_rq, struct bio *prev, struct bio *next) in bio_will_gap()
31 bio_get_first_bvec(prev_rq->bio, &pb); in bio_will_gap()
53 static inline bool req_gap_back_merge(struct request *req, struct bio *bio) in req_gap_back_merge() argument
55 return bio_will_gap(req->q, req, req->biotail, bio); in req_gap_back_merge()
58 static inline bool req_gap_front_merge(struct request *req, struct bio *bio) in req_gap_front_merge() argument
60 return bio_will_gap(req->q, NULL, bio, req->bio); in req_gap_front_merge()
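
The gap helpers above (bio_will_gap(), req_gap_back_merge(), req_gap_front_merge()) decide whether two bios can share one request without violating the queue's virt_boundary mask: the previous data segment must end on the boundary and the next one must start on it. A minimal userspace sketch of that test, assuming a hypothetical struct seg in place of struct bio_vec:

    #include <stdbool.h>
    #include <stdio.h>

    /* hypothetical stand-in for struct bio_vec: offset into the buffer + length */
    struct seg {
        unsigned int offset;
        unsigned int len;
    };

    /*
     * Simplified virt-boundary gap test: merging is refused when the previous
     * segment does not end on the boundary or the next one does not start on
     * it, because the device would then see a gap in the address pattern.
     */
    static bool segs_will_gap(unsigned long virt_boundary_mask,
                              const struct seg *prev, const struct seg *next)
    {
        if (!virt_boundary_mask)
            return false;   /* queue imposes no virt boundary */
        return ((prev->offset + prev->len) & virt_boundary_mask) ||
               (next->offset & virt_boundary_mask);
    }

    int main(void)
    {
        struct seg a = { .offset = 0, .len = 4096 };
        struct seg b = { .offset = 512, .len = 3584 };

        /* 4 KiB virt boundary (mask 4095): b starts mid-boundary, so it gaps */
        printf("gap: %d\n", segs_will_gap(4095, &a, &b));
        return 0;
    }
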
63 static struct bio *blk_bio_discard_split(struct request_queue *q, in blk_bio_discard_split()
64 struct bio *bio, in blk_bio_discard_split() argument
87 if (bio_sectors(bio) <= max_discard_sectors) in blk_bio_discard_split()
98 tmp = bio->bi_iter.bi_sector + split_sectors - alignment; in blk_bio_discard_split()
104 return bio_split(bio, split_sectors, GFP_NOIO, bs); in blk_bio_discard_split()
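
blk_bio_discard_split() first caps the discard at max_discard_sectors (itself trimmed to a multiple of the discard granularity) and then, as the tmp arithmetic at line 98 shows, pulls the split point back so the remainder begins on a granularity boundary relative to discard_alignment. A rough userspace rendering of that arithmetic, with all values in 512-byte sectors and the function name discard_split_sectors() invented for illustration:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /*
     * Simplified split-point calculation for a discard, mirroring the shape
     * of blk_bio_discard_split().  Returns 0 when no split is needed.
     */
    static unsigned int discard_split_sectors(sector_t start,
                                              unsigned int nr_sectors,
                                              unsigned int max_discard_sectors,
                                              unsigned int granularity,
                                              unsigned int alignment)
    {
        unsigned int split_sectors;
        sector_t tmp;

        /* the cap itself must be a multiple of the granularity */
        max_discard_sectors -= max_discard_sectors % granularity;
        if (!max_discard_sectors || nr_sectors <= max_discard_sectors)
            return 0;

        split_sectors = max_discard_sectors;

        /* pull the split point back so the *next* discard starts aligned */
        tmp = (start + split_sectors - alignment) % granularity;
        if (split_sectors > tmp)
            split_sectors -= tmp;

        return split_sectors;
    }

    int main(void)
    {
        /* e.g. 4 KiB granularity (8 sectors), no extra alignment, 1 MiB cap */
        printf("split at %u sectors\n",
               discard_split_sectors(5, 8192, 2048, 8, 0));
        return 0;
    }
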
107 static struct bio *blk_bio_write_zeroes_split(struct request_queue *q, in blk_bio_write_zeroes_split()
108 struct bio *bio, struct bio_set *bs, unsigned *nsegs) in blk_bio_write_zeroes_split() argument
115 if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors) in blk_bio_write_zeroes_split()
118 return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs); in blk_bio_write_zeroes_split()
121 static struct bio *blk_bio_write_same_split(struct request_queue *q, in blk_bio_write_same_split()
122 struct bio *bio, in blk_bio_write_same_split() argument
131 if (bio_sectors(bio) <= q->limits.max_write_same_sectors) in blk_bio_write_same_split()
134 return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs); in blk_bio_write_same_split()
146 struct bio *bio) in get_max_io_size() argument
148 unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0); in get_max_io_size()
152 unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1); in get_max_io_size()
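
get_max_io_size() starts from the per-offset limit (blk_max_size_offset()) and rounds it so the split ends on a physical-block boundary relative to the bio's start sector, falling back to logical-block alignment when that rounding would leave nothing. A small sketch of the rounding, assuming power-of-two block sizes expressed in sectors (max_io_sectors() is a made-up name):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /*
     * Simplified get_max_io_size(): "sectors" is the raw limit for this
     * offset, pbs/lbs are the physical/logical block sizes in sectors.
     */
    static unsigned int max_io_sectors(sector_t start, unsigned int sectors,
                                       unsigned int pbs, unsigned int lbs)
    {
        unsigned int start_offset = start & (pbs - 1);
        unsigned int max_sectors = sectors;

        /* round the end of the I/O down to a physical block boundary */
        max_sectors += start_offset;
        max_sectors &= ~(pbs - 1);
        if (max_sectors > start_offset)
            return max_sectors - start_offset;

        /* too small to keep physical alignment: settle for logical alignment */
        return sectors & ~(lbs - 1);
    }

    int main(void)
    {
        /* start sector 3, 255-sector limit, 4 KiB physical / 512 B logical */
        printf("%u sectors\n", max_io_sectors(3, 255, 8, 1));
        return 0;
    }
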
246 static struct bio *blk_bio_segment_split(struct request_queue *q, in blk_bio_segment_split()
247 struct bio *bio, in blk_bio_segment_split() argument
254 const unsigned max_sectors = get_max_io_size(q, bio); in blk_bio_segment_split()
257 bio_for_each_bvec(bv, bio, iter) { in blk_bio_segment_split()
287 if (bio->bi_opf & REQ_NOWAIT) { in blk_bio_segment_split()
288 bio->bi_status = BLK_STS_AGAIN; in blk_bio_segment_split()
289 bio_endio(bio); in blk_bio_segment_split()
300 bio_clear_hipri(bio); in blk_bio_segment_split()
302 return bio_split(bio, sectors, GFP_NOIO, bs); in blk_bio_segment_split()
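
blk_bio_segment_split() walks the bio with bio_for_each_bvec(), accumulating segments and sectors until max_segments or max_sectors (from get_max_io_size()) would be exceeded, and splits there; the real loop also splits inside an oversized bvec and handles the REQ_NOWAIT and hipri cases listed above. A stripped-down sketch of just the accumulation, over an array of segment lengths:

    #include <stdio.h>

    /*
     * Given segment lengths in bytes, find how many sectors fit before the
     * queue limits are exceeded.  Returns 0 when no split is needed.
     */
    static unsigned int split_point_sectors(const unsigned int *seg_len, int nr,
                                            unsigned int max_segments,
                                            unsigned int max_sectors)
    {
        unsigned int nsegs = 0, sectors = 0;
        int i;

        for (i = 0; i < nr; i++) {
            unsigned int seg_sectors = seg_len[i] >> 9;

            if (nsegs + 1 > max_segments ||
                sectors + seg_sectors > max_sectors)
                return sectors;     /* split in front of segment i */
            nsegs++;
            sectors += seg_sectors;
        }
        return 0;                   /* the whole bio fits */
    }

    int main(void)
    {
        unsigned int segs[] = { 65536, 65536, 65536 };  /* 3 x 64 KiB */

        /* cap at 256 sectors (128 KiB): the third segment forces a split */
        printf("split at %u sectors\n",
               split_point_sectors(segs, 3, 128, 256));
        return 0;
    }
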
317 void __blk_queue_split(struct bio **bio, unsigned int *nr_segs) in __blk_queue_split() argument
319 struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue; in __blk_queue_split()
320 struct bio *split = NULL; in __blk_queue_split()
322 switch (bio_op(*bio)) { in __blk_queue_split()
325 split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs); in __blk_queue_split()
328 split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, in __blk_queue_split()
332 split = blk_bio_write_same_split(q, *bio, &q->bio_split, in __blk_queue_split()
341 (*bio)->bi_vcnt == 1 && in __blk_queue_split()
343 (*bio)->bi_io_vec->bv_len <= q->limits.max_segment_size) && in __blk_queue_split()
344 ((*bio)->bi_io_vec[0].bv_len + in __blk_queue_split()
345 (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) { in __blk_queue_split()
347 (*bio)->bi_io_vec[0].bv_len); in __blk_queue_split()
350 split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs); in __blk_queue_split()
352 *bio = split = NULL; in __blk_queue_split()
360 bio_chain(split, *bio); in __blk_queue_split()
361 trace_block_split(split, (*bio)->bi_iter.bi_sector); in __blk_queue_split()
362 submit_bio_noacct(*bio); in __blk_queue_split()
363 *bio = split; in __blk_queue_split()
365 blk_throtl_charge_bio_split(*bio); in __blk_queue_split()
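
Whichever splitter ran, the tail of __blk_queue_split() follows one pattern: split off the front portion, chain it to the remainder with bio_chain(), resubmit the remainder via submit_bio_noacct(), and hand the front portion back to the caller through *bio. A toy userspace model of that ordering, with toy_* structures standing in for the kernel objects:

    #include <stdio.h>
    #include <stdlib.h>

    /* toy stand-ins for the kernel objects, for illustration only */
    struct toy_bio {
        unsigned long long sector;
        unsigned int nr_sectors;
        struct toy_bio *parent;     /* completion is chained to this bio */
    };

    static struct toy_bio *toy_split(struct toy_bio *bio, unsigned int sectors)
    {
        struct toy_bio *front = malloc(sizeof(*front));

        /* the front part covers [sector, sector + sectors) ... */
        front->sector = bio->sector;
        front->nr_sectors = sectors;
        front->parent = NULL;

        /* ... and the original bio is advanced to the remainder */
        bio->sector += sectors;
        bio->nr_sectors -= sectors;
        return front;
    }

    static void toy_submit(struct toy_bio *bio)
    {
        printf("submit %llu+%u\n", bio->sector, bio->nr_sectors);
    }

    /*
     * Mirrors the tail of __blk_queue_split(): split off what fits, chain the
     * front to the remainder, resubmit the remainder, continue with the front.
     */
    static void toy_queue_split(struct toy_bio **bio, unsigned int max_sectors)
    {
        if ((*bio)->nr_sectors > max_sectors) {
            struct toy_bio *split = toy_split(*bio, max_sectors);

            split->parent = *bio;   /* ~ bio_chain(split, *bio) */
            toy_submit(*bio);       /* ~ submit_bio_noacct(*bio) */
            *bio = split;           /* caller continues with the front part */
        }
    }

    int main(void)
    {
        struct toy_bio b = { .sector = 0, .nr_sectors = 1000 };
        struct toy_bio *bio = &b;

        toy_queue_split(&bio, 256);
        toy_submit(bio);
        if (bio != &b)
            free(bio);
        return 0;
    }
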
379 void blk_queue_split(struct bio **bio) in blk_queue_split() argument
383 __blk_queue_split(bio, &nr_segs); in blk_queue_split()
394 if (!rq->bio) in blk_recalc_rq_segments()
397 switch (bio_op(rq->bio)) { in blk_recalc_rq_segments()
401 struct bio *bio = rq->bio; in blk_recalc_rq_segments() local
403 for_each_bio(bio) in blk_recalc_rq_segments()
502 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, in __blk_bios_map_sg() argument
511 for_each_bio(bio) { in __blk_bios_map_sg()
512 bio_for_each_bvec(bvec, bio, iter) { in __blk_bios_map_sg()
532 if (likely(bio->bi_iter.bi_size)) { in __blk_bios_map_sg()
552 else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME) in __blk_rq_map_sg()
553 nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg); in __blk_rq_map_sg()
554 else if (rq->bio) in __blk_rq_map_sg()
555 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg); in __blk_rq_map_sg()
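
__blk_bios_map_sg() flattens a request's bio chain into a scatterlist, starting a new scatter element only when the next chunk of data is not physically contiguous with the previous one or would push the element past max_segment_size. A simplified sketch of that coalescing over plain (address, length) pairs, with toy_vec/toy_sg invented for the example (the real code also honors the queue's segment boundary mask):

    #include <stdio.h>

    /* hypothetical flattened view of a request's data: physical addr + length */
    struct toy_vec { unsigned long long addr; unsigned int len; };
    struct toy_sg  { unsigned long long addr; unsigned int len; };

    /*
     * Walk the vectors and open a new scatter entry only when the data is not
     * contiguous with the previous entry or the entry would grow too large.
     */
    static int toy_map_sg(const struct toy_vec *vec, int nr,
                          struct toy_sg *sg, unsigned int max_seg_size)
    {
        int nsegs = 0;
        int i;

        for (i = 0; i < nr; i++) {
            if (nsegs &&
                sg[nsegs - 1].addr + sg[nsegs - 1].len == vec[i].addr &&
                sg[nsegs - 1].len + vec[i].len <= max_seg_size) {
                sg[nsegs - 1].len += vec[i].len;    /* merge into previous */
                continue;
            }
            sg[nsegs].addr = vec[i].addr;
            sg[nsegs].len = vec[i].len;
            nsegs++;
        }
        return nsegs;
    }

    int main(void)
    {
        struct toy_vec vec[] = {
            { 0x10000, 4096 }, { 0x11000, 4096 },   /* contiguous pair */
            { 0x40000, 4096 },                      /* gap: new entry */
        };
        struct toy_sg sg[3];

        printf("%d scatter entries\n", toy_map_sg(vec, 3, sg, 65536));
        return 0;
    }
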
577 static inline int ll_new_hw_segment(struct request *req, struct bio *bio, in ll_new_hw_segment() argument
580 if (!blk_cgroup_mergeable(req, bio)) in ll_new_hw_segment()
583 if (blk_integrity_merge_bio(req->q, req, bio) == false) in ll_new_hw_segment()
605 int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs) in ll_back_merge_fn() argument
607 if (req_gap_back_merge(req, bio)) in ll_back_merge_fn()
610 integrity_req_gap_back_merge(req, bio)) in ll_back_merge_fn()
612 if (!bio_crypt_ctx_back_mergeable(req, bio)) in ll_back_merge_fn()
614 if (blk_rq_sectors(req) + bio_sectors(bio) > in ll_back_merge_fn()
620 return ll_new_hw_segment(req, bio, nr_segs); in ll_back_merge_fn()
623 static int ll_front_merge_fn(struct request *req, struct bio *bio, in ll_front_merge_fn() argument
626 if (req_gap_front_merge(req, bio)) in ll_front_merge_fn()
629 integrity_req_gap_front_merge(req, bio)) in ll_front_merge_fn()
631 if (!bio_crypt_ctx_front_mergeable(req, bio)) in ll_front_merge_fn()
633 if (blk_rq_sectors(req) + bio_sectors(bio) > in ll_front_merge_fn()
634 blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) { in ll_front_merge_fn()
639 return ll_new_hw_segment(req, bio, nr_segs); in ll_front_merge_fn()
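
ll_back_merge_fn() and ll_front_merge_fn() gate a bio-into-request merge on a series of vetoes: segment gaps, integrity and crypto context compatibility, the request's maximum sector count, and (via ll_new_hw_segment()) the maximum segment count. A condensed userspace sketch of the two size checks alone, with toy_* names invented here:

    #include <stdbool.h>
    #include <stdio.h>

    /* hypothetical summary of the fields the size checks actually look at */
    struct toy_req {
        unsigned int sectors;           /* blk_rq_sectors() */
        unsigned int nr_phys_segments;
    };

    /*
     * Condensed version of the ll_back_merge_fn()/ll_new_hw_segment() size
     * and segment-count checks (gap/integrity/crypto checks omitted).
     */
    static bool toy_may_merge(const struct toy_req *req,
                              unsigned int bio_sectors, unsigned int bio_segs,
                              unsigned int max_sectors, unsigned int max_segments)
    {
        if (req->sectors + bio_sectors > max_sectors)
            return false;
        if (req->nr_phys_segments + bio_segs > max_segments)
            return false;
        return true;
    }

    int main(void)
    {
        struct toy_req req = { .sectors = 1016, .nr_phys_segments = 4 };

        /* 16 more sectors would exceed a 1024-sector request limit */
        printf("merge ok: %d\n", toy_may_merge(&req, 16, 1, 1024, 128));
        return 0;
    }
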
649 if (blk_rq_sectors(req) + bio_sectors(next->bio) > in req_attempt_discard_merge()
665 if (req_gap_back_merge(req, next->bio)) in ll_merge_requests_fn()
679 if (!blk_cgroup_mergeable(req, next->bio)) in ll_merge_requests_fn()
705 struct bio *bio; in blk_rq_set_mixed_merge() local
715 for (bio = rq->bio; bio; bio = bio->bi_next) { in blk_rq_set_mixed_merge()
716 WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) && in blk_rq_set_mixed_merge()
717 (bio->bi_opf & REQ_FAILFAST_MASK) != ff); in blk_rq_set_mixed_merge()
718 bio->bi_opf |= ff; in blk_rq_set_mixed_merge()
761 !blk_write_same_mergeable(req->bio, next->bio)) in attempt_merge()
816 req->biotail->bi_next = next->bio; in attempt_merge()
837 next->bio = NULL; in attempt_merge()
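
When two whole requests merge, attempt_merge() splices next's bio chain onto req (line 816) and clears next's bio pointer (line 837) so only one request owns the bios; blk_rq_set_mixed_merge() meanwhile propagates a consistent set of REQ_FAILFAST_* flags across every bio in the chain. A toy version of the splice:

    #include <stdio.h>

    struct toy_bio { unsigned int bytes; struct toy_bio *bi_next; };

    struct toy_req {
        struct toy_bio *bio;        /* head of the bio chain */
        struct toy_bio *biotail;    /* tail, for O(1) appends */
        unsigned int data_len;
    };

    /*
     * The splice at the heart of attempt_merge(): append next's whole bio
     * chain to req and transfer ownership (next ends up with no bios).
     */
    static void toy_merge_requests(struct toy_req *req, struct toy_req *next)
    {
        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;
        req->data_len += next->data_len;
        next->bio = NULL;
        next->biotail = NULL;
        next->data_len = 0;
    }

    int main(void)
    {
        struct toy_bio a = { 4096, NULL }, b = { 8192, NULL };
        struct toy_req r1 = { &a, &a, 4096 };
        struct toy_req r2 = { &b, &b, 8192 };

        toy_merge_requests(&r1, &r2);
        printf("merged request: %u bytes\n", r1.data_len);  /* 12288 */
        return 0;
    }
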
874 bool blk_rq_merge_ok(struct request *rq, struct bio *bio) in blk_rq_merge_ok() argument
876 if (!rq_mergeable(rq) || !bio_mergeable(bio)) in blk_rq_merge_ok()
879 if (req_op(rq) != bio_op(bio)) in blk_rq_merge_ok()
883 if (bio_data_dir(bio) != rq_data_dir(rq)) in blk_rq_merge_ok()
887 if (rq->rq_disk != bio->bi_bdev->bd_disk) in blk_rq_merge_ok()
891 if (!blk_cgroup_mergeable(rq, bio)) in blk_rq_merge_ok()
895 if (blk_integrity_merge_bio(rq->q, rq, bio) == false) in blk_rq_merge_ok()
899 if (!bio_crypt_rq_ctx_compatible(rq, bio)) in blk_rq_merge_ok()
904 !blk_write_same_mergeable(rq->bio, bio)) in blk_rq_merge_ok()
911 if (rq->write_hint != bio->bi_write_hint) in blk_rq_merge_ok()
914 if (rq->ioprio != bio_prio(bio)) in blk_rq_merge_ok()
920 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio) in blk_try_merge() argument
924 else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector) in blk_try_merge()
926 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector) in blk_try_merge()
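
blk_try_merge() classifies a candidate purely by position: a bio starting exactly where the request ends is a back-merge candidate, one ending exactly where the request starts is a front-merge candidate, and anything else falls through (to a discard merge or no merge at all). The same arithmetic in a standalone sketch:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    enum toy_merge { TOY_NO_MERGE, TOY_BACK_MERGE, TOY_FRONT_MERGE };

    /*
     * The positional test from blk_try_merge(): back merge if the bio starts
     * where the request ends, front merge if it ends where the request starts.
     */
    static enum toy_merge toy_try_merge(sector_t rq_pos, unsigned int rq_sectors,
                                        sector_t bio_pos, unsigned int bio_sectors)
    {
        if (rq_pos + rq_sectors == bio_pos)
            return TOY_BACK_MERGE;
        if (rq_pos - bio_sectors == bio_pos)
            return TOY_FRONT_MERGE;
        return TOY_NO_MERGE;
    }

    int main(void)
    {
        /* request covers [100, 108); bio at [108, 116) -> back merge (1) */
        printf("%d\n", toy_try_merge(100, 8, 108, 8));
        /* bio at [92, 100) -> front merge (2) */
        printf("%d\n", toy_try_merge(100, 8, 92, 8));
        return 0;
    }
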
948 struct bio *bio, unsigned int nr_segs) in bio_attempt_back_merge() argument
950 const int ff = bio->bi_opf & REQ_FAILFAST_MASK; in bio_attempt_back_merge()
952 if (!ll_back_merge_fn(req, bio, nr_segs)) in bio_attempt_back_merge()
955 trace_block_bio_backmerge(bio); in bio_attempt_back_merge()
956 rq_qos_merge(req->q, req, bio); in bio_attempt_back_merge()
961 req->biotail->bi_next = bio; in bio_attempt_back_merge()
962 req->biotail = bio; in bio_attempt_back_merge()
963 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_back_merge()
965 bio_crypt_free_ctx(bio); in bio_attempt_back_merge()
972 struct bio *bio, unsigned int nr_segs) in bio_attempt_front_merge() argument
974 const int ff = bio->bi_opf & REQ_FAILFAST_MASK; in bio_attempt_front_merge()
976 if (!ll_front_merge_fn(req, bio, nr_segs)) in bio_attempt_front_merge()
979 trace_block_bio_frontmerge(bio); in bio_attempt_front_merge()
980 rq_qos_merge(req->q, req, bio); in bio_attempt_front_merge()
985 bio->bi_next = req->bio; in bio_attempt_front_merge()
986 req->bio = bio; in bio_attempt_front_merge()
988 req->__sector = bio->bi_iter.bi_sector; in bio_attempt_front_merge()
989 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_front_merge()
991 bio_crypt_do_front_merge(req, bio); in bio_attempt_front_merge()
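
bio_attempt_back_merge() and bio_attempt_front_merge() then perform the actual list surgery: a back merge appends the bio at biotail and grows __data_len, while a front merge installs the bio as the new head and also rewinds __sector. A toy model of both updates (toy_* names are invented):

    #include <stdio.h>

    typedef unsigned long long sector_t;

    struct toy_bio {
        sector_t sector;
        unsigned int bytes;
        struct toy_bio *bi_next;
    };

    struct toy_req {
        struct toy_bio *bio, *biotail;
        sector_t sector;
        unsigned int data_len;
    };

    /* back merge: the bio goes after biotail; only the length grows */
    static void toy_back_merge(struct toy_req *req, struct toy_bio *bio)
    {
        req->biotail->bi_next = bio;
        req->biotail = bio;
        req->data_len += bio->bytes;
    }

    /* front merge: the bio becomes the new head; the start sector moves back */
    static void toy_front_merge(struct toy_req *req, struct toy_bio *bio)
    {
        bio->bi_next = req->bio;
        req->bio = bio;
        req->sector = bio->sector;
        req->data_len += bio->bytes;
    }

    int main(void)
    {
        struct toy_bio head = { 108, 4096, NULL };
        struct toy_bio tail = { 116, 4096, NULL };
        struct toy_bio front = { 100, 4096, NULL };
        struct toy_req req = { &head, &head, 108, 4096 };

        toy_back_merge(&req, &tail);    /* request now covers [108, 124) */
        toy_front_merge(&req, &front);  /* and now [100, 124) */
        printf("request: start %llu, %u bytes\n", req.sector, req.data_len);
        return 0;
    }
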
998 struct request *req, struct bio *bio) in bio_attempt_discard_merge() argument
1004 if (blk_rq_sectors(req) + bio_sectors(bio) > in bio_attempt_discard_merge()
1008 rq_qos_merge(q, req, bio); in bio_attempt_discard_merge()
1010 req->biotail->bi_next = bio; in bio_attempt_discard_merge()
1011 req->biotail = bio; in bio_attempt_discard_merge()
1012 req->__data_len += bio->bi_iter.bi_size; in bio_attempt_discard_merge()
1024 struct bio *bio, in blk_attempt_bio_merge() argument
1028 if (!blk_rq_merge_ok(rq, bio)) in blk_attempt_bio_merge()
1031 switch (blk_try_merge(rq, bio)) { in blk_attempt_bio_merge()
1033 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) in blk_attempt_bio_merge()
1034 return bio_attempt_back_merge(rq, bio, nr_segs); in blk_attempt_bio_merge()
1037 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) in blk_attempt_bio_merge()
1038 return bio_attempt_front_merge(rq, bio, nr_segs); in blk_attempt_bio_merge()
1041 return bio_attempt_discard_merge(q, rq, bio); in blk_attempt_bio_merge()
1071 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, in blk_attempt_plug_merge() argument
1078 plug = blk_mq_plug(q, bio); in blk_attempt_plug_merge()
1097 if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == in blk_attempt_plug_merge()
1110 struct bio *bio, unsigned int nr_segs) in blk_bio_list_merge() argument
1119 switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) { in blk_bio_list_merge()
1134 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, in blk_mq_sched_try_merge() argument
1139 switch (elv_merge(q, &rq, bio)) { in blk_mq_sched_try_merge()
1141 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
1143 if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK) in blk_mq_sched_try_merge()
1150 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
1152 if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK) in blk_mq_sched_try_merge()
1159 return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK; in blk_mq_sched_try_merge()
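
The remaining helpers try those primitives against candidate requests: blk_attempt_plug_merge() walks the current task's plug list and blk_bio_list_merge() scans a request list newest-first, giving up after a handful of misses so submission never degenerates into a long linear search. A rough sketch of that bounded scan, with a back-merge-only toy_attempt_merge() standing in for blk_attempt_bio_merge():

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long long sector_t;

    struct toy_req { sector_t pos; unsigned int sectors; };

    /* hypothetical per-request merge attempt: back merge only, for brevity */
    static bool toy_attempt_merge(struct toy_req *rq, sector_t bio_pos,
                                  unsigned int bio_sectors)
    {
        if (rq->pos + rq->sectors == bio_pos) {
            rq->sectors += bio_sectors;
            return true;
        }
        return false;
    }

    /*
     * Shape of blk_bio_list_merge(): walk the most recently added requests
     * first and stop after a few misses, so a long list stays cheap to scan.
     */
    static bool toy_list_merge(struct toy_req *reqs, int nr,
                               sector_t bio_pos, unsigned int bio_sectors)
    {
        int checked = 0;
        int i;

        for (i = nr - 1; i >= 0 && checked < 8; i--, checked++) {
            if (toy_attempt_merge(&reqs[i], bio_pos, bio_sectors))
                return true;
        }
        return false;
    }

    int main(void)
    {
        struct toy_req reqs[] = { { 0, 8 }, { 64, 8 }, { 128, 8 } };

        /* a bio at sector 72 extends the middle request */
        printf("merged: %d\n", toy_list_merge(reqs, 3, 72, 8));
        return 0;
    }
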