Lines matching refs: q (the struct request_queue pointer)
52 static inline bool bio_will_gap(struct request_queue *q, in bio_will_gap() argument
57 if (!bio_has_data(prev) || !queue_virt_boundary(q)) in bio_will_gap()
69 if (pb.bv_offset & queue_virt_boundary(q)) in bio_will_gap()
83 if (biovec_phys_mergeable(q, &pb, &nb)) in bio_will_gap()
85 return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset); in bio_will_gap()
90 return bio_will_gap(req->q, req, req->biotail, bio); in req_gap_back_merge()
95 return bio_will_gap(req->q, NULL, bio, req->bio); in req_gap_front_merge()
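The bio_will_gap(), req_gap_back_merge() and req_gap_front_merge() references above all revolve around the queue's virt boundary: bios may only share a request when the data would not leave a hole in the scatter/gather list, roughly meaning the previous segment must end on the boundary and the next must start on it (unless the two are physically contiguous, the biovec_phys_mergeable() case). Below is a minimal userspace sketch of that masking check; struct seg and gap_to_prev() are illustrative stand-ins, only the offset/length arithmetic mirrors the __bvec_gap_to_prev() call shown above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Userspace sketch of a virt-boundary gap check: two adjacent buffers can
 * share one DMA segment only if the first ends on the boundary and the
 * second starts on it.  struct seg and gap_to_prev() are illustrative
 * stand-ins, not kernel types.
 */
struct seg {
	uint32_t offset;	/* offset of the data within its page */
	uint32_t len;		/* length of the data */
};

static bool gap_to_prev(uint32_t virt_boundary_mask,
			const struct seg *prev, uint32_t next_offset)
{
	return (next_offset & virt_boundary_mask) ||
	       ((prev->offset + prev->len) & virt_boundary_mask);
}

int main(void)
{
	uint32_t mask = 4096 - 1;			/* 4 KiB virt boundary */
	struct seg prev = { .offset = 0, .len = 4096 };	/* ends on the boundary */

	printf("aligned next:   gap=%d\n", gap_to_prev(mask, &prev, 0));
	printf("unaligned next: gap=%d\n", gap_to_prev(mask, &prev, 512));
	return 0;
}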
426 if (queue_max_discard_segments(rq->q) > 1) { in blk_recalc_rq_segments()
441 bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes, in blk_recalc_rq_segments()
463 static unsigned blk_bvec_map_sg(struct request_queue *q, in blk_bvec_map_sg() argument
472 unsigned len = min(get_max_segment_size(&q->limits, in blk_bvec_map_sg()
508 __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec, in __blk_segment_map_sg_merge() argument
517 if ((*sg)->length + nbytes > queue_max_segment_size(q)) in __blk_segment_map_sg_merge()
520 if (!biovec_phys_mergeable(q, bvprv, bvec)) in __blk_segment_map_sg_merge()
528 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, in __blk_bios_map_sg() argument
545 __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg)) in __blk_bios_map_sg()
549 (!blk_queue_sub_page_limits(&q->limits) || in __blk_bios_map_sg()
550 bvec.bv_len <= q->limits.max_segment_size)) in __blk_bios_map_sg()
554 nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg); in __blk_bios_map_sg()
571 int __blk_rq_map_sg(struct request_queue *q, struct request *rq, in __blk_rq_map_sg() argument
579 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg); in __blk_rq_map_sg()
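The blk_bvec_map_sg(), __blk_segment_map_sg_merge(), __blk_bios_map_sg() and __blk_rq_map_sg() references above belong to the path that turns a request's bio_vecs into a scatterlist bounded by the queue's max_segment_size. As a rough illustration of why one bvec can become several sg entries, here is a self-contained sketch that splits a buffer at a maximum segment size; map_buffer() is a made-up name, and the real code additionally honours physical merging and boundary masks.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative split of one large buffer into scatter-gather entries no
 * longer than max_seg, the way a big bvec ends up as several sg entries.
 * This ignores the physical-merge and boundary checks the real code applies.
 */
static unsigned int map_buffer(uint64_t addr, uint32_t len, uint32_t max_seg)
{
	unsigned int nsegs = 0;

	while (len) {
		uint32_t seg = len < max_seg ? len : max_seg;

		printf("sg[%u]: addr=0x%llx len=%u\n",
		       nsegs, (unsigned long long)addr, seg);
		addr += seg;
		len -= seg;
		nsegs++;
	}
	return nsegs;
}

int main(void)
{
	/* a 160 KiB buffer against a 64 KiB max segment size -> 3 entries */
	printf("segments: %u\n", map_buffer(0x100000, 160 * 1024, 64 * 1024));
	return 0;
}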
597 struct request_queue *q = rq->q; in blk_rq_get_max_sectors() local
601 return q->limits.max_hw_sectors; in blk_rq_get_max_sectors()
603 max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); in blk_rq_get_max_sectors()
604 if (!q->limits.chunk_sectors || in blk_rq_get_max_sectors()
609 blk_chunk_sectors_left(offset, q->limits.chunk_sectors)); in blk_rq_get_max_sectors()
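blk_rq_get_max_sectors() above caps a request at max_hw_sectors and, when chunk_sectors is set, at whatever is left before the next chunk boundary (the blk_chunk_sectors_left() call). Assuming a power-of-two chunk size, that remainder is a mask-and-subtract, as in this small standalone sketch; the names here are illustrative, not the kernel's helpers.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the chunk_sectors clamp, assuming a power-of-two chunk size:
 * the sectors left before the next chunk boundary are the chunk size minus
 * the offset within the current chunk.
 */
static uint32_t chunk_sectors_left(uint64_t offset, uint32_t chunk_sectors)
{
	return chunk_sectors - (uint32_t)(offset & (chunk_sectors - 1));
}

int main(void)
{
	uint32_t chunk = 256;		/* 128 KiB chunks, in 512-byte sectors */
	uint64_t offset = 300;		/* request starts 44 sectors into a chunk */
	uint32_t max_hw_sectors = 1024;

	uint32_t left = chunk_sectors_left(offset, chunk);
	uint32_t cap = left < max_hw_sectors ? left : max_hw_sectors;

	printf("sectors left in chunk: %u, request capped at: %u\n", left, cap);
	return 0;
}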
618 if (blk_integrity_merge_bio(req->q, req, bio) == false) in ll_new_hw_segment()
636 req_set_nomerge(req->q, req); in ll_new_hw_segment()
651 req_set_nomerge(req->q, req); in ll_back_merge_fn()
670 req_set_nomerge(req->q, req); in ll_front_merge_fn()
677 static bool req_attempt_discard_merge(struct request_queue *q, struct request *req, in req_attempt_discard_merge() argument
682 if (segments >= queue_max_discard_segments(q)) in req_attempt_discard_merge()
691 req_set_nomerge(q, req); in req_attempt_discard_merge()
695 static int ll_merge_requests_fn(struct request_queue *q, struct request *req, in ll_merge_requests_fn() argument
717 if (blk_integrity_merge_rq(q, req, next) == false) in ll_merge_requests_fn()
811 static struct request *attempt_merge(struct request_queue *q, in attempt_merge() argument
840 if (!req_attempt_discard_merge(q, req, next)) in attempt_merge()
844 if (!ll_merge_requests_fn(q, req, next)) in attempt_merge()
878 elv_merge_requests(q, req, next); in attempt_merge()
897 static struct request *attempt_back_merge(struct request_queue *q, in attempt_back_merge() argument
900 struct request *next = elv_latter_request(q, rq); in attempt_back_merge()
903 return attempt_merge(q, rq, next); in attempt_back_merge()
908 static struct request *attempt_front_merge(struct request_queue *q, in attempt_front_merge() argument
911 struct request *prev = elv_former_request(q, rq); in attempt_front_merge()
914 return attempt_merge(q, prev, rq); in attempt_front_merge()
924 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq, in blk_attempt_req_merge() argument
927 return attempt_merge(q, rq, next); in blk_attempt_req_merge()
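attempt_back_merge() and attempt_front_merge() above fetch the neighbouring request from the elevator (elv_latter_request()/elv_former_request()) and hand both requests to attempt_merge(). A toy model of the adjacency such a request-request merge needs is sketched below; struct toy_rq and the contiguity predicate are simplifications, not the block layer's types or its full set of checks.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of request-request adjacency: a back merge glues next onto the
 * tail of req, a front merge glues prev onto the head of req.  Simplified
 * illustration only.
 */
struct toy_rq {
	uint64_t pos;		/* first sector */
	uint32_t sectors;	/* length in sectors */
};

static bool contiguous(const struct toy_rq *a, const struct toy_rq *b)
{
	return a->pos + a->sectors == b->pos;	/* a ends where b starts */
}

int main(void)
{
	struct toy_rq req  = { .pos = 1000, .sectors = 8 };
	struct toy_rq next = { .pos = 1008, .sectors = 8 };
	struct toy_rq prev = { .pos = 992,  .sectors = 8 };

	printf("back merge (req, next):  %d\n", contiguous(&req, &next));
	printf("front merge (prev, req): %d\n", contiguous(&prev, &req));
	return 0;
}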
947 if (blk_integrity_merge_bio(rq->q, rq, bio) == false) in blk_rq_merge_ok()
1000 rq_qos_merge(req->q, req, bio); in bio_attempt_back_merge()
1026 rq_qos_merge(req->q, req, bio); in bio_attempt_front_merge()
1045 static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q, in bio_attempt_discard_merge() argument
1050 if (segments >= queue_max_discard_segments(q)) in bio_attempt_discard_merge()
1056 rq_qos_merge(q, req, bio); in bio_attempt_discard_merge()
1066 req_set_nomerge(q, req); in bio_attempt_discard_merge()
1070 static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q, in blk_attempt_bio_merge() argument
1081 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) in blk_attempt_bio_merge()
1085 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) in blk_attempt_bio_merge()
1089 return bio_attempt_discard_merge(q, rq, bio); in blk_attempt_bio_merge()
1117 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, in blk_attempt_plug_merge() argument
1128 if (rq->q == q) { in blk_attempt_plug_merge()
1129 if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == in blk_attempt_plug_merge()
1149 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, in blk_bio_list_merge() argument
1159 switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) { in blk_bio_list_merge()
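blk_attempt_plug_merge() and blk_bio_list_merge() above scan a list of recently queued requests (the plug list, or a caller-provided list), skip entries whose rq->q is not the queue at hand, and stop at the first request the bio merges into. The sketch below models only that "same queue, then try to merge" walk with plain arrays; it is not the plug-list API.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy request: which queue it belongs to plus its sector range. */
struct toy_rq {
	int queue_id;
	uint64_t pos;
	uint32_t sectors;
};

static bool try_back_merge(struct toy_rq *rq, uint64_t bio_pos, uint32_t bio_sectors)
{
	if (rq->pos + rq->sectors != bio_pos)
		return false;
	rq->sectors += bio_sectors;	/* bio appended at the tail */
	return true;
}

static bool plug_merge(struct toy_rq *plugged, size_t n, int queue_id,
		       uint64_t bio_pos, uint32_t bio_sectors)
{
	for (size_t i = 0; i < n; i++) {
		if (plugged[i].queue_id != queue_id)
			continue;	/* belongs to another queue, skip */
		if (try_back_merge(&plugged[i], bio_pos, bio_sectors))
			return true;
	}
	return false;
}

int main(void)
{
	struct toy_rq plugged[] = {
		{ .queue_id = 1, .pos = 0,   .sectors = 8 },
		{ .queue_id = 2, .pos = 100, .sectors = 8 },
	};

	printf("merged: %d\n", plug_merge(plugged, 2, 2, 108, 8));
	return 0;
}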
1174 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, in blk_mq_sched_try_merge() argument
1179 switch (elv_merge(q, &rq, bio)) { in blk_mq_sched_try_merge()
1181 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
1185 *merged_request = attempt_back_merge(q, rq); in blk_mq_sched_try_merge()
1187 elv_merged_request(q, rq, ELEVATOR_BACK_MERGE); in blk_mq_sched_try_merge()
1190 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
1194 *merged_request = attempt_front_merge(q, rq); in blk_mq_sched_try_merge()
1196 elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE); in blk_mq_sched_try_merge()
1199 return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK; in blk_mq_sched_try_merge()
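blk_mq_sched_try_merge() above is the two-step case: elv_merge() picks a candidate request, the bio is merged into it, and attempt_back_merge() or attempt_front_merge() then checks whether the grown request has become contiguous with its neighbour so the two requests can be merged as well (otherwise elv_merged_request() just notifies the scheduler). Below is a toy model of that cascade; all names and types are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy request: a sector range that can absorb a bio and then a neighbour. */
struct toy_rq {
	uint64_t pos;
	uint32_t sectors;
};

static void back_merge_bio(struct toy_rq *rq, uint32_t bio_sectors)
{
	rq->sectors += bio_sectors;		/* bio appended at the tail */
}

static bool merge_with_next(struct toy_rq *rq, const struct toy_rq *next)
{
	if (rq->pos + rq->sectors != next->pos)
		return false;			/* still not contiguous */
	rq->sectors += next->sectors;		/* absorb the neighbour */
	return true;
}

int main(void)
{
	struct toy_rq rq   = { .pos = 0,  .sectors = 8 };
	struct toy_rq next = { .pos = 16, .sectors = 8 };

	back_merge_bio(&rq, 8);			/* bio covers sectors 8..15 */
	printf("cascade merge: %d, rq now %u sectors\n",
	       merge_with_next(&rq, &next), rq.sectors);
	return 0;
}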