Lines matching references to the identifier q (the struct request_queue pointer) in the block layer's merge and split code, block/blk-merge.c

19 static inline bool bio_will_gap(struct request_queue *q,  in bio_will_gap()  argument
24 if (!bio_has_data(prev) || !queue_virt_boundary(q)) in bio_will_gap()
36 if (pb.bv_offset & queue_virt_boundary(q)) in bio_will_gap()
50 if (biovec_phys_mergeable(q, &pb, &nb)) in bio_will_gap()
52 return __bvec_gap_to_prev(q, &pb, nb.bv_offset); in bio_will_gap()
57 return bio_will_gap(req->q, req, req->biotail, bio); in req_gap_back_merge()
62 return bio_will_gap(req->q, NULL, bio, req->bio); in req_gap_front_merge()
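
The three entries above implement the virt-boundary gap test: a bio may only be merged onto a request if the device's virtual boundary mask lets its first segment follow the request's last one without a gap. A minimal standalone sketch of that test, with simplified stand-in types (the real code works on struct bio_vec and reads the mask with queue_virt_boundary(q)):

    #include <stdbool.h>

    struct seg {
            unsigned int offset;    /* offset of the data within its page */
            unsigned int len;       /* length of the segment in bytes */
    };

    /*
     * Simplified version of the check behind __bvec_gap_to_prev(): with a
     * non-zero virtual boundary mask, the previous segment must end on the
     * boundary and the next segment must start on it, otherwise the two
     * cannot be handled as one contiguous DMA segment.
     */
    static bool gap_to_prev(unsigned long virt_boundary,
                            const struct seg *prev, unsigned int next_offset)
    {
            if (!virt_boundary)
                    return false;   /* queue imposes no gap restriction */
            return (next_offset & virt_boundary) ||
                   ((prev->offset + prev->len) & virt_boundary);
    }
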
65 static struct bio *blk_bio_discard_split(struct request_queue *q, in blk_bio_discard_split() argument
78 granularity = max(q->limits.discard_granularity >> 9, 1U); in blk_bio_discard_split()
80 max_discard_sectors = min(q->limits.max_discard_sectors, in blk_bio_discard_split()
81 bio_allowed_max_sectors(q)); in blk_bio_discard_split()
98 alignment = (q->limits.discard_alignment >> 9) % granularity; in blk_bio_discard_split()
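
The blk_bio_discard_split() lines above size a discard split so that it honours the queue's discard limits. A sketch of that arithmetic, assuming all values are already in 512-byte sectors (the real code derives granularity and alignment from q->limits with the ">> 9" shifts shown, and additionally bounds the result by bio_allowed_max_sectors(q)); the helper name is illustrative, not the kernel's:

    /*
     * Pick how many sectors to split off the front of an oversized discard:
     * cap at max_discard_sectors, round down to whole discard granules, then
     * trim so that the remainder starts on a granule boundary relative to
     * the discard alignment.
     */
    static unsigned int discard_split_sectors(unsigned long long start_sector,
                                              unsigned int granularity,
                                              unsigned int alignment,
                                              unsigned int max_discard_sectors)
    {
            unsigned int split, rem;

            if (!granularity)
                    granularity = 1;        /* mirrors the max(..., 1U) above */

            split = max_discard_sectors;
            split -= split % granularity;   /* whole granules only */

            rem = (unsigned int)((start_sector + split - alignment) % granularity);
            if (split > rem)
                    split -= rem;           /* remainder stays granule-aligned */
            return split;
    }
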
109 static struct bio *blk_bio_write_zeroes_split(struct request_queue *q, in blk_bio_write_zeroes_split() argument
114 if (!q->limits.max_write_zeroes_sectors) in blk_bio_write_zeroes_split()
117 if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors) in blk_bio_write_zeroes_split()
120 return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs); in blk_bio_write_zeroes_split()
123 static struct bio *blk_bio_write_same_split(struct request_queue *q, in blk_bio_write_same_split() argument
130 if (!q->limits.max_write_same_sectors) in blk_bio_write_same_split()
133 if (bio_sectors(bio) <= q->limits.max_write_same_sectors) in blk_bio_write_same_split()
136 return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs); in blk_bio_write_same_split()
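
blk_bio_write_zeroes_split() and blk_bio_write_same_split() above share one pattern: do nothing if the queue advertises no limit for the operation (a zero limit means the device does not support it, and the bio is rejected later), do nothing if the bio already fits, and otherwise split at exactly the limit. A sketch of that decision, with the return value standing in for the sector count handed to bio_split():

    /* 0 means "do not split"; otherwise split off this many sectors. */
    static unsigned int op_split_sectors(unsigned int bio_sectors,
                                         unsigned int max_op_sectors)
    {
            if (!max_op_sectors)
                    return 0;               /* op unsupported, failed later */
            if (bio_sectors <= max_op_sectors)
                    return 0;               /* fits as-is */
            return max_op_sectors;          /* cut at the queue limit */
    }
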
147 static inline unsigned get_max_io_size(struct request_queue *q, in get_max_io_size() argument
150 unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0); in get_max_io_size()
152 unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT; in get_max_io_size()
153 unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT; in get_max_io_size()
164 static inline unsigned get_max_segment_size(const struct request_queue *q, in get_max_segment_size() argument
168 unsigned long mask = queue_segment_boundary(q); in get_max_segment_size()
177 (unsigned long)queue_max_segment_size(q)); in get_max_segment_size()
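
get_max_segment_size() above bounds a segment by two queue limits at once: it may not cross the segment boundary mask and may not exceed the maximum segment size, where a zero value means that limit does not apply. A standalone sketch of that arithmetic, with phys_addr standing in for page_to_phys(start_page) + offset:

    static unsigned long max_seg_size(unsigned long phys_addr,
                                      unsigned long boundary_mask,
                                      unsigned long max_segment_size)
    {
            /* bytes left before the physical address crosses the boundary */
            unsigned long room = boundary_mask - (phys_addr & boundary_mask) + 1;

            /* min_not_zero() in the real code: zero means "no limit" */
            if (!room)
                    return max_segment_size;
            if (!max_segment_size)
                    return room;
            return room < max_segment_size ? room : max_segment_size;
    }
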
200 static bool bvec_split_segs(const struct request_queue *q, in bvec_split_segs() argument
211 seg_size = get_max_segment_size(q, bv->bv_page, in bvec_split_segs()
219 if ((bv->bv_offset + total_len) & queue_virt_boundary(q)) in bvec_split_segs()
248 static struct bio *blk_bio_segment_split(struct request_queue *q, in blk_bio_segment_split() argument
256 const unsigned max_sectors = get_max_io_size(q, bio); in blk_bio_segment_split()
257 const unsigned max_segs = queue_max_segments(q); in blk_bio_segment_split()
264 if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset)) in blk_bio_segment_split()
272 } else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs, in blk_bio_segment_split()
303 struct request_queue *q = (*bio)->bi_disk->queue; in __blk_queue_split() local
309 split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs); in __blk_queue_split()
312 split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, in __blk_queue_split()
316 split = blk_bio_write_same_split(q, *bio, &q->bio_split, in __blk_queue_split()
328 if (!q->limits.chunk_sectors && in __blk_queue_split()
335 split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs); in __blk_queue_split()
344 trace_block_split(q, split, (*bio)->bi_iter.bi_sector); in __blk_queue_split()
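
__blk_queue_split() above picks a split strategy by operation type and, for ordinary reads and writes, falls through to blk_bio_segment_split(), which walks the bio and stops as soon as the queue's segment count or I/O size limit would be exceeded. A sketch of that accounting loop (the per-segment gap check and the intra-segment splitting done by bvec_split_segs() are omitted); the array-of-sector-counts interface is an assumption made to keep the sketch self-contained:

    /*
     * Walk the segments of a bio (given here as per-segment sector counts)
     * and return the sector offset at which it must be split, or 0 if the
     * whole bio fits within max_segs segments and max_sectors sectors.
     */
    static unsigned int find_split_sectors(const unsigned int *seg_sectors,
                                           unsigned int nr_segs,
                                           unsigned int max_segs,
                                           unsigned int max_sectors)
    {
            unsigned int i, nsegs = 0, sectors = 0;

            for (i = 0; i < nr_segs; i++) {
                    if (nsegs + 1 > max_segs ||
                        sectors + seg_sectors[i] > max_sectors)
                            return sectors;     /* split in front of segment i */
                    nsegs++;
                    sectors += seg_sectors[i];
            }
            return 0;                           /* no split needed */
    }
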
383 if (queue_max_discard_segments(rq->q) > 1) { in blk_recalc_rq_segments()
398 bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors, in blk_recalc_rq_segments()
420 static unsigned blk_bvec_map_sg(struct request_queue *q, in blk_bvec_map_sg() argument
429 unsigned len = min(get_max_segment_size(q, bvec->bv_page, in blk_bvec_map_sg()
465 __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec, in __blk_segment_map_sg_merge() argument
474 if ((*sg)->length + nbytes > queue_max_segment_size(q)) in __blk_segment_map_sg_merge()
477 if (!biovec_phys_mergeable(q, bvprv, bvec)) in __blk_segment_map_sg_merge()
485 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, in __blk_bios_map_sg() argument
502 __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg)) in __blk_bios_map_sg()
508 nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg); in __blk_bios_map_sg()
525 int __blk_rq_map_sg(struct request_queue *q, struct request *rq, in __blk_rq_map_sg() argument
535 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg); in __blk_rq_map_sg()
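
The scatter-gather mapping lines above (__blk_bios_map_sg(), __blk_segment_map_sg_merge(), blk_bvec_map_sg()) build hardware segments by folding physically contiguous chunks into the scatterlist entry currently being built, as long as that entry stays within the queue's maximum segment size. A simplified sketch of the merge test, using a stand-in sg_entry type; the virt-boundary part of biovec_phys_mergeable() is left out:

    #include <stdbool.h>

    struct sg_entry {
            unsigned long long phys;    /* physical start of the entry */
            unsigned int len;           /* bytes accumulated so far */
    };

    static bool try_merge_into_sg(struct sg_entry *sg,
                                  unsigned long long chunk_phys,
                                  unsigned int chunk_len,
                                  unsigned int max_segment_size)
    {
            if (sg->len + chunk_len > max_segment_size)
                    return false;               /* entry would get too large */
            if (sg->phys + sg->len != chunk_phys)
                    return false;               /* not physically contiguous */
            sg->len += chunk_len;               /* extend the current entry */
            return true;
    }
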
553 return queue_max_discard_segments(rq->q); in blk_rq_get_max_segments()
554 return queue_max_segments(rq->q); in blk_rq_get_max_segments()
563 if (blk_integrity_merge_bio(req->q, req, bio) == false) in ll_new_hw_segment()
581 req_set_nomerge(req->q, req); in ll_new_hw_segment()
596 req_set_nomerge(req->q, req); in ll_back_merge_fn()
615 req_set_nomerge(req->q, req); in ll_front_merge_fn()
622 static bool req_attempt_discard_merge(struct request_queue *q, struct request *req, in req_attempt_discard_merge() argument
627 if (segments >= queue_max_discard_segments(q)) in req_attempt_discard_merge()
636 req_set_nomerge(q, req); in req_attempt_discard_merge()
640 static int ll_merge_requests_fn(struct request_queue *q, struct request *req, in ll_merge_requests_fn() argument
662 if (blk_integrity_merge_rq(q, req, next) == false) in ll_merge_requests_fn()
729 static struct request *attempt_merge(struct request_queue *q, in attempt_merge() argument
766 if (!req_attempt_discard_merge(q, req, next)) in attempt_merge()
770 if (!ll_merge_requests_fn(q, req, next)) in attempt_merge()
804 elv_merge_requests(q, req, next); in attempt_merge()
813 trace_block_rq_merge(q, next); in attempt_merge()
823 static struct request *attempt_back_merge(struct request_queue *q, in attempt_back_merge() argument
826 struct request *next = elv_latter_request(q, rq); in attempt_back_merge()
829 return attempt_merge(q, rq, next); in attempt_back_merge()
834 static struct request *attempt_front_merge(struct request_queue *q, in attempt_front_merge() argument
837 struct request *prev = elv_former_request(q, rq); in attempt_front_merge()
840 return attempt_merge(q, prev, rq); in attempt_front_merge()
845 int blk_attempt_req_merge(struct request_queue *q, struct request *rq, in blk_attempt_req_merge() argument
850 free = attempt_merge(q, rq, next); in blk_attempt_req_merge()
880 if (blk_integrity_merge_bio(rq->q, rq, bio) == false) in blk_rq_merge_ok()
940 trace_block_bio_backmerge(req->q, req, bio); in bio_attempt_back_merge()
941 rq_qos_merge(req->q, req, bio); in bio_attempt_back_merge()
964 trace_block_bio_frontmerge(req->q, req, bio); in bio_attempt_front_merge()
965 rq_qos_merge(req->q, req, bio); in bio_attempt_front_merge()
982 static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q, in bio_attempt_discard_merge() argument
987 if (segments >= queue_max_discard_segments(q)) in bio_attempt_discard_merge()
993 rq_qos_merge(q, req, bio); in bio_attempt_discard_merge()
1003 req_set_nomerge(q, req); in bio_attempt_discard_merge()
1007 static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q, in blk_attempt_bio_merge() argument
1018 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) in blk_attempt_bio_merge()
1022 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio)) in blk_attempt_bio_merge()
1026 return bio_attempt_discard_merge(q, rq, bio); in blk_attempt_bio_merge()
1056 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, in blk_attempt_plug_merge() argument
1063 plug = blk_mq_plug(q, bio); in blk_attempt_plug_merge()
1070 if (rq->q == q && same_queue_rq) { in blk_attempt_plug_merge()
1079 if (rq->q != q) in blk_attempt_plug_merge()
1082 if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) == in blk_attempt_plug_merge()
1094 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list, in blk_bio_list_merge() argument
1104 switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) { in blk_bio_list_merge()
1119 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, in blk_mq_sched_try_merge() argument
1124 switch (elv_merge(q, &rq, bio)) { in blk_mq_sched_try_merge()
1126 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
1130 *merged_request = attempt_back_merge(q, rq); in blk_mq_sched_try_merge()
1132 elv_merged_request(q, rq, ELEVATOR_BACK_MERGE); in blk_mq_sched_try_merge()
1135 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
1139 *merged_request = attempt_front_merge(q, rq); in blk_mq_sched_try_merge()
1141 elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE); in blk_mq_sched_try_merge()
1144 return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK; in blk_mq_sched_try_merge()
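
blk_mq_sched_try_merge() above is the dispatcher for this machinery: the elevator classifies the bio as a back-merge, front-merge, or discard-merge candidate against an existing request, and the matching attempt function runs; a successful back or front merge then also tries to coalesce the grown request with its neighbour. A sketch of just the dispatch shape; the enum values and the attempt callbacks are placeholders for the kernel's ELEVATOR_*_MERGE values and the bio_attempt_*_merge()/attempt_*_merge() helpers:

    #include <stdbool.h>

    enum merge_kind { MERGE_NONE, MERGE_FRONT, MERGE_BACK, MERGE_DISCARD };

    static bool try_bio_merge(enum merge_kind kind,
                              bool (*attempt_back)(void),
                              bool (*attempt_front)(void),
                              bool (*attempt_discard)(void))
    {
            switch (kind) {
            case MERGE_BACK:
                    return attempt_back();      /* append bio to the request */
            case MERGE_FRONT:
                    return attempt_front();     /* prepend bio to the request */
            case MERGE_DISCARD:
                    return attempt_discard();   /* add another discard range */
            case MERGE_NONE:
            default:
                    return false;               /* no merge possible */
            }
    }
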