Lines Matching refs:q

12 static struct bio *blk_bio_discard_split(struct request_queue *q,  in blk_bio_discard_split()  argument
25 granularity = max(q->limits.discard_granularity >> 9, 1U); in blk_bio_discard_split()
27 max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); in blk_bio_discard_split()
44 alignment = (q->limits.discard_alignment >> 9) % granularity; in blk_bio_discard_split()
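
Everything blk_bio_discard_split() needs from q is a unit conversion problem: discard_granularity and discard_alignment are byte values shifted down to 512-byte sectors, and max_discard_sectors is clamped so later byte math cannot overflow. A runnable user-space sketch of that arithmetic with hypothetical limit values; the final rounding step is an assumption about the splitter, not shown in the lines above:

#include <limits.h>
#include <stdio.h>

/* Hypothetical q->limits.* values: bytes for the first two, sectors for the last. */
#define DISCARD_GRANULARITY_BYTES  (1U << 20)   /* 1 MiB granule */
#define DISCARD_ALIGNMENT_BYTES    (1U << 19)   /* granules start 512 KiB into the device */
#define MAX_DISCARD_SECTORS        UINT_MAX     /* device reports "unlimited" */

int main(void)
{
    /* granularity in 512-byte sectors, never less than one sector */
    unsigned granularity = DISCARD_GRANULARITY_BYTES >> 9;
    if (granularity < 1U)
        granularity = 1U;

    /* clamp so a later sectors-to-bytes conversion cannot overflow 32 bits */
    unsigned max_discard = MAX_DISCARD_SECTORS;
    if (max_discard > (UINT_MAX >> 9))
        max_discard = UINT_MAX >> 9;

    /* where discard granules start relative to sector 0, in sectors within a granule */
    unsigned alignment = (DISCARD_ALIGNMENT_BYTES >> 9) % granularity;

    /* assumed: a splitter would also trim its split size to whole granules */
    max_discard -= max_discard % granularity;

    printf("granularity=%u max=%u alignment=%u (sectors)\n",
           granularity, max_discard, alignment);
    return 0;
}
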
55 static struct bio *blk_bio_write_same_split(struct request_queue *q, in blk_bio_write_same_split() argument
62 if (!q->limits.max_write_same_sectors) in blk_bio_write_same_split()
65 if (bio_sectors(bio) <= q->limits.max_write_same_sectors) in blk_bio_write_same_split()
68 return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs); in blk_bio_write_same_split()
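
Those three fragments are nearly the whole WRITE SAME policy: no advertised limit means nothing to enforce, a bio within the limit passes through untouched, and only an oversized bio gets its front carved off. A sketch reconstructed from them; the function name and the nsegs bookkeeping are illustrative assumptions:

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio *write_same_split_sketch(struct request_queue *q,
                                           struct bio *bio,
                                           struct bio_set *bs,
                                           unsigned *nsegs)
{
    *nsegs = 1;    /* assumed: WRITE SAME carries a single payload segment */

    if (!q->limits.max_write_same_sectors)    /* no limit advertised */
        return NULL;

    if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
        return NULL;                          /* already fits, nothing to split */

    /* carve off the largest prefix the device accepts; the caller handles the rest */
    return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}
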
71 static inline unsigned get_max_io_size(struct request_queue *q, in get_max_io_size() argument
74 unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector); in get_max_io_size()
75 unsigned mask = queue_logical_block_size(q) - 1; in get_max_io_size()
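
get_max_io_size() combines two queue properties: blk_max_size_offset() gives the largest transfer allowed at this starting sector, and the logical block size supplies a mask so that budget can be trimmed to whole logical blocks. A sketch of the likely rest of the helper; the final masking line is an assumption:

#include <linux/bio.h>
#include <linux/blkdev.h>

static inline unsigned max_io_size_sketch(struct request_queue *q,
                                          struct bio *bio)
{
    unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
    unsigned mask = queue_logical_block_size(q) - 1;    /* block size is in bytes */

    /* assumed: drop any tail that is not a whole logical block (mask >> 9 is in sectors) */
    sectors &= ~(mask >> 9);
    return sectors;
}
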
83 static struct bio *blk_bio_segment_split(struct request_queue *q, in blk_bio_segment_split() argument
94 const unsigned max_sectors = get_max_io_size(q, bio); in blk_bio_segment_split()
123 if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset)) in blk_bio_segment_split()
131 if (nsegs < queue_max_segments(q) && in blk_bio_segment_split()
141 if (bvprvp && blk_queue_cluster(q)) { in blk_bio_segment_split()
142 if (seg_size + bv.bv_len > queue_max_segment_size(q)) in blk_bio_segment_split()
146 if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv)) in blk_bio_segment_split()
159 if (nsegs == queue_max_segments(q)) in blk_bio_segment_split()
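
Read in order, the checks above are the per-bio_vec decision inside the split loop: an SG gap against the previous vector forces a split, a vector that can be clustered onto the previous segment (segment-size and segment-boundary limits permitting) costs nothing, and a vector that must open a new segment is only allowed while nsegs is below queue_max_segments(). A condensed sketch of that decision; the loop state (bvprv, seg_size, nsegs) is assumed, and the physical page-mergeability test that normally accompanies these conditions is omitted:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* true when the bio has to be split in front of 'bv' */
static bool must_split_before(struct request_queue *q, struct bio_vec *bvprv,
                              struct bio_vec *bv, unsigned nsegs,
                              unsigned seg_size)
{
    /* SG gap rule between the previous vector and this one */
    if (bvprv && bvec_gap_to_prev(q, bvprv, bv->bv_offset))
        return true;

    /* can it be clustered onto the previous physical segment? */
    if (bvprv && blk_queue_cluster(q) &&
        seg_size + bv->bv_len <= queue_max_segment_size(q) &&
        BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
        return false;        /* extends the segment, no new one needed */

    /* it opens a new segment: split if the queue's segment budget is spent */
    return nsegs == queue_max_segments(q);
}
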
189 void blk_queue_split(struct request_queue *q, struct bio **bio, in blk_queue_split() argument
196 split = blk_bio_discard_split(q, *bio, bs, &nsegs); in blk_queue_split()
198 split = blk_bio_write_same_split(q, *bio, bs, &nsegs); in blk_queue_split()
200 split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs); in blk_queue_split()
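
blk_queue_split() is the dispatcher that ties the three splitters together: discard and WRITE SAME bios go to their dedicated helpers, everything else goes through the segment splitter with the queue's own q->bio_split pool. A sketch of the dispatch and the resubmission of the remainder; the bi_rw flag tests and the bookkeeping between the calls are assumptions for this kernel vintage, and the sketch only compiles next to the static splitters it calls:

void queue_split_sketch(struct request_queue *q, struct bio **bio,
                        struct bio_set *bs)
{
    struct bio *split;
    unsigned nsegs;

    if ((*bio)->bi_rw & REQ_DISCARD)                /* assumed flag test */
        split = blk_bio_discard_split(q, *bio, bs, &nsegs);
    else if ((*bio)->bi_rw & REQ_WRITE_SAME)        /* assumed flag test */
        split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
    else
        split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);

    /* assumed: cache the segment count computed during splitting */
    (split ? split : *bio)->bi_phys_segments = nsegs;

    if (split) {
        /* chain the front piece to the remainder, resubmit the remainder,
         * and let the caller continue with the size-limited front piece */
        bio_chain(split, *bio);
        generic_make_request(*bio);
        *bio = split;
    }
}
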
218 static unsigned int __blk_recalc_rq_segments(struct request_queue *q, in __blk_recalc_rq_segments() argument
242 cluster = blk_queue_cluster(q); in __blk_recalc_rq_segments()
256 > queue_max_segment_size(q)) in __blk_recalc_rq_segments()
260 if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv)) in __blk_recalc_rq_segments()
291 &rq->q->queue_flags); in blk_recalc_rq_segments()
293 rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio, in blk_recalc_rq_segments()
297 void blk_recount_segments(struct request_queue *q, struct bio *bio) in blk_recount_segments() argument
301 if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) && in blk_recount_segments()
302 (seg_cnt < queue_max_segments(q))) in blk_recount_segments()
308 bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false); in blk_recount_segments()
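
__blk_recalc_rq_segments(), shown just above, rewalks the vectors with the same cluster / segment-size / segment-boundary rules as the splitter, and blk_recount_segments() only pays for that walk when it has to: a queue flagged QUEUE_FLAG_NO_SG_MERGE whose raw vector count already fits the segment limit can use that count directly. A sketch of the fast path; the seg_cnt source and the validity flag are assumptions, and it belongs next to the static recalc helper:

void recount_segments_sketch(struct request_queue *q, struct bio *bio)
{
    unsigned short seg_cnt = bio_segments(bio);    /* assumed source of the raw count */

    if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
        seg_cnt < queue_max_segments(q))
        bio->bi_phys_segments = seg_cnt;           /* trust the vector count as-is */
    else
        bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);

    bio_set_flag(bio, BIO_SEG_VALID);              /* assumed: mark the count as computed */
}
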
316 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, in blk_phys_contig_segment() argument
322 if (!blk_queue_cluster(q)) in blk_phys_contig_segment()
326 queue_max_segment_size(q)) in blk_phys_contig_segment()
345 if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv)) in blk_phys_contig_segment()
352 __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, in __blk_segment_map_sg() argument
360 if ((*sg)->length + nbytes > queue_max_segment_size(q)) in __blk_segment_map_sg()
365 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) in __blk_segment_map_sg()
394 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, in __blk_bios_map_sg() argument
403 cluster = blk_queue_cluster(q); in __blk_bios_map_sg()
430 __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg, in __blk_bios_map_sg()
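
While the scatterlist is being built, the same clustering rules decide whether the current bio_vec can be folded into the scatterlist entry being filled or must start a new one. A condensed sketch of that test; the physical page-mergeability check that normally sits alongside these conditions is omitted and assumed:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* true when 'bvec' can simply extend the scatterlist entry 'sg' */
static bool sg_can_extend(struct request_queue *q, struct scatterlist *sg,
                          struct bio_vec *bvprv, struct bio_vec *bvec)
{
    if (!blk_queue_cluster(q))
        return false;
    if (sg->length + bvec->bv_len > queue_max_segment_size(q))
        return false;
    return BIOVEC_SEG_BOUNDARY(q, bvprv, bvec);
}
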
440 int blk_rq_map_sg(struct request_queue *q, struct request *rq, in blk_rq_map_sg() argument
447 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); in blk_rq_map_sg()
450 (blk_rq_bytes(rq) & q->dma_pad_mask)) { in blk_rq_map_sg()
452 (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; in blk_rq_map_sg()
458 if (q->dma_drain_size && q->dma_drain_needed(rq)) { in blk_rq_map_sg()
460 memset(q->dma_drain_buffer, 0, q->dma_drain_size); in blk_rq_map_sg()
464 sg_set_page(sg, virt_to_page(q->dma_drain_buffer), in blk_rq_map_sg()
465 q->dma_drain_size, in blk_rq_map_sg()
466 ((unsigned long)q->dma_drain_buffer) & in blk_rq_map_sg()
469 rq->extra_len += q->dma_drain_size; in blk_rq_map_sg()
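
After the bios are mapped, blk_rq_map_sg() applies two queue-driven fixups: the last entry is padded out to the DMA pad mask, and drivers that registered a drain buffer get one extra entry appended so the controller can flush trailing bytes into it. A condensed sketch of those fixups; the driver-specific qualifiers on both branches (which do not reference q) are omitted, and offset_in_page() stands in for the explicit masking above:

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* 'sg' is the last mapped entry, 'nsegs' the count so far (assumed names) */
static int pad_and_drain_sketch(struct request_queue *q, struct request *rq,
                                struct scatterlist *sg, int nsegs)
{
    if (blk_rq_bytes(rq) & q->dma_pad_mask) {
        unsigned int pad_len =
            (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

        sg->length += pad_len;        /* stretch the final entry */
        rq->extra_len += pad_len;     /* and tell completion about it */
    }

    if (q->dma_drain_size && q->dma_drain_needed(rq)) {
        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

        sg_unmark_end(sg);
        sg = sg_next(sg);             /* assumed: a spare entry was reserved */
        sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                    q->dma_drain_size,
                    offset_in_page(q->dma_drain_buffer));
        sg_mark_end(sg);
        nsegs++;
        rq->extra_len += q->dma_drain_size;
    }

    return nsegs;
}
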
485 static inline int ll_new_hw_segment(struct request_queue *q, in ll_new_hw_segment() argument
489 int nr_phys_segs = bio_phys_segments(q, bio); in ll_new_hw_segment()
491 if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) in ll_new_hw_segment()
494 if (blk_integrity_merge_bio(q, req, bio) == false) in ll_new_hw_segment()
506 if (req == q->last_merge) in ll_new_hw_segment()
507 q->last_merge = NULL; in ll_new_hw_segment()
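
ll_new_hw_segment() is the common gate for both merge directions: the bio may join the request only if the combined physical segment count stays within queue_max_segments() and the integrity payloads are compatible; any failure marks the request unmergeable and drops the queue's cached merge hint. A sketch with the success and failure bookkeeping assumed:

#include <linux/bio.h>
#include <linux/blkdev.h>

static int new_hw_segment_sketch(struct request_queue *q,
                                 struct request *req, struct bio *bio)
{
    int nr_phys_segs = bio_phys_segments(q, bio);

    if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
        goto no_merge;
    if (blk_integrity_merge_bio(q, req, bio) == false)
        goto no_merge;

    req->nr_phys_segments += nr_phys_segs;    /* assumed success bookkeeping */
    return 1;

no_merge:
    req->cmd_flags |= REQ_NOMERGE;            /* assumed for this kernel vintage */
    if (req == q->last_merge)
        q->last_merge = NULL;                 /* the cached hint can no longer be trusted */
    return 0;
}
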
511 int ll_back_merge_fn(struct request_queue *q, struct request *req, in ll_back_merge_fn() argument
522 if (req == q->last_merge) in ll_back_merge_fn()
523 q->last_merge = NULL; in ll_back_merge_fn()
527 blk_recount_segments(q, req->biotail); in ll_back_merge_fn()
529 blk_recount_segments(q, bio); in ll_back_merge_fn()
531 return ll_new_hw_segment(q, req, bio); in ll_back_merge_fn()
534 int ll_front_merge_fn(struct request_queue *q, struct request *req, in ll_front_merge_fn() argument
546 if (req == q->last_merge) in ll_front_merge_fn()
547 q->last_merge = NULL; in ll_front_merge_fn()
551 blk_recount_segments(q, bio); in ll_front_merge_fn()
553 blk_recount_segments(q, req->bio); in ll_front_merge_fn()
555 return ll_new_hw_segment(q, req, bio); in ll_front_merge_fn()
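
ll_back_merge_fn() and ll_front_merge_fn() mirror each other: reject (and clear the cached hint) when the merged request would exceed its size limit, refresh any stale per-bio segment counts via blk_recount_segments(), then fall through to ll_new_hw_segment(). A sketch of the back-merge variant; the size test and the staleness checks are assumptions, since only the q-referencing lines are shown, and it belongs alongside the helpers above:

int back_merge_fn_sketch(struct request_queue *q, struct request *req,
                         struct bio *bio)
{
    /* assumed size test: would the merged request exceed its sector budget? */
    if (blk_rq_sectors(req) + bio_sectors(bio) > blk_rq_get_max_sectors(req)) {
        req->cmd_flags |= REQ_NOMERGE;
        if (req == q->last_merge)
            q->last_merge = NULL;
        return 0;
    }

    /* refresh stale per-bio segment counts before the segment-level gate */
    if (!bio_flagged(req->biotail, BIO_SEG_VALID))
        blk_recount_segments(q, req->biotail);
    if (!bio_flagged(bio, BIO_SEG_VALID))
        blk_recount_segments(q, bio);

    return ll_new_hw_segment(q, req, bio);
}
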
564 struct request_queue *q = req->q; in req_no_special_merge() local
566 return !q->mq_ops && req->special; in req_no_special_merge()
569 static int ll_merge_requests_fn(struct request_queue *q, struct request *req, in ll_merge_requests_fn() argument
594 if (blk_phys_contig_segment(q, req->biotail, next->bio)) { in ll_merge_requests_fn()
602 if (total_phys_segments > queue_max_segments(q)) in ll_merge_requests_fn()
605 if (blk_integrity_merge_rq(q, req, next) == false) in ll_merge_requests_fn()
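
ll_merge_requests_fn() does the same accounting at request granularity: when the tail bio of req and the head bio of next are physically contiguous (blk_phys_contig_segment(), shown further up), the shared boundary collapses into one segment; the combined count must still fit queue_max_segments(), and the integrity profiles of both requests must agree. A condensed sketch of just that accounting, placed alongside the code above:

static bool requests_can_merge_sketch(struct request_queue *q,
                                      struct request *req, struct request *next)
{
    unsigned int total = req->nr_phys_segments + next->nr_phys_segments;

    if (blk_phys_contig_segment(q, req->biotail, next->bio))
        total--;                      /* boundary bios share one segment */

    if (total > queue_max_segments(q))
        return false;

    return blk_integrity_merge_rq(q, req, next);
}
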
663 static int attempt_merge(struct request_queue *q, struct request *req, in attempt_merge() argument
693 if (!ll_merge_requests_fn(q, req, next)) in attempt_merge()
723 elv_merge_requests(q, req, next); in attempt_merge()
736 __blk_put_request(q, next); in attempt_merge()
740 int attempt_back_merge(struct request_queue *q, struct request *rq) in attempt_back_merge() argument
742 struct request *next = elv_latter_request(q, rq); in attempt_back_merge()
745 return attempt_merge(q, rq, next); in attempt_back_merge()
750 int attempt_front_merge(struct request_queue *q, struct request *rq) in attempt_front_merge() argument
752 struct request *prev = elv_former_request(q, rq); in attempt_front_merge()
755 return attempt_merge(q, prev, rq); in attempt_front_merge()
760 int blk_attempt_req_merge(struct request_queue *q, struct request *rq, in blk_attempt_req_merge() argument
763 return attempt_merge(q, rq, next); in blk_attempt_req_merge()
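
attempt_merge() is where a successful ll_merge_requests_fn() check turns into an actual merge: the elevator is notified via elv_merge_requests() and the absorbed request is released with __blk_put_request(). The two directional wrappers just ask the elevator for the neighbouring request and pass the ordered pair down. A sketch of those wrappers, with the NULL handling assumed and the static attempt_merge() taken from the file above:

int back_merge_sketch(struct request_queue *q, struct request *rq)
{
    struct request *next = elv_latter_request(q, rq);

    return next ? attempt_merge(q, rq, next) : 0;
}

int front_merge_sketch(struct request_queue *q, struct request *rq)
{
    struct request *prev = elv_former_request(q, rq);

    return prev ? attempt_merge(q, prev, rq) : 0;
}
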
783 if (blk_integrity_merge_bio(rq->q, rq, bio) == false) in blk_rq_merge_ok()