// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#ifndef __GENKSYMS__
#include <linux/blk-cgroup.h>
#endif

#include <trace/events/block.h>

#include "blk.h"
#include "blk-rq-qos.h"

static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the first bio starts at a non-zero offset, otherwise
	 * it is quite difficult to respect the sg gap limit. We work hard to
	 * merge large numbers of small single bios, e.g. during mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

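/*
 * Split a discard/secure-erase bio so that it does not exceed the queue's
 * maximum discard size and, where possible, ends on a discard-granularity
 * aligned boundary. Returns the split-off front part, or NULL if no split
 * is needed.
 */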
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 0;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
	unsigned max_sectors = sectors;
	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
	unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);

	max_sectors += start_offset;
	max_sectors &= ~(pbs - 1);
	if (max_sectors > start_offset)
		return max_sectors - start_offset;

	return sectors & ~(lbs - 1);
}

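/*
 * Return the number of bytes that can be added to a segment starting at
 * @offset into @start_page without crossing the queue's segment boundary
 * mask or exceeding its maximum segment size.
 */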
static inline unsigned get_max_segment_size(const struct request_queue *q,
					    struct page *start_page,
					    unsigned long offset)
{
	unsigned long mask = queue_segment_boundary(q);

	offset = mask & (page_to_phys(start_page) + offset);

	/*
	 * An overflow may be triggered when the physical address of the page
	 * is zero on a 32-bit arch; use the queue's max segment size when
	 * that happens.
	 */
	return min_not_zero(mask - offset + 1,
			(unsigned long)queue_max_segment_size(q));
}

/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @q:        [in] request queue associated with the bio associated with @bv
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
 *            by the number of sectors from @bv that may be appended to that
 *            bio without exceeding @max_sectors
 * @max_segs: [in] upper bound for *@nsegs
 * @max_sectors: [in] upper bound for *@sectors
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct request_queue *q,
			    const struct bio_vec *bv, unsigned *nsegs,
			    unsigned *sectors, unsigned max_segs,
			    unsigned max_sectors)
{
	unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(q, bv->bv_page,
						bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
			break;
	}

	*sectors += total_len >> 9;

	/* tell the caller to split the bvec if it is too big to fit */
	return len > 0 || bv->bv_len > max_len;
}

/**
 * blk_bio_segment_split - split a bio in two bios
 * @q:    [in] request queue pointer
 * @bio:  [in] bio to be split
 * @bs:   [in] bio set to allocate the clone from
 * @segs: [out] number of segments in the bio with the first half of the sectors
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most get_max_io_size(@q, @bio) sectors.
 * - That it has at most queue_max_segments(@q) segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, sectors = 0;
	const unsigned max_sectors = get_max_io_size(q, bio);
	const unsigned max_segs = queue_max_segments(q);

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (nsegs < max_segs &&
		    sectors + (bv.bv_len >> 9) <= max_sectors &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			sectors += bv.bv_len >> 9;
		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
					   max_sectors)) {
			goto split;
		}

		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return NULL;
split:
	*segs = nsegs;
	return bio_split(bio, sectors, GFP_NOIO, bs);
}

/**
 * __blk_queue_split - split a bio and submit the second half
 * @bio:     [in, out] bio to be split
 * @nr_segs: [out] number of segments in the first bio
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. If the second bio is still too
 * big it will be split by a recursive call to this function. Since this
 * function may allocate a new bio from @bio->bi_disk->queue->bio_split, it is
 * the responsibility of the caller to ensure that
 * @bio->bi_disk->queue->bio_split is only released after processing of the
 * split bio has finished.
 */
void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
{
	struct request_queue *q = (*bio)->bi_disk->queue;
	struct bio *split = NULL;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	default:
		/*
		 * All drivers must accept single-segment bios that are <=
		 * PAGE_SIZE. This is a quick and dirty check that relies on
		 * the fact that bi_io_vec[0] is always valid if a bio has data.
		 * The check might lead to occasional false negatives when bios
		 * are cloned, but compared to the performance impact of cloned
		 * bios themselves the loop below doesn't matter anyway.
		 */
		if (!q->limits.chunk_sectors &&
		    (*bio)->bi_vcnt == 1 &&
		    ((*bio)->bi_io_vec[0].bv_len +
		     (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
			*nr_segs = 1;
			break;
		}
		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
		break;
	}

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		submit_bio_noacct(*bio);
		*bio = split;

		blk_throtl_charge_bio_split(*bio);
	}
}

/**
 * blk_queue_split - split a bio and submit the second half
 * @bio: [in, out] bio to be split
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. Since this function may allocate
 * a new bio from @bio->bi_disk->queue->bio_split, it is the responsibility of
 * the caller to ensure that @bio->bi_disk->queue->bio_split is only released
 * after processing of the split bio has finished.
 */
void blk_queue_split(struct bio **bio)
{
	unsigned int nr_segs;

	__blk_queue_split(bio, &nr_segs);
}
EXPORT_SYMBOL(blk_queue_split);

unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int nr_sectors = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		if (queue_max_discard_segments(rq->q) > 1) {
			struct bio *bio = rq->bio;

			for_each_bio(bio)
				nr_phys_segs++;
			return nr_phys_segs;
		}
		return 1;
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

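/*
 * Map a single (possibly multi-page) bvec to one or more scatterlist entries,
 * never letting an entry exceed the queue's maximum segment size and never
 * presenting an sg offset above PAGE_SIZE. Returns the number of scatterlist
 * entries used.
 */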
static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
					offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine. For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}

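/* Map a bvec that fits in a single segment to exactly one scatterlist entry. */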
static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two different bios;
			 * merging within a single bio has already been done
			 * when pages were added to it.
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}

/*
 * Map a request to a scatterlist; return the number of sg entries set up.
 * The caller must make sure the sg list can hold rq->nr_phys_segments entries.
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg)
{
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something must have gone wrong if the computed number of segments
	 * is bigger than the request's number of physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);

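/*
 * Discard requests have their own segment limit; every other operation is
 * bounded by the queue-wide maximum segment count.
 */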
static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

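/*
 * Check whether appending @bio to @req stays within the cgroup, integrity and
 * segment-count limits; on success bump req->nr_phys_segments (discard merges
 * do not add segments) and return 1, otherwise mark the request NOMERGE and
 * return 0.
 */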
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (!blk_cgroup_mergeable(req, bio))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/* discard request merge won't add new segment */
	if (req_op(req) == REQ_OP_DISCARD)
		return 1;

	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

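/*
 * Decide whether @bio can be appended to the back (or, in the function below,
 * prepended to the front) of @req: reject virt-boundary gaps, integrity gaps,
 * incompatible crypto contexts and size overflows before falling through to
 * the common segment accounting.
 */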
int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_front_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

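/*
 * Check whether two discard requests can be merged back to back without
 * exceeding the queue's discard segment and size limits, and update the
 * segment count of @req accordingly.
 */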
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (!blk_cgroup_mergeable(req, next->bio))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge_request(struct request *req)
{
	if (blk_do_io_stat(req)) {
		part_stat_lock();
		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
		part_stat_unlock();

		hd_struct_put(req->part);
	}
}

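/*
 * Classify how @next could be merged into @req: as an additional discard
 * range, as a contiguous back merge, or not at all.
 */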
static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk)
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	blk_crypto_rq_put_keyslot(next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge_request(next);

	trace_block_rq_merge(q, next);

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

static struct request *attempt_back_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

static struct request *attempt_front_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct request *free;

	free = attempt_merge(q, rq, next);
	if (free) {
		blk_put_request(free);
		return 1;
	}

	return 0;
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device */
	if (rq->rq_disk != bio->bi_disk)
		return false;

	/* don't merge across cgroup boundaries */
	if (!blk_cgroup_mergeable(rq, bio))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* Only merge if the crypt contexts are compatible */
	if (!bio_crypt_rq_ctx_compatible(rq, bio))
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}

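/*
 * Work out where @bio could be merged into @rq: as an extra discard range,
 * directly after the request (back merge), directly before it (front merge),
 * or nowhere.
 */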
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}

static void blk_account_io_merge_bio(struct request *req)
{
	if (!blk_do_io_stat(req))
		return;

	part_stat_lock();
	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
	part_stat_unlock();
}

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

static enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_backmerge(req->q, req, bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_free_ctx(bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_front_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_frontmerge(req->q, req, bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_do_front_merge(req, bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
		struct request *req, struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
no_merge:
	req_set_nomerge(q, req);
	return BIO_MERGE_FAILED;
}

static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
						   struct request *rq,
						   struct bio *bio,
						   unsigned int nr_segs,
						   bool sched_allow_merge)
{
	if (!blk_rq_merge_ok(rq, bio))
		return BIO_MERGE_NONE;

	switch (blk_try_merge(rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_back_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_FRONT_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_front_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return BIO_MERGE_NONE;
	}

	return BIO_MERGE_FAILED;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 * @same_queue_rq: pointer to &struct request that gets filled in when
 *                 another request associated with @q is found on the plug list
 *                 (optional, may be %NULL)
 *
 * Determine whether @bio being queued on @q can be merged with a request
 * on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside the queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
	struct list_head *plug_list;

	plug = blk_mq_plug(q, bio);
	if (!plug)
		return false;

	plug_list = &plug->mq_list;

	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		if (rq->q == q && same_queue_rq) {
			/*
			 * Only the blk-mq multiple-hardware-queues case looks
			 * for an rq on the same queue; there should be at
			 * most one such rq on a plug list.
			 */
			*same_queue_rq = rq;
		}

		if (rq->q != q)
			continue;

		if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
		    BIO_MERGE_OK)
			return true;
	}

	return false;
}

/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		if (!checked--)
			break;

		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
		case BIO_MERGE_NONE:
			continue;
		case BIO_MERGE_OK:
			return true;
		case BIO_MERGE_FAILED:
			return false;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_bio_list_merge);

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);