1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Functions related to segment and merge handling
4  */
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/bio.h>
8 #include <linux/blkdev.h>
9 #include <linux/scatterlist.h>
10 #include <linux/blk-cgroup.h>
11 
12 #include <trace/events/block.h>
13 
14 #include "blk.h"
15 #include "blk-rq-qos.h"
16 
17 static inline bool bio_will_gap(struct request_queue *q,
18 		struct request *prev_rq, struct bio *prev, struct bio *next)
19 {
20 	struct bio_vec pb, nb;
21 
22 	if (!bio_has_data(prev) || !queue_virt_boundary(q))
23 		return false;
24 
25 	/*
26 	 * Don't merge if the first bio starts at a non-zero offset, otherwise
27 	 * it is quite difficult to respect the sg gap limit.  We work hard to
28 	 * merge a huge number of small single bios in case of mkfs.
29 	 */
30 	if (prev_rq)
31 		bio_get_first_bvec(prev_rq->bio, &pb);
32 	else
33 		bio_get_first_bvec(prev, &pb);
34 	if (pb.bv_offset & queue_virt_boundary(q))
35 		return true;
36 
37 	/*
38 	 * We don't need to worry about the case where the merged segment
39 	 * ends at an unaligned virt boundary:
40 	 *
41 	 * - if 'pb' ends aligned, the merged segment ends aligned
42 	 * - if 'pb' ends unaligned, the next bio must consist of
43 	 *   exactly the single bvec 'nb', otherwise 'nb' can't
44 	 *   merge with 'pb'
45 	 */
46 	bio_get_last_bvec(prev, &pb);
47 	bio_get_first_bvec(next, &nb);
48 	if (biovec_phys_mergeable(q, &pb, &nb))
49 		return false;
50 	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
51 }
52 
53 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
54 {
55 	return bio_will_gap(req->q, req, req->biotail, bio);
56 }
57 
58 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
59 {
60 	return bio_will_gap(req->q, NULL, bio, req->bio);
61 }
62 
63 static struct bio *blk_bio_discard_split(struct request_queue *q,
64 					 struct bio *bio,
65 					 struct bio_set *bs,
66 					 unsigned *nsegs)
67 {
68 	unsigned int max_discard_sectors, granularity;
69 	int alignment;
70 	sector_t tmp;
71 	unsigned split_sectors;
72 
73 	*nsegs = 1;
74 
75 	/* Zero-sector (unknown) and one-sector granularities are the same.  */
76 	granularity = max(q->limits.discard_granularity >> 9, 1U);
77 
78 	max_discard_sectors = min(q->limits.max_discard_sectors,
79 			bio_allowed_max_sectors(q));
80 	max_discard_sectors -= max_discard_sectors % granularity;
81 
82 	if (unlikely(!max_discard_sectors)) {
83 		/* XXX: warn */
84 		return NULL;
85 	}
86 
87 	if (bio_sectors(bio) <= max_discard_sectors)
88 		return NULL;
89 
90 	split_sectors = max_discard_sectors;
91 
92 	/*
93 	 * If the next starting sector would be misaligned, stop the discard at
94 	 * the previous aligned sector.
95 	 */
96 	alignment = (q->limits.discard_alignment >> 9) % granularity;
97 
98 	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
99 	tmp = sector_div(tmp, granularity);
100 
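	/*
	 * sector_div() returns the remainder of the division, so 'tmp' is now
	 * the number of sectors by which the proposed split end overshoots a
	 * granularity-aligned boundary; trimming it keeps the remainder of
	 * the bio starting on an aligned sector.
	 */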
101 	if (split_sectors > tmp)
102 		split_sectors -= tmp;
103 
104 	return bio_split(bio, split_sectors, GFP_NOIO, bs);
105 }
106 
107 static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
108 		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
109 {
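	/* A REQ_OP_WRITE_ZEROES bio carries no data payload, hence no segments. */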
110 	*nsegs = 0;
111 
112 	if (!q->limits.max_write_zeroes_sectors)
113 		return NULL;
114 
115 	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
116 		return NULL;
117 
118 	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
119 }
120 
121 static struct bio *blk_bio_write_same_split(struct request_queue *q,
122 					    struct bio *bio,
123 					    struct bio_set *bs,
124 					    unsigned *nsegs)
125 {
126 	*nsegs = 1;
127 
128 	if (!q->limits.max_write_same_sectors)
129 		return NULL;
130 
131 	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
132 		return NULL;
133 
134 	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
135 }
136 
137 /*
138  * Return the maximum number of sectors from the start of a bio that may be
139  * submitted as a single request to a block device. If enough sectors remain,
140  * align the end to the physical block size. Otherwise align the end to the
141  * logical block size. This approach minimizes the number of non-aligned
142  * requests that are submitted to a block device if the start of a bio is not
143  * aligned to a physical block boundary.
144  */
145 static inline unsigned get_max_io_size(struct request_queue *q,
146 				       struct bio *bio)
147 {
148 	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
149 	unsigned max_sectors = sectors;
150 	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
151 	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
152 	unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);
153 
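	/*
	 * Round the end of the allowed range down to a physical block
	 * boundary, measured from the (possibly unaligned) start offset; if
	 * nothing would remain, fall back to logical block alignment below.
	 */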
154 	max_sectors += start_offset;
155 	max_sectors &= ~(pbs - 1);
156 	if (max_sectors > start_offset)
157 		return max_sectors - start_offset;
158 
159 	return sectors & ~(lbs - 1);
160 }
161 
162 static inline unsigned get_max_segment_size(const struct request_queue *q,
163 					    struct page *start_page,
164 					    unsigned long offset)
165 {
166 	unsigned long mask = queue_segment_boundary(q);
167 
168 	offset = mask & (page_to_phys(start_page) + offset);
169 
170 	/*
171 	 * An overflow may be triggered in case of a zero page physical address
172 	 * on a 32-bit arch; use the queue's max segment size when that happens.
173 	 */
174 	return min_not_zero(mask - offset + 1,
175 			(unsigned long)queue_max_segment_size(q));
176 }
177 
178 /**
179  * bvec_split_segs - verify whether or not a bvec should be split in the middle
180  * @q:        [in] request queue associated with the bio associated with @bv
181  * @bv:       [in] bvec to examine
182  * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
183  *            by the number of segments from @bv that may be appended to that
184  *            bio without exceeding @max_segs
185  * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
186  *            by the number of sectors from @bv that may be appended to that
187  *            bio without exceeding @max_sectors
188  * @max_segs: [in] upper bound for *@nsegs
189  * @max_sectors: [in] upper bound for *@sectors
190  *
191  * When splitting a bio, it can happen that a bvec is encountered that is too
192  * big to fit in a single segment and hence that it has to be split in the
193  * middle. This function verifies whether or not that should happen. The value
194  * %true is returned if and only if appending the entire @bv to a bio with
195  * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
196  * the block driver.
197  */
198 static bool bvec_split_segs(const struct request_queue *q,
199 			    const struct bio_vec *bv, unsigned *nsegs,
200 			    unsigned *sectors, unsigned max_segs,
201 			    unsigned max_sectors)
202 {
203 	unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
204 	unsigned len = min(bv->bv_len, max_len);
205 	unsigned total_len = 0;
206 	unsigned seg_size = 0;
207 
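	/*
	 * Walk the bvec in chunks no larger than the queue's segment size,
	 * stopping once the segment limit is hit or a chunk ends on an
	 * unaligned virt boundary; any leftover length is reported through
	 * the return value so the caller can split.
	 */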
208 	while (len && *nsegs < max_segs) {
209 		seg_size = get_max_segment_size(q, bv->bv_page,
210 						bv->bv_offset + total_len);
211 		seg_size = min(seg_size, len);
212 
213 		(*nsegs)++;
214 		total_len += seg_size;
215 		len -= seg_size;
216 
217 		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
218 			break;
219 	}
220 
221 	*sectors += total_len >> 9;
222 
223 	/* tell the caller to split the bvec if it is too big to fit */
224 	return len > 0 || bv->bv_len > max_len;
225 }
226 
227 /**
228  * blk_bio_segment_split - split a bio in two bios
229  * @q:    [in] request queue pointer
230  * @bio:  [in] bio to be split
231  * @bs:	  [in] bio set to allocate the clone from
232  * @segs: [out] number of segments in the bio with the first half of the sectors
233  *
234  * Clone @bio, update the bi_iter of the clone to represent the first sectors
235  * of @bio and update @bio->bi_iter to represent the remaining sectors. The
236  * following is guaranteed for the cloned bio:
237  * - That it has at most get_max_io_size(@q, @bio) sectors.
238  * - That it has at most queue_max_segments(@q) segments.
239  *
240  * Except for discard requests the cloned bio will point at the bi_io_vec of
241  * the original bio. It is the responsibility of the caller to ensure that the
242  * original bio is not freed before the cloned bio. The caller is also
243  * responsible for ensuring that @bs is only destroyed after processing of the
244  * split bio has finished.
245  */
246 static struct bio *blk_bio_segment_split(struct request_queue *q,
247 					 struct bio *bio,
248 					 struct bio_set *bs,
249 					 unsigned *segs)
250 {
251 	struct bio_vec bv, bvprv, *bvprvp = NULL;
252 	struct bvec_iter iter;
253 	unsigned nsegs = 0, sectors = 0;
254 	const unsigned max_sectors = get_max_io_size(q, bio);
255 	const unsigned max_segs = queue_max_segments(q);
256 
257 	bio_for_each_bvec(bv, bio, iter) {
258 		/*
259 		 * If the queue doesn't support SG gaps and adding this
260 		 * offset would create a gap, disallow it.
261 		 */
262 		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
263 			goto split;
264 
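		/*
		 * Fast path: a bvec that fits within one page and within the
		 * remaining sector/segment budget counts as a single segment;
		 * otherwise bvec_split_segs() decides whether a split is needed.
		 */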
265 		if (nsegs < max_segs &&
266 		    sectors + (bv.bv_len >> 9) <= max_sectors &&
267 		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
268 			nsegs++;
269 			sectors += bv.bv_len >> 9;
270 		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
271 					 max_sectors)) {
272 			goto split;
273 		}
274 
275 		bvprv = bv;
276 		bvprvp = &bvprv;
277 	}
278 
279 	*segs = nsegs;
280 	return NULL;
281 split:
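	/*
	 * The bio exceeds one of the limits: split off the sectors counted so
	 * far and let the caller submit the remainder separately.
	 */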
282 	*segs = nsegs;
283 	return bio_split(bio, sectors, GFP_NOIO, bs);
284 }
285 
286 /**
287  * __blk_queue_split - split a bio and submit the second half
288  * @bio:     [in, out] bio to be split
289  * @nr_segs: [out] number of segments in the first bio
290  *
291  * Split a bio into two bios, chain the two bios, submit the second half and
292  * store a pointer to the first half in *@bio. If the second bio is still too
293  * big it will be split by a recursive call to this function. Since this
294  * function may allocate a new bio from @bio->bi_disk->queue->bio_split, it is
295  * the responsibility of the caller to ensure that
296  * @bio->bi_disk->queue->bio_split is only released after processing of the
297  * split bio has finished.
298  */
299 void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
300 {
301 	struct request_queue *q = (*bio)->bi_disk->queue;
302 	struct bio *split = NULL;
303 
304 	switch (bio_op(*bio)) {
305 	case REQ_OP_DISCARD:
306 	case REQ_OP_SECURE_ERASE:
307 		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
308 		break;
309 	case REQ_OP_WRITE_ZEROES:
310 		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
311 				nr_segs);
312 		break;
313 	case REQ_OP_WRITE_SAME:
314 		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
315 				nr_segs);
316 		break;
317 	default:
318 		/*
319 		 * All drivers must accept single-segment bios that are <=
320 		 * PAGE_SIZE.  This is a quick and dirty check that relies on
321 		 * the fact that bi_io_vec[0] is always valid if a bio has data.
322 		 * The check might lead to occasional false negatives when bios
323 		 * are cloned, but compared to the performance impact of cloned
324 		 * bios themselves the loop below doesn't matter anyway.
325 		 */
326 		if (!q->limits.chunk_sectors &&
327 		    (*bio)->bi_vcnt == 1 &&
328 		    ((*bio)->bi_io_vec[0].bv_len +
329 		     (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
330 			*nr_segs = 1;
331 			break;
332 		}
333 		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
334 		break;
335 	}
336 
337 	if (split) {
338 		/* there is no chance to merge the split bio */
339 		split->bi_opf |= REQ_NOMERGE;
340 
341 		bio_chain(split, *bio);
342 		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
343 		submit_bio_noacct(*bio);
344 		*bio = split;
345 
346 		blk_throtl_charge_bio_split(*bio);
347 	}
348 }
349 
350 /**
351  * blk_queue_split - split a bio and submit the second half
352  * @bio: [in, out] bio to be split
353  *
354  * Split a bio into two bios, chain the two bios, submit the second half and
355  * store a pointer to the first half in *@bio. Since this function may allocate
356  * a new bio from @bio->bi_disk->queue->bio_split, it is the responsibility of
357  * the caller to ensure that @bio->bi_disk->queue->bio_split is only released
358  * after processing of the split bio has finished.
359  */
360 void blk_queue_split(struct bio **bio)
361 {
362 	unsigned int nr_segs;
363 
364 	__blk_queue_split(bio, &nr_segs);
365 }
366 EXPORT_SYMBOL(blk_queue_split);
367 
368 unsigned int blk_recalc_rq_segments(struct request *rq)
369 {
370 	unsigned int nr_phys_segs = 0;
371 	unsigned int nr_sectors = 0;
372 	struct req_iterator iter;
373 	struct bio_vec bv;
374 
375 	if (!rq->bio)
376 		return 0;
377 
378 	switch (bio_op(rq->bio)) {
379 	case REQ_OP_DISCARD:
380 	case REQ_OP_SECURE_ERASE:
381 		if (queue_max_discard_segments(rq->q) > 1) {
382 			struct bio *bio = rq->bio;
383 
384 			for_each_bio(bio)
385 				nr_phys_segs++;
386 			return nr_phys_segs;
387 		}
388 		return 1;
389 	case REQ_OP_WRITE_ZEROES:
390 		return 0;
391 	case REQ_OP_WRITE_SAME:
392 		return 1;
393 	}
394 
395 	rq_for_each_bvec(bv, rq, iter)
396 		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
397 				UINT_MAX, UINT_MAX);
398 	return nr_phys_segs;
399 }
400 
401 static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
402 		struct scatterlist *sglist)
403 {
404 	if (!*sg)
405 		return sglist;
406 
407 	/*
408 	 * If the driver previously mapped a shorter list, we could see a
409 	 * termination bit prematurely unless it fully inits the sg table
410 	 * on each mapping. We KNOW that there must be more entries here
411 	 * or the driver would be buggy, so force clear the termination bit
412 	 * to avoid doing a full sg_init_table() in drivers for each command.
413 	 */
414 	sg_unmark_end(*sg);
415 	return sg_next(*sg);
416 }
417 
418 static unsigned blk_bvec_map_sg(struct request_queue *q,
419 		struct bio_vec *bvec, struct scatterlist *sglist,
420 		struct scatterlist **sg)
421 {
422 	unsigned nbytes = bvec->bv_len;
423 	unsigned nsegs = 0, total = 0;
424 
425 	while (nbytes > 0) {
426 		unsigned offset = bvec->bv_offset + total;
427 		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
428 					offset), nbytes);
429 		struct page *page = bvec->bv_page;
430 
431 		/*
432 		 * Unfortunately a fair number of drivers barf on scatterlists
433 		 * that have an offset larger than PAGE_SIZE, despite other
434 		 * subsystems dealing with that invariant just fine.  For now
435 		 * stick to the legacy format where we never present those from
436 		 * the block layer, but the code below should be removed once
437 		 * these offenders (mostly MMC/SD drivers) are fixed.
438 		 */
439 		page += (offset >> PAGE_SHIFT);
440 		offset &= ~PAGE_MASK;
441 
442 		*sg = blk_next_sg(sg, sglist);
443 		sg_set_page(*sg, page, len, offset);
444 
445 		total += len;
446 		nbytes -= len;
447 		nsegs++;
448 	}
449 
450 	return nsegs;
451 }
452 
453 static inline int __blk_bvec_map_sg(struct bio_vec bv,
454 		struct scatterlist *sglist, struct scatterlist **sg)
455 {
456 	*sg = blk_next_sg(sg, sglist);
457 	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
458 	return 1;
459 }
460 
461 /* only try to merge bvecs into one sg if they are from two bios */
462 static inline bool
463 __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
464 			   struct bio_vec *bvprv, struct scatterlist **sg)
465 {
466 
467 	int nbytes = bvec->bv_len;
468 
469 	if (!*sg)
470 		return false;
471 
472 	if ((*sg)->length + nbytes > queue_max_segment_size(q))
473 		return false;
474 
475 	if (!biovec_phys_mergeable(q, bvprv, bvec))
476 		return false;
477 
478 	(*sg)->length += nbytes;
479 
480 	return true;
481 }
482 
483 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
484 			     struct scatterlist *sglist,
485 			     struct scatterlist **sg)
486 {
487 	struct bio_vec bvec, bvprv = { NULL };
488 	struct bvec_iter iter;
489 	int nsegs = 0;
490 	bool new_bio = false;
491 
492 	for_each_bio(bio) {
493 		bio_for_each_bvec(bvec, bio, iter) {
494 			/*
495 			 * Only try to merge bvecs from two different bios,
496 			 * since bio-internal merging was already done when
497 			 * pages were added to the bio
498 			 */
499 			if (new_bio &&
500 			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
501 				goto next_bvec;
502 
503 			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
504 				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
505 			else
506 				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
507  next_bvec:
508 			new_bio = false;
509 		}
510 		if (likely(bio->bi_iter.bi_size)) {
511 			bvprv = bvec;
512 			new_bio = true;
513 		}
514 	}
515 
516 	return nsegs;
517 }
518 
519 /*
520  * map a request to scatterlist, return number of sg entries setup. Caller
521  * must make sure sg can hold rq->nr_phys_segments entries
522  */
523 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
524 		struct scatterlist *sglist, struct scatterlist **last_sg)
525 {
526 	int nsegs = 0;
527 
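	/*
	 * A request with a special payload (RQF_SPECIAL_PAYLOAD) or a
	 * WRITE_SAME request maps exactly one bvec; anything else walks
	 * every bio in the request.
	 */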
528 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
529 		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
530 	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
531 		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
532 	else if (rq->bio)
533 		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
534 
535 	if (*last_sg)
536 		sg_mark_end(*last_sg);
537 
538 	/*
539 	 * Something must have gone wrong if the computed number of
540 	 * segments is bigger than the number of the request's physical segments
541 	 */
542 	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
543 
544 	return nsegs;
545 }
546 EXPORT_SYMBOL(__blk_rq_map_sg);
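
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * typically maps a request into a scatterlist sized by the request's
 * physical segment count, along the lines of:
 *
 *	sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
 *	nents = blk_rq_map_sg(rq->q, rq, sgl);
 *
 * On return 'nents' entries of sgl[] are populated and the last one has
 * been terminated with sg_mark_end().
 */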
547 
548 static inline unsigned int blk_rq_get_max_segments(struct request *rq)
549 {
550 	if (req_op(rq) == REQ_OP_DISCARD)
551 		return queue_max_discard_segments(rq->q);
552 	return queue_max_segments(rq->q);
553 }
554 
555 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
556 		unsigned int nr_phys_segs)
557 {
558 	if (!blk_cgroup_mergeable(req, bio))
559 		goto no_merge;
560 
561 	if (blk_integrity_merge_bio(req->q, req, bio) == false)
562 		goto no_merge;
563 
564 	/* discard request merge won't add new segment */
565 	if (req_op(req) == REQ_OP_DISCARD)
566 		return 1;
567 
568 	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
569 		goto no_merge;
570 
571 	/*
572 	 * This will form the start of a new hw segment.  Bump both
573 	 * counters.
574 	 */
575 	req->nr_phys_segments += nr_phys_segs;
576 	return 1;
577 
578 no_merge:
579 	req_set_nomerge(req->q, req);
580 	return 0;
581 }
582 
583 int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
584 {
585 	if (req_gap_back_merge(req, bio))
586 		return 0;
587 	if (blk_integrity_rq(req) &&
588 	    integrity_req_gap_back_merge(req, bio))
589 		return 0;
590 	if (!bio_crypt_ctx_back_mergeable(req, bio))
591 		return 0;
592 	if (blk_rq_sectors(req) + bio_sectors(bio) >
593 	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
594 		req_set_nomerge(req->q, req);
595 		return 0;
596 	}
597 
598 	return ll_new_hw_segment(req, bio, nr_segs);
599 }
600 
601 static int ll_front_merge_fn(struct request *req, struct bio *bio,
602 		unsigned int nr_segs)
603 {
604 	if (req_gap_front_merge(req, bio))
605 		return 0;
606 	if (blk_integrity_rq(req) &&
607 	    integrity_req_gap_front_merge(req, bio))
608 		return 0;
609 	if (!bio_crypt_ctx_front_mergeable(req, bio))
610 		return 0;
611 	if (blk_rq_sectors(req) + bio_sectors(bio) >
612 	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
613 		req_set_nomerge(req->q, req);
614 		return 0;
615 	}
616 
617 	return ll_new_hw_segment(req, bio, nr_segs);
618 }
619 
620 static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
621 		struct request *next)
622 {
623 	unsigned short segments = blk_rq_nr_discard_segments(req);
624 
625 	if (segments >= queue_max_discard_segments(q))
626 		goto no_merge;
627 	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
628 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
629 		goto no_merge;
630 
631 	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
632 	return true;
633 no_merge:
634 	req_set_nomerge(q, req);
635 	return false;
636 }
637 
638 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
639 				struct request *next)
640 {
641 	int total_phys_segments;
642 
643 	if (req_gap_back_merge(req, next->bio))
644 		return 0;
645 
646 	/*
647 	 * Will it become too large?
648 	 */
649 	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
650 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
651 		return 0;
652 
653 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
654 	if (total_phys_segments > blk_rq_get_max_segments(req))
655 		return 0;
656 
657 	if (!blk_cgroup_mergeable(req, next->bio))
658 		return 0;
659 
660 	if (blk_integrity_merge_rq(q, req, next) == false)
661 		return 0;
662 
663 	if (!bio_crypt_ctx_merge_rq(req, next))
664 		return 0;
665 
666 	/* Merge is OK... */
667 	req->nr_phys_segments = total_phys_segments;
668 	return 1;
669 }
670 
671 /**
672  * blk_rq_set_mixed_merge - mark a request as mixed merge
673  * @rq: request to mark as mixed merge
674  *
675  * Description:
676  *     @rq is about to be mixed merged.  Make sure the attributes
677  *     which can be mixed are set in each bio and mark @rq as mixed
678  *     merged.
679  */
680 void blk_rq_set_mixed_merge(struct request *rq)
681 {
682 	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
683 	struct bio *bio;
684 
685 	if (rq->rq_flags & RQF_MIXED_MERGE)
686 		return;
687 
688 	/*
689 	 * @rq will no longer represent mixable attributes for all the
690 	 * contained bios.  It will just track those of the first one.
691 	 * Distribute the attributes to each bio.
692 	 */
693 	for (bio = rq->bio; bio; bio = bio->bi_next) {
694 		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
695 			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
696 		bio->bi_opf |= ff;
697 	}
698 	rq->rq_flags |= RQF_MIXED_MERGE;
699 }
700 
701 static void blk_account_io_merge_request(struct request *req)
702 {
703 	if (blk_do_io_stat(req)) {
704 		part_stat_lock();
705 		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
706 		part_stat_unlock();
707 
708 		hd_struct_put(req->part);
709 	}
710 }
711 
712 static enum elv_merge blk_try_req_merge(struct request *req,
713 					struct request *next)
714 {
715 	if (blk_discard_mergable(req))
716 		return ELEVATOR_DISCARD_MERGE;
717 	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
718 		return ELEVATOR_BACK_MERGE;
719 
720 	return ELEVATOR_NO_MERGE;
721 }
722 
723 /*
724  * For non-mq, this has to be called with the request spinlock acquired.
725  * For mq with scheduling, the appropriate queue wide lock should be held.
726  */
727 static struct request *attempt_merge(struct request_queue *q,
728 				     struct request *req, struct request *next)
729 {
730 	if (!rq_mergeable(req) || !rq_mergeable(next))
731 		return NULL;
732 
733 	if (req_op(req) != req_op(next))
734 		return NULL;
735 
736 	if (rq_data_dir(req) != rq_data_dir(next)
737 	    || req->rq_disk != next->rq_disk)
738 		return NULL;
739 
740 	if (req_op(req) == REQ_OP_WRITE_SAME &&
741 	    !blk_write_same_mergeable(req->bio, next->bio))
742 		return NULL;
743 
744 	/*
745 	 * Don't allow merge of different write hints, or for a hint with
746 	 * non-hint IO.
747 	 */
748 	if (req->write_hint != next->write_hint)
749 		return NULL;
750 
751 	if (req->ioprio != next->ioprio)
752 		return NULL;
753 
754 	/*
755 	 * If we are allowed to merge, then append bio list
756 	 * from next to rq and release next. merge_requests_fn
757 	 * will have updated segment counts, update sector
758 	 * counts here. Handle DISCARDs separately, as they
759 	 * have separate settings.
760 	 */
761 
762 	switch (blk_try_req_merge(req, next)) {
763 	case ELEVATOR_DISCARD_MERGE:
764 		if (!req_attempt_discard_merge(q, req, next))
765 			return NULL;
766 		break;
767 	case ELEVATOR_BACK_MERGE:
768 		if (!ll_merge_requests_fn(q, req, next))
769 			return NULL;
770 		break;
771 	default:
772 		return NULL;
773 	}
774 
775 	/*
776 	 * If failfast settings disagree or any of the two is already
777 	 * a mixed merge, mark both as mixed before proceeding.  This
778 	 * makes sure that all involved bios have mixable attributes
779 	 * set properly.
780 	 */
781 	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
782 	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
783 	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
784 		blk_rq_set_mixed_merge(req);
785 		blk_rq_set_mixed_merge(next);
786 	}
787 
788 	/*
789 	 * At this point we have either done a back merge or front merge. We
790 	 * need the smaller start_time_ns of the merged requests to be the
791 	 * current request for accounting purposes.
792 	 */
793 	if (next->start_time_ns < req->start_time_ns)
794 		req->start_time_ns = next->start_time_ns;
795 
796 	req->biotail->bi_next = next->bio;
797 	req->biotail = next->biotail;
798 
799 	req->__data_len += blk_rq_bytes(next);
800 
801 	if (!blk_discard_mergable(req))
802 		elv_merge_requests(q, req, next);
803 
804 	/*
805 	 * 'next' is going away, so update stats accordingly
806 	 */
807 	blk_account_io_merge_request(next);
808 
809 	trace_block_rq_merge(next);
810 
811 	/*
812 	 * ownership of the bios has passed from next to req; return 'next'
813 	 * for the caller to free
814 	 */
815 	next->bio = NULL;
816 	return next;
817 }
818 
819 static struct request *attempt_back_merge(struct request_queue *q,
820 		struct request *rq)
821 {
822 	struct request *next = elv_latter_request(q, rq);
823 
824 	if (next)
825 		return attempt_merge(q, rq, next);
826 
827 	return NULL;
828 }
829 
830 static struct request *attempt_front_merge(struct request_queue *q,
831 		struct request *rq)
832 {
833 	struct request *prev = elv_former_request(q, rq);
834 
835 	if (prev)
836 		return attempt_merge(q, prev, rq);
837 
838 	return NULL;
839 }
840 
841 /*
842  * Try to merge 'next' into 'rq'. Return true if the merge happened, false
843  * otherwise. The caller is responsible for freeing 'next' if the merge
844  * happened.
845  */
846 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
847 			   struct request *next)
848 {
849 	return attempt_merge(q, rq, next);
850 }
851 
852 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
853 {
854 	if (!rq_mergeable(rq) || !bio_mergeable(bio))
855 		return false;
856 
857 	if (req_op(rq) != bio_op(bio))
858 		return false;
859 
860 	/* different data direction or already started, don't merge */
861 	if (bio_data_dir(bio) != rq_data_dir(rq))
862 		return false;
863 
864 	/* must be same device */
865 	if (rq->rq_disk != bio->bi_disk)
866 		return false;
867 
868 	/* don't merge across cgroup boundaries */
869 	if (!blk_cgroup_mergeable(rq, bio))
870 		return false;
871 
872 	/* only merge an integrity-protected bio into a likewise-protected rq */
873 	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
874 		return false;
875 
876 	/* Only merge if the crypt contexts are compatible */
877 	if (!bio_crypt_rq_ctx_compatible(rq, bio))
878 		return false;
879 
880 	/* must be using the same buffer */
881 	if (req_op(rq) == REQ_OP_WRITE_SAME &&
882 	    !blk_write_same_mergeable(rq->bio, bio))
883 		return false;
884 
885 	/*
886 	 * Don't allow merge of different write hints, or for a hint with
887 	 * non-hint IO.
888 	 */
889 	if (rq->write_hint != bio->bi_write_hint)
890 		return false;
891 
892 	if (rq->ioprio != bio_prio(bio))
893 		return false;
894 
895 	return true;
896 }
897 
898 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
899 {
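	/*
	 * A back merge appends @bio where @rq currently ends; a front merge
	 * requires @bio to end exactly where @rq starts.
	 */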
900 	if (blk_discard_mergable(rq))
901 		return ELEVATOR_DISCARD_MERGE;
902 	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
903 		return ELEVATOR_BACK_MERGE;
904 	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
905 		return ELEVATOR_FRONT_MERGE;
906 	return ELEVATOR_NO_MERGE;
907 }
908 
909 static void blk_account_io_merge_bio(struct request *req)
910 {
911 	if (!blk_do_io_stat(req))
912 		return;
913 
914 	part_stat_lock();
915 	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
916 	part_stat_unlock();
917 }
918 
919 enum bio_merge_status {
920 	BIO_MERGE_OK,
921 	BIO_MERGE_NONE,
922 	BIO_MERGE_FAILED,
923 };
924 
925 static enum bio_merge_status bio_attempt_back_merge(struct request *req,
926 		struct bio *bio, unsigned int nr_segs)
927 {
928 	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
929 
930 	if (!ll_back_merge_fn(req, bio, nr_segs))
931 		return BIO_MERGE_FAILED;
932 
933 	trace_block_bio_backmerge(req->q, req, bio);
934 	rq_qos_merge(req->q, req, bio);
935 
936 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
937 		blk_rq_set_mixed_merge(req);
938 
939 	req->biotail->bi_next = bio;
940 	req->biotail = bio;
941 	req->__data_len += bio->bi_iter.bi_size;
942 
943 	bio_crypt_free_ctx(bio);
944 
945 	blk_account_io_merge_bio(req);
946 	return BIO_MERGE_OK;
947 }
948 
949 static enum bio_merge_status bio_attempt_front_merge(struct request *req,
950 		struct bio *bio, unsigned int nr_segs)
951 {
952 	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
953 
954 	if (!ll_front_merge_fn(req, bio, nr_segs))
955 		return BIO_MERGE_FAILED;
956 
957 	trace_block_bio_frontmerge(req->q, req, bio);
958 	rq_qos_merge(req->q, req, bio);
959 
960 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
961 		blk_rq_set_mixed_merge(req);
962 
963 	bio->bi_next = req->bio;
964 	req->bio = bio;
965 
966 	req->__sector = bio->bi_iter.bi_sector;
967 	req->__data_len += bio->bi_iter.bi_size;
968 
969 	bio_crypt_do_front_merge(req, bio);
970 
971 	blk_account_io_merge_bio(req);
972 	return BIO_MERGE_OK;
973 }
974 
975 static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
976 		struct request *req, struct bio *bio)
977 {
978 	unsigned short segments = blk_rq_nr_discard_segments(req);
979 
980 	if (segments >= queue_max_discard_segments(q))
981 		goto no_merge;
982 	if (blk_rq_sectors(req) + bio_sectors(bio) >
983 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
984 		goto no_merge;
985 
986 	rq_qos_merge(q, req, bio);
987 
988 	req->biotail->bi_next = bio;
989 	req->biotail = bio;
990 	req->__data_len += bio->bi_iter.bi_size;
991 	req->nr_phys_segments = segments + 1;
992 
993 	blk_account_io_merge_bio(req);
994 	return BIO_MERGE_OK;
995 no_merge:
996 	req_set_nomerge(q, req);
997 	return BIO_MERGE_FAILED;
998 }
999 
1000 static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
1001 						   struct request *rq,
1002 						   struct bio *bio,
1003 						   unsigned int nr_segs,
1004 						   bool sched_allow_merge)
1005 {
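	/*
	 * When called from the plug path (@sched_allow_merge == false) the
	 * I/O scheduler is not consulted; otherwise it gets a veto on back
	 * and front merges.  Discard merges never consult the scheduler.
	 */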
1006 	if (!blk_rq_merge_ok(rq, bio))
1007 		return BIO_MERGE_NONE;
1008 
1009 	switch (blk_try_merge(rq, bio)) {
1010 	case ELEVATOR_BACK_MERGE:
1011 		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1012 			return bio_attempt_back_merge(rq, bio, nr_segs);
1013 		break;
1014 	case ELEVATOR_FRONT_MERGE:
1015 		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1016 			return bio_attempt_front_merge(rq, bio, nr_segs);
1017 		break;
1018 	case ELEVATOR_DISCARD_MERGE:
1019 		return bio_attempt_discard_merge(q, rq, bio);
1020 	default:
1021 		return BIO_MERGE_NONE;
1022 	}
1023 
1024 	return BIO_MERGE_FAILED;
1025 }
1026 
1027 /**
1028  * blk_attempt_plug_merge - try to merge with %current's plugged list
1029  * @q: request_queue new bio is being queued at
1030  * @bio: new bio being queued
1031  * @nr_segs: number of segments in @bio
1032  * @same_queue_rq: pointer to &struct request that gets filled in when
1033  * another request associated with @q is found on the plug list
1034  * (optional, may be %NULL)
1035  *
1036  * Determine whether @bio being queued on @q can be merged with a request
1037  * on %current's plugged list.  Returns %true if merge was successful,
1038  * otherwise %false.
1039  *
1040  * Plugging coalesces IOs from the same issuer for the same purpose without
1041  * going through @q->queue_lock.  As such it's more of an issuing mechanism
1042  * than scheduling, and the request, while it may have elvpriv data, is not
1043  * added to the elevator at this point.  In addition, we don't have
1044  * reliable access to the elevator outside queue lock.  Only check basic
1045  * merging parameters without querying the elevator.
1046  *
1047  * Caller must ensure !blk_queue_nomerges(q) beforehand.
1048  */
1049 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1050 		unsigned int nr_segs, struct request **same_queue_rq)
1051 {
1052 	struct blk_plug *plug;
1053 	struct request *rq;
1054 	struct list_head *plug_list;
1055 
1056 	plug = blk_mq_plug(q, bio);
1057 	if (!plug)
1058 		return false;
1059 
1060 	plug_list = &plug->mq_list;
1061 
1062 	list_for_each_entry_reverse(rq, plug_list, queuelist) {
1063 		if (rq->q == q && same_queue_rq) {
1064 			/*
1065 			 * Only the blk-mq multiple hardware queues case checks
1066 			 * the rq in the same queue; there should be only one
1067 			 * such rq in a queue
1068 			 */
1069 			*same_queue_rq = rq;
1070 		}
1071 
1072 		if (rq->q != q)
1073 			continue;
1074 
1075 		if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1076 		    BIO_MERGE_OK)
1077 			return true;
1078 	}
1079 
1080 	return false;
1081 }
1082 
1083 /*
1084  * Iterate list of requests and see if we can merge this bio with any
1085  * of them.
1086  */
1087 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
1088 			struct bio *bio, unsigned int nr_segs)
1089 {
1090 	struct request *rq;
1091 	int checked = 8;
1092 
1093 	list_for_each_entry_reverse(rq, list, queuelist) {
1094 		if (!checked--)
1095 			break;
1096 
1097 		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
1098 		case BIO_MERGE_NONE:
1099 			continue;
1100 		case BIO_MERGE_OK:
1101 			return true;
1102 		case BIO_MERGE_FAILED:
1103 			return false;
1104 		}
1105 
1106 	}
1107 
1108 	return false;
1109 }
1110 EXPORT_SYMBOL_GPL(blk_bio_list_merge);
1111 
1112 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
1113 		unsigned int nr_segs, struct request **merged_request)
1114 {
1115 	struct request *rq;
1116 
1117 	switch (elv_merge(q, &rq, bio)) {
1118 	case ELEVATOR_BACK_MERGE:
1119 		if (!blk_mq_sched_allow_merge(q, rq, bio))
1120 			return false;
1121 		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1122 			return false;
1123 		*merged_request = attempt_back_merge(q, rq);
1124 		if (!*merged_request)
1125 			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
1126 		return true;
1127 	case ELEVATOR_FRONT_MERGE:
1128 		if (!blk_mq_sched_allow_merge(q, rq, bio))
1129 			return false;
1130 		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1131 			return false;
1132 		*merged_request = attempt_front_merge(q, rq);
1133 		if (!*merged_request)
1134 			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
1135 		return true;
1136 	case ELEVATOR_DISCARD_MERGE:
1137 		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
1138 	default:
1139 		return false;
1140 	}
1141 }
1142 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
1143