1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Functions related to segment and merge handling
4  */
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/bio.h>
8 #include <linux/blkdev.h>
9 #include <linux/blk-integrity.h>
10 #include <linux/scatterlist.h>
11 #include <linux/part_stat.h>
12 #include <linux/blk-cgroup.h>
13 
14 #include <trace/events/block.h>
15 
16 #include "blk.h"
17 #include "blk-mq-sched.h"
18 #include "blk-rq-qos.h"
19 #include "blk-throttle.h"
20 
21 static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
22 {
23 	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
24 }
25 
26 static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
27 {
28 	struct bvec_iter iter = bio->bi_iter;
29 	int idx;
30 
31 	bio_get_first_bvec(bio, bv);
32 	if (bv->bv_len == bio->bi_iter.bi_size)
33 		return;		/* this bio only has a single bvec */
34 
35 	bio_advance_iter(bio, &iter, iter.bi_size);
36 
37 	if (!iter.bi_bvec_done)
38 		idx = iter.bi_idx - 1;
39 	else	/* in the middle of bvec */
40 		idx = iter.bi_idx;
41 
42 	*bv = bio->bi_io_vec[idx];
43 
44 	/*
45 	 * iter.bi_bvec_done records actual length of the last bvec
46 	 * if this bio ends in the middle of one io vector
47 	 */
48 	if (iter.bi_bvec_done)
49 		bv->bv_len = iter.bi_bvec_done;
50 }
51 
52 static inline bool bio_will_gap(struct request_queue *q,
53 		struct request *prev_rq, struct bio *prev, struct bio *next)
54 {
55 	struct bio_vec pb, nb;
56 
57 	if (!bio_has_data(prev) || !queue_virt_boundary(q))
58 		return false;
59 
60 	/*
61 	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
62	 * is quite difficult to respect the sg gap limit.  We work hard to
63	 * merge the huge number of small bios generated in cases such as mkfs.
64 	 */
65 	if (prev_rq)
66 		bio_get_first_bvec(prev_rq->bio, &pb);
67 	else
68 		bio_get_first_bvec(prev, &pb);
69 	if (pb.bv_offset & queue_virt_boundary(q))
70 		return true;
71 
72 	/*
73 	 * We don't need to worry about the situation that the merged segment
74 	 * ends in unaligned virt boundary:
75 	 *
76 	 * - if 'pb' ends aligned, the merged segment ends aligned
77 	 * - if 'pb' ends unaligned, the next bio must include
78 	 *   one single bvec of 'nb', otherwise the 'nb' can't
79 	 *   merge with 'pb'
80 	 */
81 	bio_get_last_bvec(prev, &pb);
82 	bio_get_first_bvec(next, &nb);
83 	if (biovec_phys_mergeable(q, &pb, &nb))
84 		return false;
85 	return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
86 }
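/*
 * Illustrative example (assumed limit): with a virt_boundary_mask of 0xfff,
 * as is typical for NVMe, two adjoining bios may only be merged if their
 * adjacent bvecs are physically contiguous, or if the first one ends and the
 * second one starts on a 4KiB boundary; anything else would leave a gap that
 * violates the boundary constraint.
 */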
87 
88 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
89 {
90 	return bio_will_gap(req->q, req, req->biotail, bio);
91 }
92 
93 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
94 {
95 	return bio_will_gap(req->q, NULL, bio, req->bio);
96 }
97 
98 /*
99  * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
100  * is defined as 'unsigned int', and it also has to be aligned to the
101  * logical block size, which is the minimum unit accepted by the hardware.
102  */
103 static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
104 {
105 	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
106 }
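/*
 * Worked example (illustrative): with a 512-byte logical block size this is
 * round_down(0xffffffff, 512) >> 9 = 0x7fffff sectors, i.e. just under 4GiB
 * per bio; with 4096-byte logical blocks it is 0x7ffff8 sectors.
 */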
107 
108 static struct bio *bio_split_discard(struct bio *bio,
109 				     const struct queue_limits *lim,
110 				     unsigned *nsegs, struct bio_set *bs)
111 {
112 	unsigned int max_discard_sectors, granularity;
113 	sector_t tmp;
114 	unsigned split_sectors;
115 
116 	*nsegs = 1;
117 
118 	/* Zero-sector (unknown) and one-sector granularities are the same.  */
119 	granularity = max(lim->discard_granularity >> 9, 1U);
120 
121 	max_discard_sectors =
122 		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
123 	max_discard_sectors -= max_discard_sectors % granularity;
124 
125 	if (unlikely(!max_discard_sectors)) {
126 		/* XXX: warn */
127 		return NULL;
128 	}
129 
130 	if (bio_sectors(bio) <= max_discard_sectors)
131 		return NULL;
132 
133 	split_sectors = max_discard_sectors;
134 
135 	/*
136 	 * If the next starting sector would be misaligned, stop the discard at
137 	 * the previous aligned sector.
138 	 */
139 	tmp = bio->bi_iter.bi_sector + split_sectors -
140 		((lim->discard_alignment >> 9) % granularity);
141 	tmp = sector_div(tmp, granularity);
142 
143 	if (split_sectors > tmp)
144 		split_sectors -= tmp;
145 
146 	return bio_split(bio, split_sectors, GFP_NOIO, bs);
147 }
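/*
 * Worked example (made-up limits): with an 8-sector discard granularity, zero
 * discard_alignment, a bio starting at sector 10 and max_discard_sectors of
 * 1024, tmp = (10 + 1024) % 8 = 2, so the split bio is shortened to 1022
 * sectors and the remainder resumes at the aligned sector 1032.
 */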
148 
149 static struct bio *bio_split_write_zeroes(struct bio *bio,
150 					  const struct queue_limits *lim,
151 					  unsigned *nsegs, struct bio_set *bs)
152 {
153 	*nsegs = 0;
154 	if (!lim->max_write_zeroes_sectors)
155 		return NULL;
156 	if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
157 		return NULL;
158 	return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
159 }
160 
161 /*
162  * Return the maximum number of sectors from the start of a bio that may be
163  * submitted as a single request to a block device. If enough sectors remain,
164  * align the end to the physical block size. Otherwise align the end to the
165  * logical block size. This approach minimizes the number of non-aligned
166  * requests that are submitted to a block device if the start of a bio is not
167  * aligned to a physical block boundary.
168  */
169 static inline unsigned get_max_io_size(struct bio *bio,
170 				       const struct queue_limits *lim)
171 {
172 	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
173 	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
174 	unsigned max_sectors = lim->max_sectors, start, end;
175 
176 	if (lim->chunk_sectors) {
177 		max_sectors = min(max_sectors,
178 			blk_chunk_sectors_left(bio->bi_iter.bi_sector,
179 					       lim->chunk_sectors));
180 	}
181 
182 	start = bio->bi_iter.bi_sector & (pbs - 1);
183 	end = (start + max_sectors) & ~(pbs - 1);
184 	if (end > start)
185 		return end - start;
186 	return max_sectors & ~(lbs - 1);
187 }
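/*
 * Worked example (assumed limits): with 4KiB physical blocks (pbs = 8),
 * lbs = 1 and max_sectors = 256, a bio starting at sector 3 gives start = 3
 * and end = (3 + 256) & ~7 = 256, so up to 253 sectors may be issued and the
 * request ends on a physical block boundary.
 */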
188 
189 /**
190  * get_max_segment_size() - maximum number of bytes to add as a single segment
191  * @lim: Request queue limits.
192  * @start_page: See below.
193  * @offset: Offset from @start_page where to add a segment.
194  *
195  * Returns the maximum number of bytes that can be added as a single segment.
196  */
197 static inline unsigned get_max_segment_size(const struct queue_limits *lim,
198 		struct page *start_page, unsigned long offset)
199 {
200 	unsigned long mask = lim->seg_boundary_mask;
201 
202 	offset = mask & (page_to_phys(start_page) + offset);
203 
204 	/*
205 	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
206 	 * after having calculated the minimum.
207 	 */
208 	return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;
209 }
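/*
 * Worked example (assumed limits): with seg_boundary_mask = 0xffff (64KiB)
 * and max_segment_size = 64KiB, a segment starting 0x1000 bytes into a
 * boundary window may span at most min(0xefff, 0xffff) + 1 = 0xf000 bytes,
 * so it never crosses the 64KiB boundary.
 */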
210 
211 /**
212  * bvec_split_segs - verify whether or not a bvec should be split in the middle
213  * @lim:      [in] queue limits to split based on
214  * @bv:       [in] bvec to examine
215  * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
216  *            by the number of segments from @bv that may be appended to that
217  *            bio without exceeding @max_segs
218  * @bytes:    [in,out] Number of bytes in the bio being built. Incremented
219  *            by the number of bytes from @bv that may be appended to that
220  *            bio without exceeding @max_bytes
221  * @max_segs: [in] upper bound for *@nsegs
222  * @max_bytes: [in] upper bound for *@bytes
223  *
224  * When splitting a bio, it can happen that a bvec is encountered that is too
225  * big to fit in a single segment and hence that it has to be split in the
226  * middle. This function verifies whether or not that should happen. The value
227  * %true is returned if and only if appending the entire @bv to a bio with
228  * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
229  * the block driver.
230  */
231 static bool bvec_split_segs(const struct queue_limits *lim,
232 		const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
233 		unsigned max_segs, unsigned max_bytes)
234 {
235 	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
236 	unsigned len = min(bv->bv_len, max_len);
237 	unsigned total_len = 0;
238 	unsigned seg_size = 0;
239 
240 	while (len && *nsegs < max_segs) {
241 		seg_size = get_max_segment_size(lim, bv->bv_page,
242 						bv->bv_offset + total_len);
243 		seg_size = min(seg_size, len);
244 
245 		(*nsegs)++;
246 		total_len += seg_size;
247 		len -= seg_size;
248 
249 		if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
250 			break;
251 	}
252 
253 	*bytes += total_len;
254 
255 	/* tell the caller to split the bvec if it is too big to fit */
256 	return len > 0 || bv->bv_len > max_len;
257 }
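/*
 * Illustrative example (assumed limits): a physically contiguous 256KiB bvec
 * with max_segment_size = 64KiB and no virt boundary is accounted as four
 * 64KiB segments; the function returns false because the whole bvec fits
 * within the segment and byte budgets, so no split is required.
 */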
258 
259 /**
260  * bio_split_rw - split a bio in two bios
261  * @bio:  [in] bio to be split
262  * @lim:  [in] queue limits to split based on
263  * @segs: [out] number of segments in the bio with the first half of the sectors
264  * @bs:	  [in] bio set to allocate the clone from
265  * @max_bytes: [in] maximum number of bytes per bio
266  *
267  * Clone @bio, update the bi_iter of the clone to represent the first sectors
268  * of @bio and update @bio->bi_iter to represent the remaining sectors. The
269  * following is guaranteed for the cloned bio:
270  * - That it has at most @max_bytes worth of data
271  * - That it has at most queue_max_segments(@q) segments.
272  *
273  * Except for discard requests the cloned bio will point at the bi_io_vec of
274  * the original bio. It is the responsibility of the caller to ensure that the
275  * original bio is not freed before the cloned bio. The caller is also
276  * responsible for ensuring that @bs is only destroyed after processing of the
277  * split bio has finished.
278  */
279 struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
280 		unsigned *segs, struct bio_set *bs, unsigned max_bytes)
281 {
282 	struct bio_vec bv, bvprv, *bvprvp = NULL;
283 	struct bvec_iter iter;
284 	unsigned nsegs = 0, bytes = 0;
285 
286 	bio_for_each_bvec(bv, bio, iter) {
287 		/*
288 		 * If the queue doesn't support SG gaps and adding this
289 		 * offset would create a gap, disallow it.
290 		 */
291 		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
292 			goto split;
293 
294 		if (nsegs < lim->max_segments &&
295 		    bytes + bv.bv_len <= max_bytes &&
296 		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
297 			/* single-page bvec optimization */
298 			nsegs += blk_segments(lim, bv.bv_len);
299 			bytes += bv.bv_len;
300 		} else {
301 			if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
302 					lim->max_segments, max_bytes))
303 				goto split;
304 		}
305 
306 		bvprv = bv;
307 		bvprvp = &bvprv;
308 	}
309 
310 	*segs = nsegs;
311 	return NULL;
312 split:
313 	/*
314 	 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
315 	 * with EAGAIN if splitting is required and return an error pointer.
316 	 */
317 	if (bio->bi_opf & REQ_NOWAIT) {
318 		bio->bi_status = BLK_STS_AGAIN;
319 		bio_endio(bio);
320 		return ERR_PTR(-EAGAIN);
321 	}
322 
323 	*segs = nsegs;
324 
325 	/*
326 	 * Individual bvecs might not be logical block aligned. Round down the
327 	 * split size so that each bio is properly block size aligned, even if
328 	 * we do not use the full hardware limits.
329 	 */
330 	bytes = ALIGN_DOWN(bytes, lim->logical_block_size);
331 
332 	/*
333	 * Bio splitting may cause subtle trouble such as a hang when doing sync
334	 * iopoll in the direct IO routine. Given that the performance gain of
335	 * iopoll for big IO can be trivial, disable iopoll when a split is needed.
336 	 */
337 	bio_clear_polled(bio);
338 	return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
339 }
340 EXPORT_SYMBOL_GPL(bio_split_rw);
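/*
 * Typical use (sketch, based on the caller below): __bio_split_to_limits()
 * passes max_bytes = get_max_io_size(bio, lim) << SECTOR_SHIFT and submits
 * the shortened remainder of @bio separately.  A NULL return means no split
 * was needed; an ERR_PTR return means a REQ_NOWAIT bio was ended with
 * -EAGAIN instead of being split.
 */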
341 
342 /**
343  * __bio_split_to_limits - split a bio to fit the queue limits
344  * @bio:     bio to be split
345  * @lim:     queue limits to split based on
346  * @nr_segs: returns the number of segments in the returned bio
347  *
348  * Check if @bio needs splitting based on the queue limits, and if so split off
349  * a bio fitting the limits from the beginning of @bio and return it.  @bio is
350  * shortened to the remainder and re-submitted.
351  *
352  * The split bio is allocated from @q->bio_split, which is provided by the
353  * block layer.
354  */
355 struct bio *__bio_split_to_limits(struct bio *bio,
356 				  const struct queue_limits *lim,
357 				  unsigned int *nr_segs)
358 {
359 	struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
360 	struct bio *split;
361 
362 	switch (bio_op(bio)) {
363 	case REQ_OP_DISCARD:
364 	case REQ_OP_SECURE_ERASE:
365 		split = bio_split_discard(bio, lim, nr_segs, bs);
366 		break;
367 	case REQ_OP_WRITE_ZEROES:
368 		split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
369 		break;
370 	default:
371 		split = bio_split_rw(bio, lim, nr_segs, bs,
372 				get_max_io_size(bio, lim) << SECTOR_SHIFT);
373 		if (IS_ERR(split))
374 			return NULL;
375 		break;
376 	}
377 
378 	if (split) {
379		/* there is no chance to merge the split bio */
380 		split->bi_opf |= REQ_NOMERGE;
381 
382 		blkcg_bio_issue_init(split);
383 		bio_chain(split, bio);
384 		trace_block_split(split, bio->bi_iter.bi_sector);
385 		submit_bio_noacct(bio);
386 		return split;
387 	}
388 	return bio;
389 }
390 
391 /**
392  * bio_split_to_limits - split a bio to fit the queue limits
393  * @bio:     bio to be split
394  *
395  * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
396  * if so split off a bio fitting the limits from the beginning of @bio and
397  * return it.  @bio is shortened to the remainder and re-submitted.
398  *
399  * The split bio is allocated from @q->bio_split, which is provided by the
400  * block layer.
401  */
402 struct bio *bio_split_to_limits(struct bio *bio)
403 {
404 	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
405 	unsigned int nr_segs;
406 
407 	if (bio_may_exceed_limits(bio, lim))
408 		return __bio_split_to_limits(bio, lim, &nr_segs);
409 	return bio;
410 }
411 EXPORT_SYMBOL(bio_split_to_limits);
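/*
 * Typical use (sketch): bio-based stacking drivers (e.g. md or the NVMe
 * multipath personality) call this from their ->submit_bio handler so that
 * the bio they pass down respects the limits of the underlying queue.
 */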
412 
413 unsigned int blk_recalc_rq_segments(struct request *rq)
414 {
415 	unsigned int nr_phys_segs = 0;
416 	unsigned int bytes = 0;
417 	struct req_iterator iter;
418 	struct bio_vec bv;
419 
420 	if (!rq->bio)
421 		return 0;
422 
423 	switch (bio_op(rq->bio)) {
424 	case REQ_OP_DISCARD:
425 	case REQ_OP_SECURE_ERASE:
426 		if (queue_max_discard_segments(rq->q) > 1) {
427 			struct bio *bio = rq->bio;
428 
429 			for_each_bio(bio)
430 				nr_phys_segs++;
431 			return nr_phys_segs;
432 		}
433 		return 1;
434 	case REQ_OP_WRITE_ZEROES:
435 		return 0;
436 	default:
437 		break;
438 	}
439 
440 	rq_for_each_bvec(bv, rq, iter)
441 		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
442 				UINT_MAX, UINT_MAX);
443 	return nr_phys_segs;
444 }
445 
446 static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
447 		struct scatterlist *sglist)
448 {
449 	if (!*sg)
450 		return sglist;
451 
452 	/*
453 	 * If the driver previously mapped a shorter list, we could see a
454 	 * termination bit prematurely unless it fully inits the sg table
455 	 * on each mapping. We KNOW that there must be more entries here
456 	 * or the driver would be buggy, so force clear the termination bit
457 	 * to avoid doing a full sg_init_table() in drivers for each command.
458 	 */
459 	sg_unmark_end(*sg);
460 	return sg_next(*sg);
461 }
462 
463 static unsigned blk_bvec_map_sg(struct request_queue *q,
464 		struct bio_vec *bvec, struct scatterlist *sglist,
465 		struct scatterlist **sg)
466 {
467 	unsigned nbytes = bvec->bv_len;
468 	unsigned nsegs = 0, total = 0;
469 
470 	while (nbytes > 0) {
471 		unsigned offset = bvec->bv_offset + total;
472 		unsigned len = min(get_max_segment_size(&q->limits,
473 				   bvec->bv_page, offset), nbytes);
474 		struct page *page = bvec->bv_page;
475 
476 		/*
477 		 * Unfortunately a fair number of drivers barf on scatterlists
478 		 * that have an offset larger than PAGE_SIZE, despite other
479 		 * subsystems dealing with that invariant just fine.  For now
480 		 * stick to the legacy format where we never present those from
481 		 * the block layer, but the code below should be removed once
482 		 * these offenders (mostly MMC/SD drivers) are fixed.
483 		 */
484 		page += (offset >> PAGE_SHIFT);
485 		offset &= ~PAGE_MASK;
486 
487 		*sg = blk_next_sg(sg, sglist);
488 		sg_set_page(*sg, page, len, offset);
489 
490 		total += len;
491 		nbytes -= len;
492 		nsegs++;
493 	}
494 
495 	return nsegs;
496 }
497 
498 static inline int __blk_bvec_map_sg(struct bio_vec bv,
499 		struct scatterlist *sglist, struct scatterlist **sg)
500 {
501 	*sg = blk_next_sg(sg, sglist);
502 	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
503 	return 1;
504 }
505 
506 /* only try to merge bvecs into one sg if they are from two bios */
507 static inline bool
508 __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
509 			   struct bio_vec *bvprv, struct scatterlist **sg)
510 {
511 
512 	int nbytes = bvec->bv_len;
513 
514 	if (!*sg)
515 		return false;
516 
517 	if ((*sg)->length + nbytes > queue_max_segment_size(q))
518 		return false;
519 
520 	if (!biovec_phys_mergeable(q, bvprv, bvec))
521 		return false;
522 
523 	(*sg)->length += nbytes;
524 
525 	return true;
526 }
527 
528 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
529 			     struct scatterlist *sglist,
530 			     struct scatterlist **sg)
531 {
532 	struct bio_vec bvec, bvprv = { NULL };
533 	struct bvec_iter iter;
534 	int nsegs = 0;
535 	bool new_bio = false;
536 
537 	for_each_bio(bio) {
538 		bio_for_each_bvec(bvec, bio, iter) {
539 			/*
540			 * Only try to merge bvecs from two different bios,
541			 * since bio-internal merging was already done when
542			 * pages were added to the bio
543 			 */
544 			if (new_bio &&
545 			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
546 				goto next_bvec;
547 
548 			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE &&
549 			    (!blk_queue_sub_page_limits(&q->limits) ||
550 			     bvec.bv_len <= q->limits.max_segment_size))
551 				/* single-segment bvec optimization */
552 				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
553 			else
554 				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
555  next_bvec:
556 			new_bio = false;
557 		}
558 		if (likely(bio->bi_iter.bi_size)) {
559 			bvprv = bvec;
560 			new_bio = true;
561 		}
562 	}
563 
564 	return nsegs;
565 }
566 
567 /*
568  * map a request to scatterlist, return number of sg entries setup. Caller
569  * must make sure sg can hold rq->nr_phys_segments entries
570  */
571 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
572 		struct scatterlist *sglist, struct scatterlist **last_sg)
573 {
574 	int nsegs = 0;
575 
576 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
577 		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
578 	else if (rq->bio)
579 		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
580 
581 	if (*last_sg)
582 		sg_mark_end(*last_sg);
583 
584 	/*
585	 * Something must have gone wrong if the computed number of
586	 * segments is bigger than the number of the request's physical segments
587 	 */
588 	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
589 
590 	return nsegs;
591 }
592 EXPORT_SYMBOL(__blk_rq_map_sg);
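/*
 * Typical use (sketch): a driver allocates a scatterlist with at least
 * blk_rq_nr_phys_segments(rq) entries, initializes it with sg_init_table(),
 * calls blk_rq_map_sg() (the inline wrapper around this helper) and then
 * hands the resulting entries to dma_map_sg() before issuing the command.
 */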
593 
594 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
595 						  sector_t offset)
596 {
597 	struct request_queue *q = rq->q;
598 	unsigned int max_sectors;
599 
600 	if (blk_rq_is_passthrough(rq))
601 		return q->limits.max_hw_sectors;
602 
603 	max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
604 	if (!q->limits.chunk_sectors ||
605 	    req_op(rq) == REQ_OP_DISCARD ||
606 	    req_op(rq) == REQ_OP_SECURE_ERASE)
607 		return max_sectors;
608 	return min(max_sectors,
609 		   blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
610 }
611 
612 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
613 		unsigned int nr_phys_segs)
614 {
615 	if (!blk_cgroup_mergeable(req, bio))
616 		goto no_merge;
617 
618 	if (blk_integrity_merge_bio(req->q, req, bio) == false)
619 		goto no_merge;
620 
621 	/* discard request merge won't add new segment */
622 	if (req_op(req) == REQ_OP_DISCARD)
623 		return 1;
624 
625 	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
626 		goto no_merge;
627 
628 	/*
629 	 * This will form the start of a new hw segment.  Bump both
630 	 * counters.
631 	 */
632 	req->nr_phys_segments += nr_phys_segs;
633 	return 1;
634 
635 no_merge:
636 	req_set_nomerge(req->q, req);
637 	return 0;
638 }
639 
640 int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
641 {
642 	if (req_gap_back_merge(req, bio))
643 		return 0;
644 	if (blk_integrity_rq(req) &&
645 	    integrity_req_gap_back_merge(req, bio))
646 		return 0;
647 	if (!bio_crypt_ctx_back_mergeable(req, bio))
648 		return 0;
649 	if (blk_rq_sectors(req) + bio_sectors(bio) >
650 	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
651 		req_set_nomerge(req->q, req);
652 		return 0;
653 	}
654 
655 	return ll_new_hw_segment(req, bio, nr_segs);
656 }
657 
658 static int ll_front_merge_fn(struct request *req, struct bio *bio,
659 		unsigned int nr_segs)
660 {
661 	if (req_gap_front_merge(req, bio))
662 		return 0;
663 	if (blk_integrity_rq(req) &&
664 	    integrity_req_gap_front_merge(req, bio))
665 		return 0;
666 	if (!bio_crypt_ctx_front_mergeable(req, bio))
667 		return 0;
668 	if (blk_rq_sectors(req) + bio_sectors(bio) >
669 	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
670 		req_set_nomerge(req->q, req);
671 		return 0;
672 	}
673 
674 	return ll_new_hw_segment(req, bio, nr_segs);
675 }
676 
677 static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
678 		struct request *next)
679 {
680 	unsigned short segments = blk_rq_nr_discard_segments(req);
681 
682 	if (segments >= queue_max_discard_segments(q))
683 		goto no_merge;
684 	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
685 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
686 		goto no_merge;
687 
688 	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
689 	return true;
690 no_merge:
691 	req_set_nomerge(q, req);
692 	return false;
693 }
694 
695 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
696 				struct request *next)
697 {
698 	int total_phys_segments;
699 
700 	if (req_gap_back_merge(req, next->bio))
701 		return 0;
702 
703 	/*
704 	 * Will it become too large?
705 	 */
706 	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
707 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
708 		return 0;
709 
710 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
711 	if (total_phys_segments > blk_rq_get_max_segments(req))
712 		return 0;
713 
714 	if (!blk_cgroup_mergeable(req, next->bio))
715 		return 0;
716 
717 	if (blk_integrity_merge_rq(q, req, next) == false)
718 		return 0;
719 
720 	if (!bio_crypt_ctx_merge_rq(req, next))
721 		return 0;
722 
723 	/* Merge is OK... */
724 	req->nr_phys_segments = total_phys_segments;
725 	return 1;
726 }
727 
728 /**
729  * blk_rq_set_mixed_merge - mark a request as mixed merge
730  * @rq: request to mark as mixed merge
731  *
732  * Description:
733  *     @rq is about to be mixed merged.  Make sure the attributes
734  *     which can be mixed are set in each bio and mark @rq as mixed
735  *     merged.
736  */
737 void blk_rq_set_mixed_merge(struct request *rq)
738 {
739 	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
740 	struct bio *bio;
741 
742 	if (rq->rq_flags & RQF_MIXED_MERGE)
743 		return;
744 
745 	/*
746 	 * @rq will no longer represent mixable attributes for all the
747 	 * contained bios.  It will just track those of the first one.
748	 * Distribute the attributes to each bio.
749 	 */
750 	for (bio = rq->bio; bio; bio = bio->bi_next) {
751 		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
752 			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
753 		bio->bi_opf |= ff;
754 	}
755 	rq->rq_flags |= RQF_MIXED_MERGE;
756 }
757 
758 static inline blk_opf_t bio_failfast(const struct bio *bio)
759 {
760 	if (bio->bi_opf & REQ_RAHEAD)
761 		return REQ_FAILFAST_MASK;
762 
763 	return bio->bi_opf & REQ_FAILFAST_MASK;
764 }
765 
766 /*
767  * After we are marked as MIXED_MERGE, any new RA bio has to be updated
768  * as failfast, and request's failfast has to be updated in case of
769  * front merge.
770  */
771 static inline void blk_update_mixed_merge(struct request *req,
772 		struct bio *bio, bool front_merge)
773 {
774 	if (req->rq_flags & RQF_MIXED_MERGE) {
775 		if (bio->bi_opf & REQ_RAHEAD)
776 			bio->bi_opf |= REQ_FAILFAST_MASK;
777 
778 		if (front_merge) {
779 			req->cmd_flags &= ~REQ_FAILFAST_MASK;
780 			req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
781 		}
782 	}
783 }
784 
785 static void blk_account_io_merge_request(struct request *req)
786 {
787 	if (blk_do_io_stat(req)) {
788 		part_stat_lock();
789 		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
790 		part_stat_local_dec(req->part,
791 				    in_flight[op_is_write(req_op(req))]);
792 		part_stat_unlock();
793 	}
794 }
795 
796 static enum elv_merge blk_try_req_merge(struct request *req,
797 					struct request *next)
798 {
799 	if (blk_discard_mergable(req))
800 		return ELEVATOR_DISCARD_MERGE;
801 	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
802 		return ELEVATOR_BACK_MERGE;
803 
804 	return ELEVATOR_NO_MERGE;
805 }
806 
807 /*
808  * For non-mq, this has to be called with the request spinlock acquired.
809  * For mq with scheduling, the appropriate queue wide lock should be held.
810  */
811 static struct request *attempt_merge(struct request_queue *q,
812 				     struct request *req, struct request *next)
813 {
814 	if (!rq_mergeable(req) || !rq_mergeable(next))
815 		return NULL;
816 
817 	if (req_op(req) != req_op(next))
818 		return NULL;
819 
820 	if (rq_data_dir(req) != rq_data_dir(next))
821 		return NULL;
822 
823 	/* Don't merge requests with different write hints. */
824 	if (req->write_hint != next->write_hint)
825 		return NULL;
826 
827 	if (req->ioprio != next->ioprio)
828 		return NULL;
829 
830 	/*
831 	 * If we are allowed to merge, then append bio list
832 	 * from next to rq and release next. merge_requests_fn
833 	 * will have updated segment counts, update sector
834 	 * counts here. Handle DISCARDs separately, as they
835 	 * have separate settings.
836 	 */
837 
838 	switch (blk_try_req_merge(req, next)) {
839 	case ELEVATOR_DISCARD_MERGE:
840 		if (!req_attempt_discard_merge(q, req, next))
841 			return NULL;
842 		break;
843 	case ELEVATOR_BACK_MERGE:
844 		if (!ll_merge_requests_fn(q, req, next))
845 			return NULL;
846 		break;
847 	default:
848 		return NULL;
849 	}
850 
851 	/*
852 	 * If failfast settings disagree or any of the two is already
853 	 * a mixed merge, mark both as mixed before proceeding.  This
854 	 * makes sure that all involved bios have mixable attributes
855 	 * set properly.
856 	 */
857 	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
858 	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
859 	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
860 		blk_rq_set_mixed_merge(req);
861 		blk_rq_set_mixed_merge(next);
862 	}
863 
864 	/*
865 	 * At this point we have either done a back merge or front merge. We
866 	 * need the smaller start_time_ns of the merged requests to be the
867 	 * current request for accounting purposes.
868 	 */
869 	if (next->start_time_ns < req->start_time_ns)
870 		req->start_time_ns = next->start_time_ns;
871 
872 	req->biotail->bi_next = next->bio;
873 	req->biotail = next->biotail;
874 
875 	req->__data_len += blk_rq_bytes(next);
876 
877 	if (!blk_discard_mergable(req))
878 		elv_merge_requests(q, req, next);
879 
880 	blk_crypto_rq_put_keyslot(next);
881 
882 	/*
883 	 * 'next' is going away, so update stats accordingly
884 	 */
885 	blk_account_io_merge_request(next);
886 
887 	trace_block_rq_merge(next);
888 
889 	/*
890 	 * ownership of bio passed from next to req, return 'next' for
891 	 * the caller to free
892 	 */
893 	next->bio = NULL;
894 	return next;
895 }
896 
897 static struct request *attempt_back_merge(struct request_queue *q,
898 		struct request *rq)
899 {
900 	struct request *next = elv_latter_request(q, rq);
901 
902 	if (next)
903 		return attempt_merge(q, rq, next);
904 
905 	return NULL;
906 }
907 
908 static struct request *attempt_front_merge(struct request_queue *q,
909 		struct request *rq)
910 {
911 	struct request *prev = elv_former_request(q, rq);
912 
913 	if (prev)
914 		return attempt_merge(q, prev, rq);
915 
916 	return NULL;
917 }
918 
919 /*
920  * Try to merge 'next' into 'rq'. Return true if the merge happened, false
921  * otherwise. The caller is responsible for freeing 'next' if the merge
922  * happened.
923  */
924 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
925 			   struct request *next)
926 {
927 	return attempt_merge(q, rq, next);
928 }
929 
930 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
931 {
932 	if (!rq_mergeable(rq) || !bio_mergeable(bio))
933 		return false;
934 
935 	if (req_op(rq) != bio_op(bio))
936 		return false;
937 
938 	/* different data direction or already started, don't merge */
939 	if (bio_data_dir(bio) != rq_data_dir(rq))
940 		return false;
941 
942 	/* don't merge across cgroup boundaries */
943 	if (!blk_cgroup_mergeable(rq, bio))
944 		return false;
945 
946 	/* only merge integrity protected bio into ditto rq */
947 	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
948 		return false;
949 
950 	/* Only merge if the crypt contexts are compatible */
951 	if (!bio_crypt_rq_ctx_compatible(rq, bio))
952 		return false;
953 
954 	/* Don't merge requests with different write hints. */
955 	if (rq->write_hint != bio->bi_write_hint)
956 		return false;
957 
958 	if (rq->ioprio != bio_prio(bio))
959 		return false;
960 
961 	return true;
962 }
963 
964 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
965 {
966 	if (blk_discard_mergable(rq))
967 		return ELEVATOR_DISCARD_MERGE;
968 	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
969 		return ELEVATOR_BACK_MERGE;
970 	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
971 		return ELEVATOR_FRONT_MERGE;
972 	return ELEVATOR_NO_MERGE;
973 }
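/*
 * Worked example: for a request covering sectors [100, 108), an 8-sector bio
 * starting at sector 108 is a back merge candidate, while an 8-sector bio
 * starting at sector 92 is a front merge candidate (92 + 8 == 100).
 */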
974 
975 static void blk_account_io_merge_bio(struct request *req)
976 {
977 	if (!blk_do_io_stat(req))
978 		return;
979 
980 	part_stat_lock();
981 	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
982 	part_stat_unlock();
983 }
984 
985 enum bio_merge_status {
986 	BIO_MERGE_OK,
987 	BIO_MERGE_NONE,
988 	BIO_MERGE_FAILED,
989 };
990 
991 static enum bio_merge_status bio_attempt_back_merge(struct request *req,
992 		struct bio *bio, unsigned int nr_segs)
993 {
994 	const blk_opf_t ff = bio_failfast(bio);
995 
996 	if (!ll_back_merge_fn(req, bio, nr_segs))
997 		return BIO_MERGE_FAILED;
998 
999 	trace_block_bio_backmerge(bio);
1000 	rq_qos_merge(req->q, req, bio);
1001 
1002 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1003 		blk_rq_set_mixed_merge(req);
1004 
1005 	blk_update_mixed_merge(req, bio, false);
1006 
1007 	req->biotail->bi_next = bio;
1008 	req->biotail = bio;
1009 	req->__data_len += bio->bi_iter.bi_size;
1010 
1011 	bio_crypt_free_ctx(bio);
1012 
1013 	blk_account_io_merge_bio(req);
1014 	return BIO_MERGE_OK;
1015 }
1016 
1017 static enum bio_merge_status bio_attempt_front_merge(struct request *req,
1018 		struct bio *bio, unsigned int nr_segs)
1019 {
1020 	const blk_opf_t ff = bio_failfast(bio);
1021 
1022 	if (!ll_front_merge_fn(req, bio, nr_segs))
1023 		return BIO_MERGE_FAILED;
1024 
1025 	trace_block_bio_frontmerge(bio);
1026 	rq_qos_merge(req->q, req, bio);
1027 
1028 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1029 		blk_rq_set_mixed_merge(req);
1030 
1031 	blk_update_mixed_merge(req, bio, true);
1032 
1033 	bio->bi_next = req->bio;
1034 	req->bio = bio;
1035 
1036 	req->__sector = bio->bi_iter.bi_sector;
1037 	req->__data_len += bio->bi_iter.bi_size;
1038 
1039 	bio_crypt_do_front_merge(req, bio);
1040 
1041 	blk_account_io_merge_bio(req);
1042 	return BIO_MERGE_OK;
1043 }
1044 
1045 static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
1046 		struct request *req, struct bio *bio)
1047 {
1048 	unsigned short segments = blk_rq_nr_discard_segments(req);
1049 
1050 	if (segments >= queue_max_discard_segments(q))
1051 		goto no_merge;
1052 	if (blk_rq_sectors(req) + bio_sectors(bio) >
1053 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
1054 		goto no_merge;
1055 
1056 	rq_qos_merge(q, req, bio);
1057 
1058 	req->biotail->bi_next = bio;
1059 	req->biotail = bio;
1060 	req->__data_len += bio->bi_iter.bi_size;
1061 	req->nr_phys_segments = segments + 1;
1062 
1063 	blk_account_io_merge_bio(req);
1064 	return BIO_MERGE_OK;
1065 no_merge:
1066 	req_set_nomerge(q, req);
1067 	return BIO_MERGE_FAILED;
1068 }
1069 
1070 static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
1071 						   struct request *rq,
1072 						   struct bio *bio,
1073 						   unsigned int nr_segs,
1074 						   bool sched_allow_merge)
1075 {
1076 	if (!blk_rq_merge_ok(rq, bio))
1077 		return BIO_MERGE_NONE;
1078 
1079 	switch (blk_try_merge(rq, bio)) {
1080 	case ELEVATOR_BACK_MERGE:
1081 		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1082 			return bio_attempt_back_merge(rq, bio, nr_segs);
1083 		break;
1084 	case ELEVATOR_FRONT_MERGE:
1085 		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1086 			return bio_attempt_front_merge(rq, bio, nr_segs);
1087 		break;
1088 	case ELEVATOR_DISCARD_MERGE:
1089 		return bio_attempt_discard_merge(q, rq, bio);
1090 	default:
1091 		return BIO_MERGE_NONE;
1092 	}
1093 
1094 	return BIO_MERGE_FAILED;
1095 }
1096 
1097 /**
1098  * blk_attempt_plug_merge - try to merge with %current's plugged list
1099  * @q: request_queue new bio is being queued at
1100  * @bio: new bio being queued
1101  * @nr_segs: number of segments in @bio
1102  *
1103  *
1104  * Determine whether @bio being queued on @q can be merged with the previous
1105  * request on %current's plugged list.  Returns %true if merge was successful,
1106  * otherwise %false.
1107  *
1108  * Plugging coalesces IOs from the same issuer for the same purpose without
1109  * going through @q->queue_lock.  As such it's more of an issuing mechanism
1110  * than scheduling, and the request, while may have elvpriv data, is not
1111  * than scheduling, and the request, while it may have elvpriv data, is not
1112  * added to the elevator at this point.  In addition, we don't have
1113  * merging parameters without querying the elevator.
1114  *
1115  * Caller must ensure !blk_queue_nomerges(q) beforehand.
1116  */
1117 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1118 		unsigned int nr_segs)
1119 {
1120 	struct blk_plug *plug;
1121 	struct request *rq;
1122 
1123 	plug = blk_mq_plug(bio);
1124 	if (!plug || rq_list_empty(plug->mq_list))
1125 		return false;
1126 
1127 	rq_list_for_each(&plug->mq_list, rq) {
1128 		if (rq->q == q) {
1129 			if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1130 			    BIO_MERGE_OK)
1131 				return true;
1132 			break;
1133 		}
1134 
1135 		/*
1136 		 * Only keep iterating plug list for merges if we have multiple
1137 		 * queues
1138 		 */
1139 		if (!plug->multiple_queues)
1140 			break;
1141 	}
1142 	return false;
1143 }
1144 
1145 /*
1146  * Iterate list of requests and see if we can merge this bio with any
1147  * of them.
1148  */
1149 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
1150 			struct bio *bio, unsigned int nr_segs)
1151 {
1152 	struct request *rq;
1153 	int checked = 8;
1154 
1155 	list_for_each_entry_reverse(rq, list, queuelist) {
1156 		if (!checked--)
1157 			break;
1158 
1159 		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
1160 		case BIO_MERGE_NONE:
1161 			continue;
1162 		case BIO_MERGE_OK:
1163 			return true;
1164 		case BIO_MERGE_FAILED:
1165 			return false;
1166 		}
1167 
1168 	}
1169 
1170 	return false;
1171 }
1172 EXPORT_SYMBOL_GPL(blk_bio_list_merge);
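/*
 * Typical use (sketch): I/O schedulers such as kyber call this from their
 * ->bio_merge hook to scan a short tail of an internal request list for a
 * merge candidate before allocating a new request.
 */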
1173 
1174 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
1175 		unsigned int nr_segs, struct request **merged_request)
1176 {
1177 	struct request *rq;
1178 
1179 	switch (elv_merge(q, &rq, bio)) {
1180 	case ELEVATOR_BACK_MERGE:
1181 		if (!blk_mq_sched_allow_merge(q, rq, bio))
1182 			return false;
1183 		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1184 			return false;
1185 		*merged_request = attempt_back_merge(q, rq);
1186 		if (!*merged_request)
1187 			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
1188 		return true;
1189 	case ELEVATOR_FRONT_MERGE:
1190 		if (!blk_mq_sched_allow_merge(q, rq, bio))
1191 			return false;
1192 		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1193 			return false;
1194 		*merged_request = attempt_front_merge(q, rq);
1195 		if (!*merged_request)
1196 			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
1197 		return true;
1198 	case ELEVATOR_DISCARD_MERGE:
1199 		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
1200 	default:
1201 		return false;
1202 	}
1203 }
1204 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
1205