1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Functions related to segment and merge handling
4  */
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/bio.h>
8 #include <linux/blkdev.h>
9 #include <linux/blk-integrity.h>
10 #include <linux/scatterlist.h>
11 #include <linux/part_stat.h>
12 #include <linux/blk-cgroup.h>
13 
14 #include <trace/events/block.h>
15 
16 #include "blk.h"
17 #include "blk-mq-sched.h"
18 #include "blk-rq-qos.h"
19 #include "blk-throttle.h"
20 
21 static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
22 {
23 	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
24 }
25 
26 static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
27 {
28 	struct bvec_iter iter = bio->bi_iter;
29 	int idx;
30 
31 	bio_get_first_bvec(bio, bv);
32 	if (bv->bv_len == bio->bi_iter.bi_size)
33 		return;		/* this bio only has a single bvec */
34 
35 	bio_advance_iter(bio, &iter, iter.bi_size);
36 
37 	if (!iter.bi_bvec_done)
38 		idx = iter.bi_idx - 1;
39 	else	/* in the middle of bvec */
40 		idx = iter.bi_idx;
41 
42 	*bv = bio->bi_io_vec[idx];
43 
44 	/*
45 	 * iter.bi_bvec_done records the actual length of the last bvec
46 	 * if this bio ends in the middle of an io vector
47 	 */
48 	if (iter.bi_bvec_done)
49 		bv->bv_len = iter.bi_bvec_done;
50 }
51 
52 static inline bool bio_will_gap(struct request_queue *q,
53 		struct request *prev_rq, struct bio *prev, struct bio *next)
54 {
55 	struct bio_vec pb, nb;
56 
57 	if (!bio_has_data(prev) || !queue_virt_boundary(q))
58 		return false;
59 
60 	/*
61 	 * Don't merge if the 1st bio starts at a non-zero offset, otherwise it
62 	 * is quite difficult to respect the sg gap limit.  We work hard to
63 	 * merge a huge number of small single bios in the case of mkfs.
64 	 */
65 	if (prev_rq)
66 		bio_get_first_bvec(prev_rq->bio, &pb);
67 	else
68 		bio_get_first_bvec(prev, &pb);
69 	if (pb.bv_offset & queue_virt_boundary(q))
70 		return true;
71 
72 	/*
73 	 * We don't need to worry about the case where the merged segment
74 	 * ends on an unaligned virt boundary:
75 	 *
76 	 * - if 'pb' ends aligned, the merged segment ends aligned
77 	 * - if 'pb' ends unaligned, the next bio must include
78 	 *   a single bvec of 'nb', otherwise 'nb' can't
79 	 *   merge with 'pb'
80 	 */
81 	bio_get_last_bvec(prev, &pb);
82 	bio_get_first_bvec(next, &nb);
83 	if (biovec_phys_mergeable(q, &pb, &nb))
84 		return false;
85 	return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
86 }
87 
88 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
89 {
90 	return bio_will_gap(req->q, req, req->biotail, bio);
91 }
92 
93 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
94 {
95 	return bio_will_gap(req->q, NULL, bio, req->bio);
96 }
97 
98 /*
99  * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
100  * is defined as 'unsigned int', and it also has to be aligned to the
101  * logical block size, which is the minimum unit accepted by the hardware.
102  */
103 static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
104 {
105 	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
106 }
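/*
 * Worked example (hypothetical limits, for illustration only), assuming a
 * 4096-byte logical block size:
 *
 *   round_down(UINT_MAX, 4096) = 4294963200 bytes
 *   4294963200 >> SECTOR_SHIFT = 8388600 sectors
 *
 * i.e. a single bio is capped just below 4 GiB so that its byte count still
 * fits in bvec_iter.bi_size while remaining logical-block aligned.
 */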
107 
108 static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
109 {
110 	if (unlikely(split_sectors < 0)) {
111 		bio->bi_status = errno_to_blk_status(split_sectors);
112 		bio_endio(bio);
113 		return NULL;
114 	}
115 
116 	if (split_sectors) {
117 		struct bio *split;
118 
119 		split = bio_split(bio, split_sectors, GFP_NOIO,
120 				&bio->bi_bdev->bd_disk->bio_split);
121 		split->bi_opf |= REQ_NOMERGE;
122 		blkcg_bio_issue_init(split);
123 		bio_chain(split, bio);
124 		trace_block_split(split, bio->bi_iter.bi_sector);
125 		WARN_ON_ONCE(bio_zone_write_plugging(bio));
126 		submit_bio_noacct(bio);
127 		return split;
128 	}
129 
130 	return bio;
131 }
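/*
 * Summary of the return contract above (derived from the code, stated here
 * for illustration): a negative split_sectors ends @bio with the mapped
 * error and returns NULL, a positive value returns the front split while the
 * shortened remainder is re-submitted, and 0 returns @bio unchanged.
 */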
132 
133 struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
134 		unsigned *nsegs)
135 {
136 	unsigned int max_discard_sectors, granularity;
137 	sector_t tmp;
138 	unsigned split_sectors;
139 
140 	*nsegs = 1;
141 
142 	granularity = max(lim->discard_granularity >> 9, 1U);
143 
144 	max_discard_sectors =
145 		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
146 	max_discard_sectors -= max_discard_sectors % granularity;
147 	if (unlikely(!max_discard_sectors))
148 		return bio;
149 
150 	if (bio_sectors(bio) <= max_discard_sectors)
151 		return bio;
152 
153 	split_sectors = max_discard_sectors;
154 
155 	/*
156 	 * If the next starting sector would be misaligned, stop the discard at
157 	 * the previous aligned sector.
158 	 */
159 	tmp = bio->bi_iter.bi_sector + split_sectors -
160 		((lim->discard_alignment >> 9) % granularity);
161 	tmp = sector_div(tmp, granularity);
162 
163 	if (split_sectors > tmp)
164 		split_sectors -= tmp;
165 
166 	return bio_submit_split(bio, split_sectors);
167 }
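/*
 * Worked example (hypothetical limits, for illustration only): with
 * discard_granularity = 1 MiB (2048 sectors), discard_alignment = 0,
 * max_discard_sectors = 8192, and a 20000-sector discard starting at
 * sector 1000:
 *
 *   split_sectors = 8192
 *   tmp = (1000 + 8192 - 0) % 2048 = 1000
 *   split_sectors -= 1000          -> 7192
 *
 * so the first discard covers sectors 1000..8191 and ends on a granularity
 * boundary.
 */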
168 
169 static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim,
170 						bool is_atomic)
171 {
172 	/*
173 	 * chunk_sectors must be a multiple of atomic_write_boundary_sectors if
174 	 * both non-zero.
175 	 */
176 	if (is_atomic && lim->atomic_write_boundary_sectors)
177 		return lim->atomic_write_boundary_sectors;
178 
179 	return lim->chunk_sectors;
180 }
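/*
 * Example (hypothetical limits, for illustration only): with
 * lim->chunk_sectors = 256 and lim->atomic_write_boundary_sectors = 64, a
 * REQ_ATOMIC bio is bounded at 64-sector boundaries while all other I/O is
 * bounded at 256-sector boundaries; requiring 256 to be a multiple of 64
 * keeps the two consistent.
 */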
181 
182 /*
183  * Return the maximum number of sectors from the start of a bio that may be
184  * submitted as a single request to a block device. If enough sectors remain,
185  * align the end to the physical block size. Otherwise align the end to the
186  * logical block size. This approach minimizes the number of non-aligned
187  * requests that are submitted to a block device if the start of a bio is not
188  * aligned to a physical block boundary.
189  */
190 static inline unsigned get_max_io_size(struct bio *bio,
191 				       const struct queue_limits *lim)
192 {
193 	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
194 	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
195 	bool is_atomic = bio->bi_opf & REQ_ATOMIC;
196 	unsigned boundary_sectors = blk_boundary_sectors(lim, is_atomic);
197 	unsigned max_sectors, start, end;
198 
199 	/*
200 	 * We ignore lim->max_sectors for atomic writes because it may be less
201 	 * than the actual bio size, which we cannot tolerate.
202 	 */
203 	if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
204 		max_sectors = lim->max_write_zeroes_sectors;
205 	else if (is_atomic)
206 		max_sectors = lim->atomic_write_max_sectors;
207 	else
208 		max_sectors = lim->max_sectors;
209 
210 	if (boundary_sectors) {
211 		max_sectors = min(max_sectors,
212 			blk_boundary_sectors_left(bio->bi_iter.bi_sector,
213 					      boundary_sectors));
214 	}
215 
216 	start = bio->bi_iter.bi_sector & (pbs - 1);
217 	end = (start + max_sectors) & ~(pbs - 1);
218 	if (end > start)
219 		return end - start;
220 	return max_sectors & ~(lbs - 1);
221 }
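/*
 * Worked example (hypothetical limits, for illustration only): with
 * pbs = 8 sectors (4 KiB physical blocks), lbs = 1 sector, bi_sector = 3 and
 * max_sectors = 256:
 *
 *   start = 3 & 7          = 3
 *   end   = (3 + 256) & ~7 = 256
 *   end - start            = 253
 *
 * so the bio is limited to 253 sectors and the request ends on a physical
 * block boundary (sector 3 + 253 = 256).
 */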
222 
223 /**
224  * get_max_segment_size() - maximum number of bytes to add as a single segment
225  * @lim: Request queue limits.
226  * @paddr: address of the range to add
227  * @len: maximum length available to add at @paddr
228  *
229  * Returns the maximum number of bytes of the range starting at @paddr that can
230  * be added to a single segment.
231  */
232 static inline unsigned get_max_segment_size(const struct queue_limits *lim,
233 		phys_addr_t paddr, unsigned int len)
234 {
235 	/*
236 	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
237 	 * after having calculated the minimum.
238 	 */
239 	return min_t(unsigned long, len,
240 		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
241 		    (unsigned long)lim->max_segment_size - 1) + 1);
242 }
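/*
 * Worked example (hypothetical limits, for illustration only): with
 * lim->seg_boundary_mask = 0xffff (64 KiB boundary),
 * lim->max_segment_size = 65536, paddr = 0x1ff00 and len = 4096:
 *
 *   mask & paddr            = 0xff00
 *   mask - (mask & paddr)   = 0x00ff
 *   min(0x00ff, 0xffff) + 1 = 0x0100
 *   min(4096, 0x0100)       = 256
 *
 * i.e. only the 256 bytes up to the 0x20000 boundary may be added to the
 * current segment.
 */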
243 
244 /**
245  * bvec_split_segs - verify whether or not a bvec should be split in the middle
246  * @lim:      [in] queue limits to split based on
247  * @bv:       [in] bvec to examine
248  * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
249  *            by the number of segments from @bv that may be appended to that
250  *            bio without exceeding @max_segs
251  * @bytes:    [in,out] Number of bytes in the bio being built. Incremented
252  *            by the number of bytes from @bv that may be appended to that
253  *            bio without exceeding @max_bytes
254  * @max_segs: [in] upper bound for *@nsegs
255  * @max_bytes: [in] upper bound for *@bytes
256  *
257  * When splitting a bio, it can happen that a bvec is encountered that is too
258  * big to fit in a single segment and hence that it has to be split in the
259  * middle. This function verifies whether or not that should happen. The value
260  * %true is returned if and only if appending the entire @bv to a bio with
261  * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
262  * the block driver.
263  */
264 static bool bvec_split_segs(const struct queue_limits *lim,
265 		const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
266 		unsigned max_segs, unsigned max_bytes)
267 {
268 	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
269 	unsigned len = min(bv->bv_len, max_len);
270 	unsigned total_len = 0;
271 	unsigned seg_size = 0;
272 
273 	while (len && *nsegs < max_segs) {
274 		seg_size = get_max_segment_size(lim, bvec_phys(bv) + total_len, len);
275 
276 		(*nsegs)++;
277 		total_len += seg_size;
278 		len -= seg_size;
279 
280 		if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
281 			break;
282 	}
283 
284 	*bytes += total_len;
285 
286 	/* tell the caller to split the bvec if it is too big to fit */
287 	return len > 0 || bv->bv_len > max_len;
288 }
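/*
 * Example (hypothetical limits, for illustration only): with
 * max_segment_size = 65536, no virt boundary and generous
 * max_segs/max_bytes, a 131072-byte bvec is consumed in two passes of
 * get_max_segment_size(), bumping *nsegs by 2 and *bytes by 131072, and the
 * function returns false because the whole bvec fit.
 */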
289 
290 static unsigned int bio_split_alignment(struct bio *bio,
291 		const struct queue_limits *lim)
292 {
293 	if (op_is_write(bio_op(bio)) && lim->zone_write_granularity)
294 		return lim->zone_write_granularity;
295 	return lim->logical_block_size;
296 }
297 
298 /**
299  * bio_split_rw_at - check if and where to split a read/write bio
300  * @bio:  [in] bio to be split
301  * @lim:  [in] queue limits to split based on
302  * @segs: [out] number of segments in the bio with the first half of the sectors
303  * @max_bytes: [in] maximum number of bytes per bio
304  *
305  * Find out if @bio needs to be split to fit the queue limits in @lim and a
306  * maximum size of @max_bytes.  Returns a negative error number if @bio can't be
307  * split, 0 if the bio doesn't have to be split, or a positive sector offset if
308  * @bio needs to be split.
309  */
310 int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
311 		unsigned *segs, unsigned max_bytes)
312 {
313 	struct bio_vec bv, bvprv, *bvprvp = NULL;
314 	struct bvec_iter iter;
315 	unsigned nsegs = 0, bytes = 0;
316 
317 	bio_for_each_bvec(bv, bio, iter) {
318 		/*
319 		 * If the queue doesn't support SG gaps and adding this
320 		 * offset would create a gap, disallow it.
321 		 */
322 		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
323 			goto split;
324 
325 		if (nsegs < lim->max_segments &&
326 		    bytes + bv.bv_len <= max_bytes &&
327 		    bv.bv_offset + bv.bv_len <= lim->min_segment_size) {
328 			nsegs++;
329 			bytes += bv.bv_len;
330 		} else {
331 			if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
332 					lim->max_segments, max_bytes))
333 				goto split;
334 		}
335 
336 		bvprv = bv;
337 		bvprvp = &bvprv;
338 	}
339 
340 	*segs = nsegs;
341 	return 0;
342 split:
343 	if (bio->bi_opf & REQ_ATOMIC)
344 		return -EINVAL;
345 
346 	/*
347 	 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
348 	 * with -EAGAIN if splitting is required and return the error.
349 	 */
350 	if (bio->bi_opf & REQ_NOWAIT)
351 		return -EAGAIN;
352 
353 	*segs = nsegs;
354 
355 	/*
356 	 * Individual bvecs might not be logical block aligned. Round down the
357 	 * split size so that each bio is properly block size aligned, even if
358 	 * we do not use the full hardware limits.
359 	 */
360 	bytes = ALIGN_DOWN(bytes, bio_split_alignment(bio, lim));
361 
362 	/*
363 	 * Bio splitting may cause subtle trouble such as a hang when doing sync
364 	 * iopoll in the direct I/O path.  Given that the performance gain of
365 	 * iopoll for big I/O can be trivial, disable iopoll when a split is needed.
366 	 */
367 	bio_clear_polled(bio);
368 	return bytes >> SECTOR_SHIFT;
369 }
370 EXPORT_SYMBOL_GPL(bio_split_rw_at);
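/*
 * Usage sketch (hypothetical caller, for illustration only): interpreting
 * the return value of bio_split_rw_at():
 *
 *	unsigned int nr_segs;
 *	int sectors = bio_split_rw_at(bio, lim, &nr_segs, max_bytes);
 *
 *	if (sectors < 0)
 *		;	// cannot be split (e.g. REQ_ATOMIC or REQ_NOWAIT)
 *	else if (sectors == 0)
 *		;	// fits the limits as-is, nr_segs is valid
 *	else
 *		;	// must be split at 'sectors' sectors
 */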
371 
372 struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
373 		unsigned *nr_segs)
374 {
375 	return bio_submit_split(bio,
376 		bio_split_rw_at(bio, lim, nr_segs,
377 			get_max_io_size(bio, lim) << SECTOR_SHIFT));
378 }
379 
380 /*
381  * REQ_OP_ZONE_APPEND bios must never be split by the block layer.
382  *
383  * But we want the nr_segs calculation provided by bio_split_rw_at, and having
384  * a good sanity check that the submitter built the bio correctly is nice to
385  * have as well.
386  */
387 struct bio *bio_split_zone_append(struct bio *bio,
388 		const struct queue_limits *lim, unsigned *nr_segs)
389 {
390 	unsigned int max_sectors = queue_limits_max_zone_append_sectors(lim);
391 	int split_sectors;
392 
393 	split_sectors = bio_split_rw_at(bio, lim, nr_segs,
394 			max_sectors << SECTOR_SHIFT);
395 	if (WARN_ON_ONCE(split_sectors > 0))
396 		split_sectors = -EINVAL;
397 	return bio_submit_split(bio, split_sectors);
398 }
399 
400 struct bio *bio_split_write_zeroes(struct bio *bio,
401 		const struct queue_limits *lim, unsigned *nsegs)
402 {
403 	unsigned int max_sectors = get_max_io_size(bio, lim);
404 
405 	*nsegs = 0;
406 
407 	/*
408 	 * An unset limit should normally not happen, as bio submission is keyed
409 	 * off having a non-zero limit.  But SCSI can clear the limit in the
410 	 * I/O completion handler, and we can race and see this.  Splitting to a
411 	 * zero limit obviously doesn't make sense, so band-aid it here.
412 	 */
413 	if (!max_sectors)
414 		return bio;
415 	if (bio_sectors(bio) <= max_sectors)
416 		return bio;
417 	return bio_submit_split(bio, max_sectors);
418 }
419 
420 /**
421  * bio_split_to_limits - split a bio to fit the queue limits
422  * @bio:     bio to be split
423  *
424  * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
425  * if so split off a bio fitting the limits from the beginning of @bio and
426  * return it.  @bio is shortened to the remainder and re-submitted.
427  *
428  * The split bio is allocated from the per-disk bio_split bio_set
429  * (@bio->bi_bdev->bd_disk->bio_split), which is provided by the block layer.
430  */
431 struct bio *bio_split_to_limits(struct bio *bio)
432 {
433 	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
434 	unsigned int nr_segs;
435 
436 	return __bio_split_to_limits(bio, lim, &nr_segs);
437 }
438 EXPORT_SYMBOL(bio_split_to_limits);
439 
440 unsigned int blk_recalc_rq_segments(struct request *rq)
441 {
442 	unsigned int nr_phys_segs = 0;
443 	unsigned int bytes = 0;
444 	struct req_iterator iter;
445 	struct bio_vec bv;
446 
447 	if (!rq->bio)
448 		return 0;
449 
450 	switch (bio_op(rq->bio)) {
451 	case REQ_OP_DISCARD:
452 	case REQ_OP_SECURE_ERASE:
453 		if (queue_max_discard_segments(rq->q) > 1) {
454 			struct bio *bio = rq->bio;
455 
456 			for_each_bio(bio)
457 				nr_phys_segs++;
458 			return nr_phys_segs;
459 		}
460 		return 1;
461 	case REQ_OP_WRITE_ZEROES:
462 		return 0;
463 	default:
464 		break;
465 	}
466 
467 	rq_for_each_bvec(bv, rq, iter)
468 		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
469 				UINT_MAX, UINT_MAX);
470 	return nr_phys_segs;
471 }
472 
473 struct phys_vec {
474 	phys_addr_t	paddr;
475 	u32		len;
476 };
477 
478 static bool blk_map_iter_next(struct request *req,
479 		struct req_iterator *iter, struct phys_vec *vec)
480 {
481 	unsigned int max_size;
482 	struct bio_vec bv;
483 
484 	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
485 		if (!iter->bio)
486 			return false;
487 		vec->paddr = bvec_phys(&req->special_vec);
488 		vec->len = req->special_vec.bv_len;
489 		iter->bio = NULL;
490 		return true;
491 	}
492 
493 	if (!iter->iter.bi_size)
494 		return false;
495 
496 	bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
497 	vec->paddr = bvec_phys(&bv);
498 	max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX);
499 	bv.bv_len = min(bv.bv_len, max_size);
500 	bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len);
501 
502 	/*
503 	 * If we are entirely done with this bi_io_vec entry, check if the next
504 	 * one could be merged into it.  This typically happens when moving to
505 	 * the next bio, but some callers also don't pack bvecs tight.
506 	 */
507 	while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) {
508 		struct bio_vec next;
509 
510 		if (!iter->iter.bi_size) {
511 			if (!iter->bio->bi_next)
512 				break;
513 			iter->bio = iter->bio->bi_next;
514 			iter->iter = iter->bio->bi_iter;
515 		}
516 
517 		next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
518 		if (bv.bv_len + next.bv_len > max_size ||
519 		    !biovec_phys_mergeable(req->q, &bv, &next))
520 			break;
521 
522 		bv.bv_len += next.bv_len;
523 		bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len);
524 	}
525 
526 	vec->len = bv.bv_len;
527 	return true;
528 }
529 
530 static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
531 		struct scatterlist *sglist)
532 {
533 	if (!*sg)
534 		return sglist;
535 
536 	/*
537 	 * If the driver previously mapped a shorter list, we could see a
538 	 * termination bit prematurely unless it fully inits the sg table
539 	 * on each mapping. We KNOW that there must be more entries here
540 	 * or the driver would be buggy, so force clear the termination bit
541 	 * to avoid doing a full sg_init_table() in drivers for each command.
542 	 */
543 	sg_unmark_end(*sg);
544 	return sg_next(*sg);
545 }
546 
547 /*
548  * Map a request to a scatterlist, return the number of sg entries set up. Caller
549  * must make sure sg can hold rq->nr_phys_segments entries.
550  */
551 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
552 		struct scatterlist *sglist, struct scatterlist **last_sg)
553 {
554 	struct req_iterator iter = {
555 		.bio	= rq->bio,
556 	};
557 	struct phys_vec vec;
558 	int nsegs = 0;
559 
560 	/* the internal flush request may not have a bio attached */
561 	if (iter.bio)
562 		iter.iter = iter.bio->bi_iter;
563 
564 	while (blk_map_iter_next(rq, &iter, &vec)) {
565 		*last_sg = blk_next_sg(last_sg, sglist);
566 		sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
567 				offset_in_page(vec.paddr));
568 		nsegs++;
569 	}
570 
571 	if (*last_sg)
572 		sg_mark_end(*last_sg);
573 
574 	/*
575 	 * Something must have gone wrong if the computed number of
576 	 * segments is bigger than the number of the request's physical segments.
577 	 */
578 	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
579 
580 	return nsegs;
581 }
582 EXPORT_SYMBOL(__blk_rq_map_sg);
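/*
 * Usage sketch (hypothetical driver code, for illustration only): a driver
 * typically sizes the scatterlist for blk_rq_nr_phys_segments(rq) entries
 * and then maps the request:
 *
 *	struct scatterlist *last = NULL;
 *	int nents = __blk_rq_map_sg(rq->q, rq, sglist, &last);
 *
 * before handing 'nents' entries to the DMA mapping API.
 */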
583 
584 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
585 						  sector_t offset)
586 {
587 	struct request_queue *q = rq->q;
588 	struct queue_limits *lim = &q->limits;
589 	unsigned int max_sectors, boundary_sectors;
590 	bool is_atomic = rq->cmd_flags & REQ_ATOMIC;
591 
592 	if (blk_rq_is_passthrough(rq))
593 		return q->limits.max_hw_sectors;
594 
595 	boundary_sectors = blk_boundary_sectors(lim, is_atomic);
596 	max_sectors = blk_queue_get_max_sectors(rq);
597 
598 	if (!boundary_sectors ||
599 	    req_op(rq) == REQ_OP_DISCARD ||
600 	    req_op(rq) == REQ_OP_SECURE_ERASE)
601 		return max_sectors;
602 	return min(max_sectors,
603 		   blk_boundary_sectors_left(offset, boundary_sectors));
604 }
605 
606 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
607 		unsigned int nr_phys_segs)
608 {
609 	if (!blk_cgroup_mergeable(req, bio))
610 		goto no_merge;
611 
612 	if (blk_integrity_merge_bio(req->q, req, bio) == false)
613 		goto no_merge;
614 
615 	/* discard request merge won't add new segment */
616 	if (req_op(req) == REQ_OP_DISCARD)
617 		return 1;
618 
619 	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
620 		goto no_merge;
621 
622 	/*
623 	 * This will form the start of a new hw segment.  Bump both
624 	 * counters.
625 	 */
626 	req->nr_phys_segments += nr_phys_segs;
627 	if (bio_integrity(bio))
628 		req->nr_integrity_segments += blk_rq_count_integrity_sg(req->q,
629 									bio);
630 	return 1;
631 
632 no_merge:
633 	req_set_nomerge(req->q, req);
634 	return 0;
635 }
636 
637 int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
638 {
639 	if (req_gap_back_merge(req, bio))
640 		return 0;
641 	if (blk_integrity_rq(req) &&
642 	    integrity_req_gap_back_merge(req, bio))
643 		return 0;
644 	if (!bio_crypt_ctx_back_mergeable(req, bio))
645 		return 0;
646 	if (blk_rq_sectors(req) + bio_sectors(bio) >
647 	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
648 		req_set_nomerge(req->q, req);
649 		return 0;
650 	}
651 
652 	return ll_new_hw_segment(req, bio, nr_segs);
653 }
654 
655 static int ll_front_merge_fn(struct request *req, struct bio *bio,
656 		unsigned int nr_segs)
657 {
658 	if (req_gap_front_merge(req, bio))
659 		return 0;
660 	if (blk_integrity_rq(req) &&
661 	    integrity_req_gap_front_merge(req, bio))
662 		return 0;
663 	if (!bio_crypt_ctx_front_mergeable(req, bio))
664 		return 0;
665 	if (blk_rq_sectors(req) + bio_sectors(bio) >
666 	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
667 		req_set_nomerge(req->q, req);
668 		return 0;
669 	}
670 
671 	return ll_new_hw_segment(req, bio, nr_segs);
672 }
673 
674 static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
675 		struct request *next)
676 {
677 	unsigned short segments = blk_rq_nr_discard_segments(req);
678 
679 	if (segments >= queue_max_discard_segments(q))
680 		goto no_merge;
681 	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
682 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
683 		goto no_merge;
684 
685 	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
686 	return true;
687 no_merge:
688 	req_set_nomerge(q, req);
689 	return false;
690 }
691 
692 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
693 				struct request *next)
694 {
695 	int total_phys_segments;
696 
697 	if (req_gap_back_merge(req, next->bio))
698 		return 0;
699 
700 	/*
701 	 * Will it become too large?
702 	 */
703 	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
704 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
705 		return 0;
706 
707 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
708 	if (total_phys_segments > blk_rq_get_max_segments(req))
709 		return 0;
710 
711 	if (!blk_cgroup_mergeable(req, next->bio))
712 		return 0;
713 
714 	if (blk_integrity_merge_rq(q, req, next) == false)
715 		return 0;
716 
717 	if (!bio_crypt_ctx_merge_rq(req, next))
718 		return 0;
719 
720 	/* Merge is OK... */
721 	req->nr_phys_segments = total_phys_segments;
722 	req->nr_integrity_segments += next->nr_integrity_segments;
723 	return 1;
724 }
725 
726 /**
727  * blk_rq_set_mixed_merge - mark a request as mixed merge
728  * @rq: request to mark as mixed merge
729  *
730  * Description:
731  *     @rq is about to be mixed merged.  Make sure the attributes
732  *     which can be mixed are set in each bio and mark @rq as mixed
733  *     merged.
734  */
735 static void blk_rq_set_mixed_merge(struct request *rq)
736 {
737 	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
738 	struct bio *bio;
739 
740 	if (rq->rq_flags & RQF_MIXED_MERGE)
741 		return;
742 
743 	/*
744 	 * @rq will no longer represent mixable attributes for all the
745 	 * contained bios.  It will just track those of the first one.
746 	 * Distribute the attributes to each bio.
747 	 */
748 	for (bio = rq->bio; bio; bio = bio->bi_next) {
749 		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
750 			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
751 		bio->bi_opf |= ff;
752 	}
753 	rq->rq_flags |= RQF_MIXED_MERGE;
754 }
755 
756 static inline blk_opf_t bio_failfast(const struct bio *bio)
757 {
758 	if (bio->bi_opf & REQ_RAHEAD)
759 		return REQ_FAILFAST_MASK;
760 
761 	return bio->bi_opf & REQ_FAILFAST_MASK;
762 }
763 
764 /*
765  * After we are marked as MIXED_MERGE, any new readahead (RA) bio has to be
766  * marked as failfast, and the request's failfast flags have to be updated in
767  * the case of a front merge.
768  */
769 static inline void blk_update_mixed_merge(struct request *req,
770 		struct bio *bio, bool front_merge)
771 {
772 	if (req->rq_flags & RQF_MIXED_MERGE) {
773 		if (bio->bi_opf & REQ_RAHEAD)
774 			bio->bi_opf |= REQ_FAILFAST_MASK;
775 
776 		if (front_merge) {
777 			req->cmd_flags &= ~REQ_FAILFAST_MASK;
778 			req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
779 		}
780 	}
781 }
782 
783 static void blk_account_io_merge_request(struct request *req)
784 {
785 	if (blk_do_io_stat(req)) {
786 		part_stat_lock();
787 		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
788 		part_stat_local_dec(req->part,
789 				    in_flight[op_is_write(req_op(req))]);
790 		part_stat_unlock();
791 	}
792 }
793 
794 static enum elv_merge blk_try_req_merge(struct request *req,
795 					struct request *next)
796 {
797 	if (blk_discard_mergable(req))
798 		return ELEVATOR_DISCARD_MERGE;
799 	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
800 		return ELEVATOR_BACK_MERGE;
801 
802 	return ELEVATOR_NO_MERGE;
803 }
804 
805 static bool blk_atomic_write_mergeable_rq_bio(struct request *rq,
806 					      struct bio *bio)
807 {
808 	return (rq->cmd_flags & REQ_ATOMIC) == (bio->bi_opf & REQ_ATOMIC);
809 }
810 
811 static bool blk_atomic_write_mergeable_rqs(struct request *rq,
812 					   struct request *next)
813 {
814 	return (rq->cmd_flags & REQ_ATOMIC) == (next->cmd_flags & REQ_ATOMIC);
815 }
816 
817 /*
818  * For non-mq, this has to be called with the request spinlock acquired.
819  * For mq with scheduling, the appropriate queue wide lock should be held.
820  */
821 static struct request *attempt_merge(struct request_queue *q,
822 				     struct request *req, struct request *next)
823 {
824 	if (!rq_mergeable(req) || !rq_mergeable(next))
825 		return NULL;
826 
827 	if (req_op(req) != req_op(next))
828 		return NULL;
829 
830 	if (rq_data_dir(req) != rq_data_dir(next))
831 		return NULL;
832 
833 	/* Don't merge requests with different write hints. */
834 	if (req->write_hint != next->write_hint)
835 		return NULL;
836 
837 	if (req->ioprio != next->ioprio)
838 		return NULL;
839 
840 	if (!blk_atomic_write_mergeable_rqs(req, next))
841 		return NULL;
842 
843 	/*
844 	 * If we are allowed to merge, then append bio list
845 	 * from next to rq and release next. merge_requests_fn
846 	 * will have updated segment counts, update sector
847 	 * counts here. Handle DISCARDs separately, as they
848 	 * have separate settings.
849 	 */
850 
851 	switch (blk_try_req_merge(req, next)) {
852 	case ELEVATOR_DISCARD_MERGE:
853 		if (!req_attempt_discard_merge(q, req, next))
854 			return NULL;
855 		break;
856 	case ELEVATOR_BACK_MERGE:
857 		if (!ll_merge_requests_fn(q, req, next))
858 			return NULL;
859 		break;
860 	default:
861 		return NULL;
862 	}
863 
864 	/*
865 	 * If failfast settings disagree or any of the two is already
866 	 * a mixed merge, mark both as mixed before proceeding.  This
867 	 * makes sure that all involved bios have mixable attributes
868 	 * set properly.
869 	 */
870 	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
871 	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
872 	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
873 		blk_rq_set_mixed_merge(req);
874 		blk_rq_set_mixed_merge(next);
875 	}
876 
877 	/*
878 	 * At this point we have either done a back merge or front merge. We
879 	 * need the smaller start_time_ns of the merged requests to be the
880 	 * current request for accounting purposes.
881 	 */
882 	if (next->start_time_ns < req->start_time_ns)
883 		req->start_time_ns = next->start_time_ns;
884 
885 	req->biotail->bi_next = next->bio;
886 	req->biotail = next->biotail;
887 
888 	req->__data_len += blk_rq_bytes(next);
889 
890 	if (!blk_discard_mergable(req))
891 		elv_merge_requests(q, req, next);
892 
893 	blk_crypto_rq_put_keyslot(next);
894 
895 	/*
896 	 * 'next' is going away, so update stats accordingly
897 	 */
898 	blk_account_io_merge_request(next);
899 
900 	trace_block_rq_merge(next);
901 
902 	/*
903 	 * Ownership of the bios has passed from 'next' to 'req'; return 'next'
904 	 * for the caller to free.
905 	 */
906 	next->bio = NULL;
907 	return next;
908 }
909 
910 static struct request *attempt_back_merge(struct request_queue *q,
911 		struct request *rq)
912 {
913 	struct request *next = elv_latter_request(q, rq);
914 
915 	if (next)
916 		return attempt_merge(q, rq, next);
917 
918 	return NULL;
919 }
920 
921 static struct request *attempt_front_merge(struct request_queue *q,
922 		struct request *rq)
923 {
924 	struct request *prev = elv_former_request(q, rq);
925 
926 	if (prev)
927 		return attempt_merge(q, prev, rq);
928 
929 	return NULL;
930 }
931 
932 /*
933  * Try to merge 'next' into 'rq'. Return true if the merge happened, false
934  * otherwise. The caller is responsible for freeing 'next' if the merge
935  * happened.
936  */
937 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
938 			   struct request *next)
939 {
940 	return attempt_merge(q, rq, next);
941 }
942 
943 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
944 {
945 	if (!rq_mergeable(rq) || !bio_mergeable(bio))
946 		return false;
947 
948 	if (req_op(rq) != bio_op(bio))
949 		return false;
950 
951 	/* different data direction or already started, don't merge */
952 	if (bio_data_dir(bio) != rq_data_dir(rq))
953 		return false;
954 
955 	/* don't merge across cgroup boundaries */
956 	if (!blk_cgroup_mergeable(rq, bio))
957 		return false;
958 
959 	/* only merge integrity protected bio into ditto rq */
960 	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
961 		return false;
962 
963 	/* Only merge if the crypt contexts are compatible */
964 	if (!bio_crypt_rq_ctx_compatible(rq, bio))
965 		return false;
966 
967 	/* Don't merge requests with different write hints. */
968 	if (rq->write_hint != bio->bi_write_hint)
969 		return false;
970 
971 	if (rq->ioprio != bio_prio(bio))
972 		return false;
973 
974 	if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false)
975 		return false;
976 
977 	return true;
978 }
979 
980 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
981 {
982 	if (blk_discard_mergable(rq))
983 		return ELEVATOR_DISCARD_MERGE;
984 	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
985 		return ELEVATOR_BACK_MERGE;
986 	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
987 		return ELEVATOR_FRONT_MERGE;
988 	return ELEVATOR_NO_MERGE;
989 }
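/*
 * Example (for illustration only): for a request covering sectors [100, 108)
 * and a bio starting at sector 108, blk_rq_pos(rq) + blk_rq_sectors(rq) == 108
 * matches the bio's start, so this is a back merge; an 8-sector bio starting
 * at sector 92 instead satisfies blk_rq_pos(rq) - bio_sectors(bio) == 92 and
 * is a front merge.
 */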
990 
991 static void blk_account_io_merge_bio(struct request *req)
992 {
993 	if (!blk_do_io_stat(req))
994 		return;
995 
996 	part_stat_lock();
997 	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
998 	part_stat_unlock();
999 }
1000 
1001 enum bio_merge_status bio_attempt_back_merge(struct request *req,
1002 		struct bio *bio, unsigned int nr_segs)
1003 {
1004 	const blk_opf_t ff = bio_failfast(bio);
1005 
1006 	if (!ll_back_merge_fn(req, bio, nr_segs))
1007 		return BIO_MERGE_FAILED;
1008 
1009 	trace_block_bio_backmerge(bio);
1010 	rq_qos_merge(req->q, req, bio);
1011 
1012 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1013 		blk_rq_set_mixed_merge(req);
1014 
1015 	blk_update_mixed_merge(req, bio, false);
1016 
1017 	if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING)
1018 		blk_zone_write_plug_bio_merged(bio);
1019 
1020 	req->biotail->bi_next = bio;
1021 	req->biotail = bio;
1022 	req->__data_len += bio->bi_iter.bi_size;
1023 
1024 	bio_crypt_free_ctx(bio);
1025 
1026 	blk_account_io_merge_bio(req);
1027 	return BIO_MERGE_OK;
1028 }
1029 
1030 static enum bio_merge_status bio_attempt_front_merge(struct request *req,
1031 		struct bio *bio, unsigned int nr_segs)
1032 {
1033 	const blk_opf_t ff = bio_failfast(bio);
1034 
1035 	/*
1036 	 * A front merge for writes to sequential zones of a zoned block device
1037 	 * can happen only if the user submitted writes out of order. Do not
1038  * merge such a write and let it fail instead.
1039 	 */
1040 	if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING)
1041 		return BIO_MERGE_FAILED;
1042 
1043 	if (!ll_front_merge_fn(req, bio, nr_segs))
1044 		return BIO_MERGE_FAILED;
1045 
1046 	trace_block_bio_frontmerge(bio);
1047 	rq_qos_merge(req->q, req, bio);
1048 
1049 	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1050 		blk_rq_set_mixed_merge(req);
1051 
1052 	blk_update_mixed_merge(req, bio, true);
1053 
1054 	bio->bi_next = req->bio;
1055 	req->bio = bio;
1056 
1057 	req->__sector = bio->bi_iter.bi_sector;
1058 	req->__data_len += bio->bi_iter.bi_size;
1059 
1060 	bio_crypt_do_front_merge(req, bio);
1061 
1062 	blk_account_io_merge_bio(req);
1063 	return BIO_MERGE_OK;
1064 }
1065 
1066 static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
1067 		struct request *req, struct bio *bio)
1068 {
1069 	unsigned short segments = blk_rq_nr_discard_segments(req);
1070 
1071 	if (segments >= queue_max_discard_segments(q))
1072 		goto no_merge;
1073 	if (blk_rq_sectors(req) + bio_sectors(bio) >
1074 	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
1075 		goto no_merge;
1076 
1077 	rq_qos_merge(q, req, bio);
1078 
1079 	req->biotail->bi_next = bio;
1080 	req->biotail = bio;
1081 	req->__data_len += bio->bi_iter.bi_size;
1082 	req->nr_phys_segments = segments + 1;
1083 
1084 	blk_account_io_merge_bio(req);
1085 	return BIO_MERGE_OK;
1086 no_merge:
1087 	req_set_nomerge(q, req);
1088 	return BIO_MERGE_FAILED;
1089 }
1090 
1091 static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
1092 						   struct request *rq,
1093 						   struct bio *bio,
1094 						   unsigned int nr_segs,
1095 						   bool sched_allow_merge)
1096 {
1097 	if (!blk_rq_merge_ok(rq, bio))
1098 		return BIO_MERGE_NONE;
1099 
1100 	switch (blk_try_merge(rq, bio)) {
1101 	case ELEVATOR_BACK_MERGE:
1102 		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1103 			return bio_attempt_back_merge(rq, bio, nr_segs);
1104 		break;
1105 	case ELEVATOR_FRONT_MERGE:
1106 		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1107 			return bio_attempt_front_merge(rq, bio, nr_segs);
1108 		break;
1109 	case ELEVATOR_DISCARD_MERGE:
1110 		return bio_attempt_discard_merge(q, rq, bio);
1111 	default:
1112 		return BIO_MERGE_NONE;
1113 	}
1114 
1115 	return BIO_MERGE_FAILED;
1116 }
1117 
1118 /**
1119  * blk_attempt_plug_merge - try to merge with %current's plugged list
1120  * @q: request_queue new bio is being queued at
1121  * @bio: new bio being queued
1122  * @nr_segs: number of segments in @bio
1123  *
1124  *
1125  * Determine whether @bio being queued on @q can be merged with the previous
1126  * request on %current's plugged list.  Returns %true if merge was successful,
1127  * otherwise %false.
1128  *
1129  * Plugging coalesces IOs from the same issuer for the same purpose without
1130  * going through @q->queue_lock.  As such it's more of an issuing mechanism
1131  * than scheduling, and the request, while it may have elvpriv data, is not
1132  * added to the elevator at this point.  In addition, we don't have
1133  * reliable access to the elevator outside queue lock.  Only check basic
1134  * merging parameters without querying the elevator.
1135  *
1136  * Caller must ensure !blk_queue_nomerges(q) beforehand.
1137  */
1138 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1139 		unsigned int nr_segs)
1140 {
1141 	struct blk_plug *plug = current->plug;
1142 	struct request *rq;
1143 
1144 	if (!plug || rq_list_empty(&plug->mq_list))
1145 		return false;
1146 
1147 	rq = plug->mq_list.tail;
1148 	if (rq->q == q)
1149 		return blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1150 			BIO_MERGE_OK;
1151 	else if (!plug->multiple_queues)
1152 		return false;
1153 
1154 	rq_list_for_each(&plug->mq_list, rq) {
1155 		if (rq->q != q)
1156 			continue;
1157 		if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1158 		    BIO_MERGE_OK)
1159 			return true;
1160 		break;
1161 	}
1162 	return false;
1163 }
1164 
1165 /*
1166  * Iterate list of requests and see if we can merge this bio with any
1167  * of them.
1168  */
1169 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
1170 			struct bio *bio, unsigned int nr_segs)
1171 {
1172 	struct request *rq;
1173 	int checked = 8;
1174 
1175 	list_for_each_entry_reverse(rq, list, queuelist) {
1176 		if (!checked--)
1177 			break;
1178 
1179 		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
1180 		case BIO_MERGE_NONE:
1181 			continue;
1182 		case BIO_MERGE_OK:
1183 			return true;
1184 		case BIO_MERGE_FAILED:
1185 			return false;
1186 		}
1187 
1188 	}
1189 
1190 	return false;
1191 }
1192 EXPORT_SYMBOL_GPL(blk_bio_list_merge);
1193 
1194 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
1195 		unsigned int nr_segs, struct request **merged_request)
1196 {
1197 	struct request *rq;
1198 
1199 	switch (elv_merge(q, &rq, bio)) {
1200 	case ELEVATOR_BACK_MERGE:
1201 		if (!blk_mq_sched_allow_merge(q, rq, bio))
1202 			return false;
1203 		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1204 			return false;
1205 		*merged_request = attempt_back_merge(q, rq);
1206 		if (!*merged_request)
1207 			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
1208 		return true;
1209 	case ELEVATOR_FRONT_MERGE:
1210 		if (!blk_mq_sched_allow_merge(q, rq, bio))
1211 			return false;
1212 		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1213 			return false;
1214 		*merged_request = attempt_front_merge(q, rq);
1215 		if (!*merged_request)
1216 			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
1217 		return true;
1218 	case ELEVATOR_DISCARD_MERGE:
1219 		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
1220 	default:
1221 		return false;
1222 	}
1223 }
1224 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
1225