// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#ifndef __GENKSYMS__
#include <linux/blk-cgroup.h>
#endif

#include <trace/events/block.h>

#include "blk.h"

static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
	 * is quite difficult to respect the sg gap limit.  We work hard to
	 * merge a huge number of small single bios in case of mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}
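
/*
 * Worked example for the virt boundary checks above (hypothetical
 * numbers): with queue_virt_boundary(q) == 0xfff (a 4 KiB boundary, as
 * NVMe advertises), a first bvec with bv_offset 0x200 fails the
 * "pb.bv_offset & queue_virt_boundary(q)" test (0x200 & 0xfff == 0x200),
 * so the bios are not merged; only a first bvec starting on a 4 KiB
 * boundary can pass that test.
 */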

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same.  */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
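
/*
 * Worked example for the alignment round-down above (hypothetical
 * numbers): granularity = 8 sectors (4 KiB), max_discard_sectors = 1024,
 * alignment = 0, and a discard bio starting at sector 3.  Then
 * tmp = 3 + 1024 = 1027, sector_div() leaves the remainder 1027 % 8 = 3,
 * and split_sectors becomes 1024 - 3 = 1021, so the remainder bio starts
 * at sector 3 + 1021 = 1024, which is granularity aligned.
 */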

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 0;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned max_sectors = sectors;
	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
	unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);

	max_sectors += start_offset;
	max_sectors &= ~(pbs - 1);
	if (max_sectors > start_offset)
		return max_sectors - start_offset;

	return sectors & ~(lbs - 1);
}
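
/*
 * Worked example (hypothetical numbers): pbs = 8 sectors (4 KiB physical
 * blocks), lbs = 1, bi_sector = 13 and blk_max_size_offset() = 2560.
 * start_offset = 13 & 7 = 5, max_sectors = (2560 + 5) & ~7 = 2560, and
 * the function returns 2560 - 5 = 2555, so the bio ends at sector
 * 13 + 2555 = 2568, a multiple of the physical block size.
 */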

static inline unsigned get_max_segment_size(const struct request_queue *q,
					    struct page *start_page,
					    unsigned long offset)
{
	unsigned long mask = queue_segment_boundary(q);

	offset = mask & (page_to_phys(start_page) + offset);

	/*
	 * Overflow may be triggered when the page has a zero physical
	 * address on a 32-bit arch; use the queue's max segment size
	 * when that happens.
	 */
	return min_not_zero(mask - offset + 1,
			(unsigned long)queue_max_segment_size(q));
}
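
/*
 * Worked example (hypothetical numbers): with a segment boundary mask of
 * 0xffff (64 KiB) and a bvec whose physical address is 0x12340100, the
 * in-boundary offset is 0x100 and the segment may be at most
 * 0xffff - 0x100 + 1 = 0xff00 bytes, i.e. it may not cross the next
 * 64 KiB boundary.  When mask is ~0UL and the masked offset is 0,
 * "mask - offset + 1" wraps to 0, which is why min_not_zero() is used.
 */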

/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @q:        [in] request queue associated with the bio associated with @bv
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
 *            by the number of sectors from @bv that may be appended to that
 *            bio without exceeding @max_sectors
 * @max_segs: [in] upper bound for *@nsegs
 * @max_sectors: [in] upper bound for *@sectors
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct request_queue *q,
			    const struct bio_vec *bv, unsigned *nsegs,
			    unsigned *sectors, unsigned max_segs,
			    unsigned max_sectors)
{
	unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(q, bv->bv_page,
						bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
			break;
	}

	*sectors += total_len >> 9;

	/* tell the caller to split the bvec if it is too big to fit */
	return len > 0 || bv->bv_len > max_len;
}
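
/*
 * Worked example (hypothetical numbers): a 1 MiB bvec on a queue with a
 * 64 KiB max segment size, no virt boundary and generous sector limits
 * is accounted as 16 segments of 64 KiB each and the function returns
 * false (nothing left over).  If max_segs only allowed 8 of those
 * segments, the loop would stop with len > 0 and the function would
 * return true, telling the caller to split the bio inside the bvec.
 */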

/**
 * blk_bio_segment_split - split a bio in two bios
 * @q:    [in] request queue pointer
 * @bio:  [in] bio to be split
 * @bs:	  [in] bio set to allocate the clone from
 * @segs: [out] number of segments in the bio with the first half of the sectors
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most get_max_io_size(@q, @bio) sectors.
 * - That it has at most queue_max_segments(@q) segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, sectors = 0;
	const unsigned max_sectors = get_max_io_size(q, bio);
	const unsigned max_segs = queue_max_segments(q);

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (nsegs < max_segs &&
		    sectors + (bv.bv_len >> 9) <= max_sectors &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			sectors += bv.bv_len >> 9;
		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
					 max_sectors)) {
			goto split;
		}

		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return NULL;
split:
	*segs = nsegs;
	return bio_split(bio, sectors, GFP_NOIO, bs);
}

/**
 * __blk_queue_split - split a bio and submit the second half
 * @q:       [in] request queue pointer
 * @bio:     [in, out] bio to be split
 * @nr_segs: [out] number of segments in the first bio
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. If the second bio is still too
 * big it will be split by a recursive call to this function. Since this
 * function may allocate a new bio from @q->bio_split, it is the responsibility
 * of the caller to ensure that @q is only released after processing of the
 * split bio has finished.
 */
void __blk_queue_split(struct request_queue *q, struct bio **bio,
		unsigned int *nr_segs)
{
	struct bio *split;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
		break;
	}

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		/*
		 * Since we're recursing into make_request here, ensure
		 * that we mark this bio as already having entered the queue.
		 * If not, and the queue is going away, we can get stuck
		 * forever waiting for the queue reference to drop. But
		 * that will never happen, as we're already holding a
		 * reference to it.
		 */
		bio_set_flag(*bio, BIO_QUEUE_ENTERED);

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
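
/*
 * Worked example of the recursion (hypothetical numbers): with a 1024
 * sector cap on the first bio, a 2560 sector bio is split into a 1024
 * sector front half (returned in *bio, marked REQ_NOMERGE) while the
 * 1536 sector remainder is resubmitted via generic_make_request() and
 * split again on the next pass into 1024 + 512 sectors.
 */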

/**
 * blk_queue_split - split a bio and submit the second half
 * @q:   [in] request queue pointer
 * @bio: [in, out] bio to be split
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. Since this function may allocate
 * a new bio from @q->bio_split, it is the responsibility of the caller to
 * ensure that @q is only released after processing of the split bio has
 * finished.
 */
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	unsigned int nr_segs;

	__blk_queue_split(q, bio, &nr_segs);
}
EXPORT_SYMBOL(blk_queue_split);

unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int nr_sectors = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		if (queue_max_discard_segments(rq->q) > 1) {
			struct bio *bio = rq->bio;

			for_each_bio(bio)
				nr_phys_segs++;
			return nr_phys_segs;
		}
		return 1;
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
					offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine.  For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}
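
/*
 * Worked example (hypothetical numbers, assuming 4 KiB pages): a 12 KiB
 * bvec with bv_offset 8192.  On the first pass offset = 8192, so the
 * code advances page by two and clears the in-page offset, keeping the
 * sg offset below PAGE_SIZE.  With a 64 KiB max segment size and no
 * segment boundary this maps as a single 12 KiB sg entry; with a 4 KiB
 * segment boundary, get_max_segment_size() caps each pass at 4 KiB and
 * three sg entries are produced instead.
 */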

static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two bios, since
			 * bvecs within one bio were already merged when
			 * pages were added to it.
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}

/*
 * Map a request to a scatterlist and return the number of sg entries set
 * up. The caller must make sure that sg can hold rq->nr_phys_segments
 * entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of
	 * segments is bigger than the number of the request's physical
	 * segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
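
/*
 * A minimal driver-side usage sketch (not from this file; "dev" and
 * "dir" stand for the driver's DMA device and data direction):
 *
 *	sg_init_table(sglist, blk_rq_nr_phys_segments(rq));
 *	nents = blk_rq_map_sg(q, rq, sglist);
 *	count = dma_map_sg(dev, sglist, nents, dir);
 *
 * The sg table must be sized by rq->nr_phys_segments; blk_rq_map_sg()
 * may return fewer entries than that when adjacent bvecs get merged.
 */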

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (!blk_cgroup_mergeable(req, bio))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/* discard request merge won't add new segment */
	if (req_op(req) == REQ_OP_DISCARD)
		return 1;

	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}
	if (!bio_crypt_ctx_mergeable(req->bio, blk_rq_bytes(req), bio))
		return 0;

	return ll_new_hw_segment(req, bio, nr_segs);
}

int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}
	if (!bio_crypt_ctx_mergeable(bio, bio->bi_iter.bi_size, req->bio))
		return 0;

	return ll_new_hw_segment(req, bio, nr_segs);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (!blk_cgroup_mergeable(req, next->bio))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_mergeable(req->bio, blk_rq_bytes(req), next->bio))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}
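
/*
 * Example of when this matters (hypothetical): merging a request that
 * carries REQ_FAILFAST_DEV with one that carries no failfast flags.
 * Each request pushes its own failfast bits down into its bios before
 * the bio lists are joined, so after the merge the per-bio flags, not
 * the request-level flags, describe how each part should be retried.
 */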

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;

		part_stat_lock();
		part = req->part;

		part_dec_in_flight(req->q, part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk)
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	/*
	 * Ownership of the bios passed from next to req; return 'next' for
	 * the caller to free.
	 */
	next->bio = NULL;
	return next;
}

struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct request *free;

	free = attempt_merge(q, rq, next);
	if (free) {
		blk_put_request(free);
		return 1;
	}

	return 0;
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device */
	if (rq->rq_disk != bio->bi_disk)
		return false;

	/* don't merge across cgroup boundaries */
	if (!blk_cgroup_mergeable(rq, bio))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	/* Only merge if the crypt contexts are compatible */
	if (!bio_crypt_ctx_compatible(bio, rq->bio))
		return false;

	return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
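
/*
 * Worked example (hypothetical numbers): for a request at sector 2048
 * spanning 8 sectors, a bio starting at sector 2056 is a back merge
 * candidate (2048 + 8 == 2056), while an 8 sector bio starting at
 * sector 2040 is a front merge candidate (2048 - 8 == 2040); anything
 * else is ELEVATOR_NO_MERGE.
 */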