1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Functions related to setting various queue properties from drivers
4  */
5 
6 #define pr_fmt(fmt)  "%s: " fmt, __func__
7 
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/bio.h>
12 #include <linux/blkdev.h>
13 #include <linux/pagemap.h>
14 #include <linux/backing-dev-defs.h>
15 #include <linux/gcd.h>
16 #include <linux/lcm.h>
17 #include <linux/jiffies.h>
18 #include <linux/gfp.h>
19 #include <linux/dma-mapping.h>
20 
21 #include "blk.h"
22 #include "blk-wbt.h"
23 
24 /* Protects blk_nr_sub_page_limit_queues and blk_sub_page_limits changes. */
25 static DEFINE_MUTEX(blk_sub_page_limit_lock);
26 static uint32_t blk_nr_sub_page_limit_queues;
27 DEFINE_STATIC_KEY_FALSE(blk_sub_page_limits);
28 
29 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
30 {
31 	q->rq_timeout = timeout;
32 }
33 EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
34 
35 /**
36  * blk_set_default_limits - reset limits to default values
37  * @lim:  the queue_limits structure to reset
38  *
39  * Description:
40  *   Returns a queue_limits structure to its default state.
41  */
42 void blk_set_default_limits(struct queue_limits *lim)
43 {
44 	lim->max_segments = BLK_MAX_SEGMENTS;
45 	lim->max_discard_segments = 1;
46 	lim->max_integrity_segments = 0;
47 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
48 	lim->virt_boundary_mask = 0;
49 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
50 	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
51 	lim->max_dev_sectors = 0;
52 	lim->chunk_sectors = 0;
53 	lim->max_write_same_sectors = 0;
54 	lim->max_write_zeroes_sectors = 0;
55 	lim->max_zone_append_sectors = 0;
56 	lim->max_discard_sectors = 0;
57 	lim->max_hw_discard_sectors = 0;
58 	lim->discard_granularity = 0;
59 	lim->discard_alignment = 0;
60 	lim->discard_misaligned = 0;
61 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
62 	lim->bounce = BLK_BOUNCE_NONE;
63 	lim->alignment_offset = 0;
64 	lim->io_opt = 0;
65 	lim->misaligned = 0;
66 	lim->zoned = BLK_ZONED_NONE;
67 	lim->zone_write_granularity = 0;
68 	lim->sub_page_limits = false;
69 }
70 EXPORT_SYMBOL(blk_set_default_limits);
71 
72 /**
73  * blk_set_stacking_limits - set default limits for stacking devices
74  * @lim:  the queue_limits structure to reset
75  *
76  * Description:
77  *   Returns a queue_limits structure to its default state. Should be used
78  *   by stacking drivers like DM that have no internal limits.
79  */
80 void blk_set_stacking_limits(struct queue_limits *lim)
81 {
82 	blk_set_default_limits(lim);
83 
84 	/* Inherit limits from component devices */
85 	lim->max_segments = USHRT_MAX;
86 	lim->max_discard_segments = USHRT_MAX;
87 	lim->max_hw_sectors = UINT_MAX;
88 	lim->max_segment_size = UINT_MAX;
89 	lim->max_sectors = UINT_MAX;
90 	lim->max_dev_sectors = UINT_MAX;
91 	lim->max_write_same_sectors = UINT_MAX;
92 	lim->max_write_zeroes_sectors = UINT_MAX;
93 	lim->max_zone_append_sectors = UINT_MAX;
94 }
95 EXPORT_SYMBOL(blk_set_stacking_limits);
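/*
 * Illustrative sketch (not part of blk-settings.c, hence unnumbered): a
 * stacking driver such as DM would typically reset its limits once with
 * blk_set_stacking_limits() and then call blk_stack_limits() (defined later
 * in this file) once per component device.  Only a single component is shown
 * here; the helper name and the sector-granular @data_offset parameter are
 * hypothetical.
 */
static void example_stack_component_limits(struct queue_limits *top,
					    struct block_device *bottom_bdev,
					    sector_t data_offset)
{
	/* Start from the permissive stacking defaults... */
	blk_set_stacking_limits(top);

	/* ...then tighten them to what the component device supports. */
	if (blk_stack_limits(top, &bdev_get_queue(bottom_bdev)->limits,
			     get_start_sect(bottom_bdev) + data_offset) < 0)
		pr_warn("component device is misaligned\n");
}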
96 
97 /**
98  * blk_queue_bounce_limit - set bounce buffer limit for queue
99  * @q: the request queue for the device
100  * @bounce: bounce limit to enforce
101  *
102  * Description:
103  *    Force bouncing for ISA DMA ranges or highmem.
104  *
105  *    DEPRECATED, don't use in new code.
106  **/
107 void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
108 {
109 	q->limits.bounce = bounce;
110 }
111 EXPORT_SYMBOL(blk_queue_bounce_limit);
112 
113 /* For debugfs. */
114 int blk_sub_page_limit_queues_get(void *data, u64 *val)
115 {
116 	*val = READ_ONCE(blk_nr_sub_page_limit_queues);
117 
118 	return 0;
119 }
120 
121 /**
122  * blk_enable_sub_page_limits - enable support for limits below the page size
123  * @lim: request queue limits for which to enable support of these features.
124  *
125  * Enable support for max_segment_size values smaller than PAGE_SIZE and for
126  * max_hw_sectors values below PAGE_SIZE >> SECTOR_SHIFT. Support for these
127  * features is not enabled all the time because of their runtime
128  * overhead.
129  */
130 static void blk_enable_sub_page_limits(struct queue_limits *lim)
131 {
132 	if (lim->sub_page_limits)
133 		return;
134 
135 	lim->sub_page_limits = true;
136 
137 	mutex_lock(&blk_sub_page_limit_lock);
138 	if (++blk_nr_sub_page_limit_queues == 1)
139 		static_branch_enable(&blk_sub_page_limits);
140 	mutex_unlock(&blk_sub_page_limit_lock);
141 }
142 
143 /**
144  * blk_disable_sub_page_limits - disable support for limits below the page size
145  * @lim: request queue limits for which to disable support of these features.
146  *
147  * Disable support for max_segment_size values smaller than PAGE_SIZE and for
148  * max_hw_sectors values below PAGE_SIZE >> SECTOR_SHIFT. Support for these
149  * features is not enabled all the time because of their runtime overhead.
150  */
151 void blk_disable_sub_page_limits(struct queue_limits *lim)
152 {
153 	if (!lim->sub_page_limits)
154 		return;
155 
156 	lim->sub_page_limits = false;
157 
158 	mutex_lock(&blk_sub_page_limit_lock);
159 	WARN_ON_ONCE(blk_nr_sub_page_limit_queues <= 0);
160 	if (--blk_nr_sub_page_limit_queues == 0)
161 		static_branch_disable(&blk_sub_page_limits);
162 	mutex_unlock(&blk_sub_page_limit_lock);
163 }
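/*
 * Illustrative sketch (not part of blk-settings.c): the static key declared
 * above lets hot-path code skip sub-page bookkeeping entirely while no queue
 * has enabled it.  The helper below is hypothetical.
 */
static inline bool example_queue_has_sub_page_limits(const struct queue_limits *lim)
{
	/* Patched out to a plain "false" until the first queue enables it. */
	if (!static_branch_unlikely(&blk_sub_page_limits))
		return false;

	return lim->sub_page_limits;
}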
164 
165 /**
166  * blk_queue_max_hw_sectors - set max sectors for a request for this queue
167  * @q:  the request queue for the device
168  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
169  *
170  * Description:
171  *    Enables a low level driver to set a hard upper limit,
172  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
173  *    the device driver based upon the capabilities of the I/O
174  *    controller.
175  *
176  *    max_dev_sectors is a hard limit imposed by the storage device for
177  *    READ/WRITE requests. It is set by the disk driver.
178  *
179  *    max_sectors is a soft limit imposed by the block layer for
180  *    filesystem type requests.  This value can be overridden on a
181  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
182  *    The soft limit can not exceed max_hw_sectors.
183  **/
184 void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
185 {
186 	struct queue_limits *limits = &q->limits;
187 	unsigned int min_max_hw_sectors = PAGE_SIZE >> SECTOR_SHIFT;
188 	unsigned int max_sectors;
189 
190 	if (max_hw_sectors < min_max_hw_sectors) {
191 		blk_enable_sub_page_limits(limits);
192 		min_max_hw_sectors = 1;
193 	}
194 
195 	if (max_hw_sectors < min_max_hw_sectors) {
196 		max_hw_sectors = min_max_hw_sectors;
197 		pr_info("set to minimum %u\n", max_hw_sectors);
198 	}
199 
200 	max_hw_sectors = round_down(max_hw_sectors,
201 				    limits->logical_block_size >> SECTOR_SHIFT);
202 	limits->max_hw_sectors = max_hw_sectors;
203 
204 	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
205 	max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS);
206 	max_sectors = round_down(max_sectors,
207 				 limits->logical_block_size >> SECTOR_SHIFT);
208 	limits->max_sectors = max_sectors;
209 
210 	if (!q->disk)
211 		return;
212 	q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
213 }
214 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
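/*
 * Illustrative sketch (not part of blk-settings.c): a low-level driver would
 * usually derive max_hw_sectors from its controller's maximum transfer size.
 * The helper and its byte-based parameter are hypothetical.
 */
static void example_set_transfer_limit(struct request_queue *q,
				       unsigned int max_transfer_bytes)
{
	/* Convert the controller's byte limit to 512-byte sectors. */
	blk_queue_max_hw_sectors(q, max_transfer_bytes >> SECTOR_SHIFT);
}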
215 
216 /**
217  * blk_queue_chunk_sectors - set size of the chunk for this queue
218  * @q:  the request queue for the device
219  * @chunk_sectors:  chunk sectors in the usual 512b unit
220  *
221  * Description:
222  *    If a driver doesn't want IOs to cross a given chunk size, it can set
223  *    this limit and prevent merging across chunks. Note that the block layer
224  *    must accept a page worth of data at any offset. So if the crossing of
225  *    chunks is a hard limitation in the driver, it must still be prepared
226  *    to split single page bios.
227  **/
228 void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
229 {
230 	q->limits.chunk_sectors = chunk_sectors;
231 }
232 EXPORT_SYMBOL(blk_queue_chunk_sectors);
233 
234 /**
235  * blk_queue_max_discard_sectors - set max sectors for a single discard
236  * @q:  the request queue for the device
237  * @max_discard_sectors: maximum number of sectors to discard
238  **/
239 void blk_queue_max_discard_sectors(struct request_queue *q,
240 		unsigned int max_discard_sectors)
241 {
242 	q->limits.max_hw_discard_sectors = max_discard_sectors;
243 	q->limits.max_discard_sectors = max_discard_sectors;
244 }
245 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
246 
247 /**
248  * blk_queue_max_write_same_sectors - set max sectors for a single write same
249  * @q:  the request queue for the device
250  * @max_write_same_sectors: maximum number of sectors to write per command
251  **/
252 void blk_queue_max_write_same_sectors(struct request_queue *q,
253 				      unsigned int max_write_same_sectors)
254 {
255 	q->limits.max_write_same_sectors = max_write_same_sectors;
256 }
257 EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
258 
259 /**
260  * blk_queue_max_write_zeroes_sectors - set max sectors for a single
261  *                                      write zeroes
262  * @q:  the request queue for the device
263  * @max_write_zeroes_sectors: maximum number of sectors to write per command
264  **/
265 void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
266 		unsigned int max_write_zeroes_sectors)
267 {
268 	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
269 }
270 EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
271 
272 /**
273  * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
274  * @q:  the request queue for the device
275  * @max_zone_append_sectors: maximum number of sectors to write per command
276  **/
277 void blk_queue_max_zone_append_sectors(struct request_queue *q,
278 		unsigned int max_zone_append_sectors)
279 {
280 	unsigned int max_sectors;
281 
282 	if (WARN_ON(!blk_queue_is_zoned(q)))
283 		return;
284 
285 	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
286 	max_sectors = min(q->limits.chunk_sectors, max_sectors);
287 
288 	/*
289 	 * Warn about possible driver bugs that leave the max_zone_append_sectors
290 	 * limit at 0: a 0 argument, the chunk_sectors limit (zone size) not set,
291 	 * or the max_hw_sectors limit not set.
292 	 */
293 	WARN_ON(!max_sectors);
294 
295 	q->limits.max_zone_append_sectors = max_sectors;
296 }
297 EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
298 
299 /**
300  * blk_queue_max_segments - set max hw segments for a request for this queue
301  * @q:  the request queue for the device
302  * @max_segments:  max number of segments
303  *
304  * Description:
305  *    Enables a low level driver to set an upper limit on the number of
306  *    hw data segments in a request.
307  **/
308 void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
309 {
310 	if (!max_segments) {
311 		max_segments = 1;
312 		pr_info("set to minimum %u\n", max_segments);
313 	}
314 
315 	q->limits.max_segments = max_segments;
316 }
317 EXPORT_SYMBOL(blk_queue_max_segments);
318 
319 /**
320  * blk_queue_max_discard_segments - set max segments for discard requests
321  * @q:  the request queue for the device
322  * @max_segments:  max number of segments
323  *
324  * Description:
325  *    Enables a low level driver to set an upper limit on the number of
326  *    segments in a discard request.
327  **/
328 void blk_queue_max_discard_segments(struct request_queue *q,
329 		unsigned short max_segments)
330 {
331 	q->limits.max_discard_segments = max_segments;
332 }
333 EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
334 
335 /**
336  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
337  * @q:  the request queue for the device
338  * @max_size:  max size of segment in bytes
339  *
340  * Description:
341  *    Enables a low level driver to set an upper limit on the size of a
342  *    coalesced segment
343  **/
344 void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
345 {
346 	unsigned int min_max_segment_size = PAGE_SIZE;
347 
348 	if (max_size < min_max_segment_size) {
349 		blk_enable_sub_page_limits(&q->limits);
350 		min_max_segment_size = SECTOR_SIZE;
351 	}
352 
353 	if (max_size < min_max_segment_size) {
354 		max_size = min_max_segment_size;
355 		pr_info("set to minimum %u\n", max_size);
356 	}
357 
358 	/* see blk_queue_virt_boundary() for the explanation */
359 	WARN_ON_ONCE(q->limits.virt_boundary_mask);
360 
361 	q->limits.max_segment_size = max_size;
362 }
363 EXPORT_SYMBOL(blk_queue_max_segment_size);
364 
365 /**
366  * blk_queue_logical_block_size - set logical block size for the queue
367  * @q:  the request queue for the device
368  * @size:  the logical block size, in bytes
369  *
370  * Description:
371  *   This should be set to the lowest possible block size that the
372  *   storage device can address.  The default of 512 covers most
373  *   hardware.
374  **/
375 void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
376 {
377 	struct queue_limits *limits = &q->limits;
378 
379 	limits->logical_block_size = size;
380 
381 	if (limits->physical_block_size < size)
382 		limits->physical_block_size = size;
383 
384 	if (limits->io_min < limits->physical_block_size)
385 		limits->io_min = limits->physical_block_size;
386 
387 	limits->max_hw_sectors =
388 		round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
389 	limits->max_sectors =
390 		round_down(limits->max_sectors, size >> SECTOR_SHIFT);
391 }
392 EXPORT_SYMBOL(blk_queue_logical_block_size);
393 
394 /**
395  * blk_queue_physical_block_size - set physical block size for the queue
396  * @q:  the request queue for the device
397  * @size:  the physical block size, in bytes
398  *
399  * Description:
400  *   This should be set to the lowest possible sector size that the
401  *   hardware can operate on without reverting to read-modify-write
402  *   operations.
403  */
404 void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
405 {
406 	q->limits.physical_block_size = size;
407 
408 	if (q->limits.physical_block_size < q->limits.logical_block_size)
409 		q->limits.physical_block_size = q->limits.logical_block_size;
410 
411 	if (q->limits.io_min < q->limits.physical_block_size)
412 		q->limits.io_min = q->limits.physical_block_size;
413 }
414 EXPORT_SYMBOL(blk_queue_physical_block_size);
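/*
 * Illustrative sketch (not part of blk-settings.c): a driver for a "512e"
 * drive (4096-byte physical sectors addressed through 512-byte logical
 * sectors) would report the two sizes separately.  The values are an
 * example, not taken from this file.
 */
static void example_set_512e_block_sizes(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);	/* smallest addressable unit */
	blk_queue_physical_block_size(q, 4096);	/* atom of the medium */
}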
415 
416 /**
417  * blk_queue_zone_write_granularity - set zone write granularity for the queue
418  * @q:  the request queue for the zoned device
419  * @size:  the zone write granularity size, in bytes
420  *
421  * Description:
422  *   This should be set to the lowest possible size that allows writing in
423  *   the sequential zones of a zoned block device.
424  */
425 void blk_queue_zone_write_granularity(struct request_queue *q,
426 				      unsigned int size)
427 {
428 	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
429 		return;
430 
431 	q->limits.zone_write_granularity = size;
432 
433 	if (q->limits.zone_write_granularity < q->limits.logical_block_size)
434 		q->limits.zone_write_granularity = q->limits.logical_block_size;
435 }
436 EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
437 
438 /**
439  * blk_queue_alignment_offset - set physical block alignment offset
440  * @q:	the request queue for the device
441  * @offset: alignment offset in bytes
442  *
443  * Description:
444  *   Some devices are naturally misaligned to compensate for things like
445  *   the legacy DOS partition table 63-sector offset.  Low-level drivers
446  *   should call this function for devices whose first sector is not
447  *   naturally aligned.
448  */
449 void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
450 {
451 	q->limits.alignment_offset =
452 		offset & (q->limits.physical_block_size - 1);
453 	q->limits.misaligned = 0;
454 }
455 EXPORT_SYMBOL(blk_queue_alignment_offset);
456 
457 void disk_update_readahead(struct gendisk *disk)
458 {
459 	struct request_queue *q = disk->queue;
460 
461 	/*
462 	 * For read-ahead of large files to be effective, we need to read ahead
463 	 * at least twice the optimal I/O size.
464 	 */
465 	disk->bdi->ra_pages =
466 		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
467 	disk->bdi->io_pages = queue_max_sectors(q) >> (PAGE_SHIFT - 9);
468 }
469 EXPORT_SYMBOL_GPL(disk_update_readahead);
470 
471 /**
472  * blk_limits_io_min - set minimum request size for a device
473  * @limits: the queue limits
474  * @min:  smallest I/O size in bytes
475  *
476  * Description:
477  *   Some devices have an internal block size bigger than the reported
478  *   hardware sector size.  This function can be used to signal the
479  *   smallest I/O the device can perform without incurring a performance
480  *   penalty.
481  */
482 void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
483 {
484 	limits->io_min = min;
485 
486 	if (limits->io_min < limits->logical_block_size)
487 		limits->io_min = limits->logical_block_size;
488 
489 	if (limits->io_min < limits->physical_block_size)
490 		limits->io_min = limits->physical_block_size;
491 }
492 EXPORT_SYMBOL(blk_limits_io_min);
493 
494 /**
495  * blk_queue_io_min - set minimum request size for the queue
496  * @q:	the request queue for the device
497  * @min:  smallest I/O size in bytes
498  *
499  * Description:
500  *   Storage devices may report a granularity or preferred minimum I/O
501  *   size which is the smallest request the device can perform without
502  *   incurring a performance penalty.  For disk drives this is often the
503  *   physical block size.  For RAID arrays it is often the stripe chunk
504  *   size.  A properly aligned multiple of minimum_io_size is the
505  *   preferred request size for workloads where a high number of I/O
506  *   operations is desired.
507  */
508 void blk_queue_io_min(struct request_queue *q, unsigned int min)
509 {
510 	blk_limits_io_min(&q->limits, min);
511 }
512 EXPORT_SYMBOL(blk_queue_io_min);
513 
514 /**
515  * blk_limits_io_opt - set optimal request size for a device
516  * @limits: the queue limits
517  * @opt:  optimal request size in bytes
518  *
519  * Description:
520  *   Storage devices may report an optimal I/O size, which is the
521  *   device's preferred unit for sustained I/O.  This is rarely reported
522  *   for disk drives.  For RAID arrays it is usually the stripe width or
523  *   the internal track size.  A properly aligned multiple of
524  *   optimal_io_size is the preferred request size for workloads where
525  *   sustained throughput is desired.
526  */
527 void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
528 {
529 	limits->io_opt = opt;
530 }
531 EXPORT_SYMBOL(blk_limits_io_opt);
532 
533 /**
534  * blk_queue_io_opt - set optimal request size for the queue
535  * @q:	the request queue for the device
536  * @opt:  optimal request size in bytes
537  *
538  * Description:
539  *   Storage devices may report an optimal I/O size, which is the
540  *   device's preferred unit for sustained I/O.  This is rarely reported
541  *   for disk drives.  For RAID arrays it is usually the stripe width or
542  *   the internal track size.  A properly aligned multiple of
543  *   optimal_io_size is the preferred request size for workloads where
544  *   sustained throughput is desired.
545  */
546 void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
547 {
548 	blk_limits_io_opt(&q->limits, opt);
549 	if (!q->disk)
550 		return;
551 	q->disk->bdi->ra_pages =
552 		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
553 }
554 EXPORT_SYMBOL(blk_queue_io_opt);
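/*
 * Illustrative sketch (not part of blk-settings.c): a striped device could
 * export its per-disk chunk as io_min and the full stripe width as io_opt so
 * that stripe-aligned, stripe-sized requests are preferred.  The helper and
 * geometry parameters are hypothetical.
 */
static void example_set_stripe_io_hints(struct request_queue *q,
					unsigned int chunk_bytes,
					unsigned int nr_data_disks)
{
	blk_queue_io_min(q, chunk_bytes);			/* per-disk chunk */
	blk_queue_io_opt(q, chunk_bytes * nr_data_disks);	/* full stripe   */
}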
555 
556 static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
557 {
558 	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
559 	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
560 		sectors = PAGE_SIZE >> SECTOR_SHIFT;
561 	return sectors;
562 }
563 
564 /**
565  * blk_stack_limits - adjust queue_limits for stacked devices
566  * @t:	the stacking driver limits (top device)
567  * @b:  the underlying queue limits (bottom, component device)
568  * @start:  first data sector within component device
569  *
570  * Description:
571  *    This function is used by stacking drivers like MD and DM to ensure
572  *    that all component devices have compatible block sizes and
573  *    alignments.  The stacking driver must provide a queue_limits
574  *    struct (top) and then iteratively call the stacking function for
575  *    all component (bottom) devices.  The stacking function will
576  *    attempt to combine the values and ensure proper alignment.
577  *
578  *    Returns 0 if the top and bottom queue_limits are compatible.  The
579  *    top device's block sizes and alignment offsets may be adjusted to
580  *    ensure alignment with the bottom device. If no compatible sizes
581  *    and alignments exist, -1 is returned and the resulting top
582  *    queue_limits will have the misaligned flag set to indicate that
583  *    the alignment_offset is undefined.
584  */
585 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
586 		     sector_t start)
587 {
588 	unsigned int top, bottom, alignment, ret = 0;
589 
590 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
591 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
592 	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
593 	t->max_write_same_sectors = min(t->max_write_same_sectors,
594 					b->max_write_same_sectors);
595 	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
596 					b->max_write_zeroes_sectors);
597 	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
598 					b->max_zone_append_sectors);
599 	t->bounce = max(t->bounce, b->bounce);
600 
601 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
602 					    b->seg_boundary_mask);
603 	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
604 					    b->virt_boundary_mask);
605 
606 	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
607 	t->max_discard_segments = min_not_zero(t->max_discard_segments,
608 					       b->max_discard_segments);
609 	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
610 						 b->max_integrity_segments);
611 
612 	t->max_segment_size = min_not_zero(t->max_segment_size,
613 					   b->max_segment_size);
614 
615 	t->misaligned |= b->misaligned;
616 
617 	alignment = queue_limit_alignment_offset(b, start);
618 
619 	/* Bottom device has different alignment.  Check that it is
620 	 * compatible with the current top alignment.
621 	 */
622 	if (t->alignment_offset != alignment) {
623 
624 		top = max(t->physical_block_size, t->io_min)
625 			+ t->alignment_offset;
626 		bottom = max(b->physical_block_size, b->io_min) + alignment;
627 
628 		/* Verify that top and bottom intervals line up */
629 		if (max(top, bottom) % min(top, bottom)) {
630 			t->misaligned = 1;
631 			ret = -1;
632 		}
633 	}
634 
635 	t->logical_block_size = max(t->logical_block_size,
636 				    b->logical_block_size);
637 
638 	t->physical_block_size = max(t->physical_block_size,
639 				     b->physical_block_size);
640 
641 	t->io_min = max(t->io_min, b->io_min);
642 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
643 
644 	/* Set non-power-of-2 compatible chunk_sectors boundary */
645 	if (b->chunk_sectors)
646 		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);
647 
648 	/* Physical block size a multiple of the logical block size? */
649 	if (t->physical_block_size & (t->logical_block_size - 1)) {
650 		t->physical_block_size = t->logical_block_size;
651 		t->misaligned = 1;
652 		ret = -1;
653 	}
654 
655 	/* Minimum I/O a multiple of the physical block size? */
656 	if (t->io_min & (t->physical_block_size - 1)) {
657 		t->io_min = t->physical_block_size;
658 		t->misaligned = 1;
659 		ret = -1;
660 	}
661 
662 	/* Optimal I/O a multiple of the physical block size? */
663 	if (t->io_opt & (t->physical_block_size - 1)) {
664 		t->io_opt = 0;
665 		t->misaligned = 1;
666 		ret = -1;
667 	}
668 
669 	/* chunk_sectors a multiple of the physical block size? */
670 	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
671 		t->chunk_sectors = 0;
672 		t->misaligned = 1;
673 		ret = -1;
674 	}
675 
676 	t->raid_partial_stripes_expensive =
677 		max(t->raid_partial_stripes_expensive,
678 		    b->raid_partial_stripes_expensive);
679 
680 	/* Find lowest common alignment_offset */
681 	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
682 		% max(t->physical_block_size, t->io_min);
683 
684 	/* Verify that new alignment_offset is on a logical block boundary */
685 	if (t->alignment_offset & (t->logical_block_size - 1)) {
686 		t->misaligned = 1;
687 		ret = -1;
688 	}
689 
690 	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
691 	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
692 	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
693 
694 	/* Discard alignment and granularity */
695 	if (b->discard_granularity) {
696 		alignment = queue_limit_discard_alignment(b, start);
697 
698 		if (t->discard_granularity != 0 &&
699 		    t->discard_alignment != alignment) {
700 			top = t->discard_granularity + t->discard_alignment;
701 			bottom = b->discard_granularity + alignment;
702 
703 			/* Verify that top and bottom intervals line up */
704 			if ((max(top, bottom) % min(top, bottom)) != 0)
705 				t->discard_misaligned = 1;
706 		}
707 
708 		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
709 						      b->max_discard_sectors);
710 		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
711 							 b->max_hw_discard_sectors);
712 		t->discard_granularity = max(t->discard_granularity,
713 					     b->discard_granularity);
714 		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
715 			t->discard_granularity;
716 	}
717 
718 	t->zone_write_granularity = max(t->zone_write_granularity,
719 					b->zone_write_granularity);
720 	t->zoned = max(t->zoned, b->zoned);
721 	return ret;
722 }
723 EXPORT_SYMBOL(blk_stack_limits);
724 
725 /**
726  * disk_stack_limits - adjust queue limits for stacked drivers
727  * @disk:  MD/DM gendisk (top)
728  * @bdev:  the underlying block device (bottom)
729  * @offset:  offset to beginning of data within component device
730  *
731  * Description:
732  *    Merges the limits for a top level gendisk and a bottom level
733  *    block_device.
734  */
735 void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
736 		       sector_t offset)
737 {
738 	struct request_queue *t = disk->queue;
739 
740 	if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
741 			get_start_sect(bdev) + (offset >> 9)) < 0)
742 		pr_notice("%s: Warning: Device %pg is misaligned\n",
743 			disk->disk_name, bdev);
744 
745 	disk_update_readahead(disk);
746 }
747 EXPORT_SYMBOL(disk_stack_limits);
748 
749 /**
750  * blk_queue_update_dma_pad - update pad mask
751  * @q:     the request queue for the device
752  * @mask:  pad mask
753  *
754  * Update dma pad mask.
755  *
756  * Appending a pad buffer to a request modifies the last entry of a
757  * scatter list such that it includes the pad buffer.
758  **/
759 void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
760 {
761 	if (mask > q->dma_pad_mask)
762 		q->dma_pad_mask = mask;
763 }
764 EXPORT_SYMBOL(blk_queue_update_dma_pad);
765 
766 /**
767  * blk_queue_segment_boundary - set boundary rules for segment merging
768  * @q:  the request queue for the device
769  * @mask:  the memory boundary mask
770  **/
771 void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
772 {
773 	if (mask < PAGE_SIZE - 1) {
774 		mask = PAGE_SIZE - 1;
775 		pr_info("set to minimum %lx\n", mask);
776 	}
777 
778 	q->limits.seg_boundary_mask = mask;
779 }
780 EXPORT_SYMBOL(blk_queue_segment_boundary);
781 
782 /**
783  * blk_queue_virt_boundary - set boundary rules for bio merging
784  * @q:  the request queue for the device
785  * @mask:  the memory boundary mask
786  **/
787 void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
788 {
789 	q->limits.virt_boundary_mask = mask;
790 
791 	/*
792 	 * Devices that require a virtual boundary do not support scatter/gather
793 	 * I/O natively, but instead require a descriptor list entry for each
794 	 * page (which might not be identical to the Linux PAGE_SIZE).  Because
795 	 * of that they are not limited by our notion of "segment size".
796 	 */
797 	if (mask)
798 		q->limits.max_segment_size = UINT_MAX;
799 }
800 EXPORT_SYMBOL(blk_queue_virt_boundary);
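/*
 * Illustrative sketch (not part of blk-settings.c): a controller whose DMA
 * descriptors each cover exactly one device page (NVMe PRP style) would set
 * the virtual boundary to that page size minus one.  The page-size parameter
 * is hypothetical.
 */
static void example_set_prp_style_boundary(struct request_queue *q,
					   unsigned int device_page_size)
{
	/* No segment may straddle a device-page boundary. */
	blk_queue_virt_boundary(q, device_page_size - 1);
}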
801 
802 /**
803  * blk_queue_dma_alignment - set dma length and memory alignment
804  * @q:     the request queue for the device
805  * @mask:  alignment mask
806  *
807  * Description:
808  *    Set required memory and length alignment for direct DMA transactions.
809  *    This is used when building direct I/O requests for the queue.
810  *
811  **/
812 void blk_queue_dma_alignment(struct request_queue *q, int mask)
813 {
814 	q->dma_alignment = mask;
815 }
816 EXPORT_SYMBOL(blk_queue_dma_alignment);
817 
818 /**
819  * blk_queue_update_dma_alignment - update dma length and memory alignment
820  * @q:     the request queue for the device
821  * @mask:  alignment mask
822  *
823  * Description:
824  *    Update required memory and length alignment for direct DMA transactions.
825  *    If the requested alignment is larger than the current alignment, then
826  *    the current queue alignment is updated to the new value, otherwise it
827  *    is left alone.  The design of this is to allow multiple objects
828  *    (driver, device, transport etc) to set their respective
829  *    alignments without having them interfere.
830  *
831  **/
832 void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
833 {
834 	BUG_ON(mask > PAGE_SIZE);
835 
836 	if (mask > q->dma_alignment)
837 		q->dma_alignment = mask;
838 }
839 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
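/*
 * Illustrative sketch (not part of blk-settings.c): driver, transport and
 * device may each raise the DMA alignment independently; the queue keeps the
 * largest mask.  The masks used here are made up for the example.
 */
static void example_raise_dma_alignment(struct request_queue *q)
{
	blk_queue_update_dma_alignment(q, 3);	/* driver needs 4-byte alignment   */
	blk_queue_update_dma_alignment(q, 511);	/* transport needs 512-byte blocks */
	/* q->dma_alignment is now at least 511, the stricter of the two masks. */
}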
840 
841 /**
842  * blk_set_queue_depth - tell the block layer about the device queue depth
843  * @q:		the request queue for the device
844  * @depth:		queue depth
845  *
846  */
847 void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
848 {
849 	q->queue_depth = depth;
850 	rq_qos_queue_depth_changed(q);
851 }
852 EXPORT_SYMBOL(blk_set_queue_depth);
853 
854 /**
855  * blk_queue_write_cache - configure queue's write cache
856  * @q:		the request queue for the device
857  * @wc:		write back cache on or off
858  * @fua:	device supports FUA writes, if true
859  *
860  * Tell the block layer about the write cache of @q.
861  */
862 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
863 {
864 	if (wc)
865 		blk_queue_flag_set(QUEUE_FLAG_WC, q);
866 	else
867 		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
868 	if (fua)
869 		blk_queue_flag_set(QUEUE_FLAG_FUA, q);
870 	else
871 		blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
872 
873 	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
874 }
875 EXPORT_SYMBOL_GPL(blk_queue_write_cache);
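/*
 * Illustrative sketch (not part of blk-settings.c): a driver reports its
 * cache capabilities once at probe time; the block layer then knows whether
 * flushes are needed and whether FUA writes can be passed through.  The
 * capability flags are hypothetical.
 */
static void example_report_cache_capabilities(struct request_queue *q,
					      bool volatile_write_cache,
					      bool supports_fua)
{
	blk_queue_write_cache(q, volatile_write_cache, supports_fua);
}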
876 
877 /**
878  * blk_queue_required_elevator_features - Set a queue's required elevator features
879  * @q:		the request queue for the target device
880  * @features:	Required elevator features OR'ed together
881  *
882  * Tell the block layer that for the device controlled through @q, the
883  * only elevators that can be used are those that implement at least the set of
884  * features specified by @features.
885  */
886 void blk_queue_required_elevator_features(struct request_queue *q,
887 					  unsigned int features)
888 {
889 	q->required_elevator_features = features;
890 }
891 EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
892 
893 /**
894  * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
895  * @q:		the request queue for the device
896  * @dev:	the device pointer for dma
897  *
898  * Tell the block layer that the segments of @q can be merged according to the DMA merge boundary of @dev.
899  */
900 bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
901 				       struct device *dev)
902 {
903 	unsigned long boundary = dma_get_merge_boundary(dev);
904 
905 	if (!boundary)
906 		return false;
907 
908 	/* No need to update max_segment_size. see blk_queue_virt_boundary() */
909 	blk_queue_virt_boundary(q, boundary);
910 
911 	return true;
912 }
913 EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
914 
915 static bool disk_has_partitions(struct gendisk *disk)
916 {
917 	unsigned long idx;
918 	struct block_device *part;
919 	bool ret = false;
920 
921 	rcu_read_lock();
922 	xa_for_each(&disk->part_tbl, idx, part) {
923 		if (bdev_is_partition(part)) {
924 			ret = true;
925 			break;
926 		}
927 	}
928 	rcu_read_unlock();
929 
930 	return ret;
931 }
932 
933 /**
934  * blk_queue_set_zoned - configure a disk queue zoned model.
935  * @disk:	the gendisk of the queue to configure
936  * @model:	the zoned model to set
937  *
938  * Set the zoned model of the request queue of @disk according to @model.
939  * When @model is BLK_ZONED_HM (host managed), this should be called only
940  * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
941  * If @model specifies BLK_ZONED_HA (host aware), the effective model used
942  * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
943  * on the disk.
944  */
945 void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
946 {
947 	struct request_queue *q = disk->queue;
948 	unsigned int old_model = q->limits.zoned;
949 
950 	switch (model) {
951 	case BLK_ZONED_HM:
952 		/*
953 		 * Host managed devices are supported only if
954 		 * CONFIG_BLK_DEV_ZONED is enabled.
955 		 */
956 		WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
957 		break;
958 	case BLK_ZONED_HA:
959 		/*
960 		 * Host aware devices can be treated either as regular block
961 		 * devices (similar to drive managed devices) or as zoned block
962 		 * devices to take advantage of the zone command set, similarly
963 		 * to host managed devices. We try the latter if there are no
964 		 * partitions and zoned block device support is enabled, else
965 		 * we do nothing special as far as the block layer is concerned.
966 		 */
967 		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
968 		    disk_has_partitions(disk))
969 			model = BLK_ZONED_NONE;
970 		break;
971 	case BLK_ZONED_NONE:
972 	default:
973 		if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
974 			model = BLK_ZONED_NONE;
975 		break;
976 	}
977 
978 	q->limits.zoned = model;
979 	if (model != BLK_ZONED_NONE) {
980 		/*
981 		 * Set the zone write granularity to the device logical block
982 		 * size by default. The driver can change this value if needed.
983 		 */
984 		blk_queue_zone_write_granularity(q,
985 						queue_logical_block_size(q));
986 	} else if (old_model != BLK_ZONED_NONE) {
987 		blk_queue_clear_zone_settings(q);
988 	}
989 }
990 EXPORT_SYMBOL_GPL(blk_queue_set_zoned);
991
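/*
 * Illustrative sketch (not part of blk-settings.c): a host-managed zoned
 * driver would typically set the zoned model and then the zone geometry,
 * with the zone size (chunk_sectors) configured before the zone-append
 * limit so the min() in blk_queue_max_zone_append_sectors() sees it.  The
 * helper and its zone-size parameter are hypothetical.
 */
static void example_setup_zoned_queue(struct gendisk *disk,
				      unsigned int zone_sectors)
{
	blk_queue_set_zoned(disk, BLK_ZONED_HM);
	blk_queue_chunk_sectors(disk->queue, zone_sectors);
	blk_queue_max_zone_append_sectors(disk->queue, zone_sectors);
}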