Lines matching "top-level" in block/blk-settings.c

// SPDX-License-Identifier: GPL-2.0

#include <linux/dma-mapping.h>
#include "blk-wbt.h"

/* blk_queue_rq_timeout() */
	q->rq_timeout = timeout;

/* blk_set_default_limits - reset limits to default values */
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_discard_segments = 1;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->virt_boundary_mask = 0;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->max_dev_sectors = 0;
	lim->chunk_sectors = 0;
	lim->max_write_same_sectors = 0;
	lim->max_write_zeroes_sectors = 0;
	lim->max_zone_append_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->max_hw_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->zoned = BLK_ZONED_NONE;

/* blk_set_stacking_limits - set default limits for stacking devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_same_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
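
/*
 * Example (added for illustration, not part of this file; assumes
 * <linux/blkdev.h>): a sketch of how a hypothetical stacking driver
 * might use the two initializers above. blk_set_stacking_limits()
 * starts from "no restrictions" so that blk_stack_limits() can only
 * tighten the limits as components are folded in. Names prefixed my_
 * are assumptions.
 */
static int my_stacked_dev_build_limits(struct queue_limits *lim,
				       struct block_device **bdevs, int nr)
{
	int i, ret = 0;

	blk_set_stacking_limits(lim);	/* permissive starting point */

	for (i = 0; i < nr; i++)	/* tighten per component device */
		ret |= blk_stack_limits(lim, &bdev_get_queue(bdevs[i])->limits,
					get_start_sect(bdevs[i]));

	return ret ? -1 : 0;
}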

/*
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 *
 * Different hardware can have different requirements as to what pages
 * it can do I/O directly to. A low level driver can call
 * blk_queue_bounce_limit to have lower memory pages allocated as bounce
 * buffers for doing I/O to pages residing above the given limit.
 */
	q->bounce_gfp = GFP_NOIO;
	/* 64-bit case: never bounce below the end of low memory */
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
	/* 32-bit case */
	q->limits.bounce_pfn = b_pfn;
	/* ISA DMA case: bounce buffers must come from ZONE_DMA */
	q->bounce_gfp = GFP_NOIO | GFP_DMA;
	q->limits.bounce_pfn = b_pfn;
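
/*
 * Example (illustrative, assumed driver context "q"): a controller that
 * cannot DMA into highmem could bounce anything above low memory, and a
 * 32-bit-DMA-only device could pass its addressing limit directly.
 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);	/* no highmem I/O */
	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));	/* 32-bit DMA only */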

/*
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 *
 * Enables a low level driver to set a hard upper limit, max_hw_sectors,
 * on the size of requests. The derived soft limit, max_sectors, can be
 * overridden on a per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 */
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	/* clamp to at least one page */
	max_hw_sectors = 1 << (PAGE_SHIFT - 9);

	limits->max_hw_sectors = max_hw_sectors;
	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
	limits->max_sectors = max_sectors;
	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
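
/*
 * Example (hypothetical values): a controller limited to 128 KiB per
 * request advertises 128 KiB / 512 B = 256 sectors. The derived soft
 * limit, max_sectors, can later be lowered by an administrator through
 * /sys/block/<device>/queue/max_sectors_kb.
 */
	blk_queue_max_hw_sectors(q, 256);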

/* blk_queue_chunk_sectors - set size of the chunk for this queue */
	q->limits.chunk_sectors = chunk_sectors;

/* blk_queue_max_discard_sectors - set max sectors for a single discard */
	q->limits.max_hw_discard_sectors = max_discard_sectors;
	q->limits.max_discard_sectors = max_discard_sectors;

/* blk_queue_max_write_same_sectors - set max sectors for a single write same */
	q->limits.max_write_same_sectors = max_write_same_sectors;

/* blk_queue_max_write_zeroes_sectors - set max sectors for a single write zeroes operation */
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;

/* blk_queue_max_zone_append_sectors - set max sectors for a single zone append */
	max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
	max_sectors = min(q->limits.chunk_sectors, max_sectors);

	q->limits.max_zone_append_sectors = max_sectors;

/*
 * blk_queue_max_segments - set max hw segments for a request for this queue
 *
 * Enables a low level driver to set an upper limit on the number of
 * hw data segments in a request.
 */
	q->limits.max_segments = max_segments;

/*
 * blk_queue_max_discard_segments - set max segments for discard requests
 *
 * Enables a low level driver to set an upper limit on the number of
 * segments in a discard request.
 */
	q->limits.max_discard_segments = max_segments;
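
/*
 * Example (hypothetical hardware): a device with a 32-entry
 * scatter-gather table that accepts one range per discard command.
 */
	blk_queue_max_segments(q, 32);
	blk_queue_max_discard_segments(q, 1);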

/*
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 *
 * Enables a low level driver to set an upper limit on the size of a
 * coalesced segment.
 */
	WARN_ON_ONCE(q->limits.virt_boundary_mask);

	q->limits.max_segment_size = max_size;

/* blk_queue_logical_block_size - set logical block size for the queue */
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;

/*
 * blk_queue_physical_block_size - set physical block size for the queue
 *
 * This should be set to the lowest possible sector size that the
 * hardware can operate on without reverting to read-modify-write
 * operations.
 */
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
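
/*
 * Example (a typical "512e" disk): 4 KiB physical sectors addressed in
 * 512-byte logical blocks. The helpers above guarantee that
 * physical_block_size and io_min never end up smaller than the logical
 * block size, whatever order the calls are made in.
 */
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);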

/*
 * blk_queue_alignment_offset - set physical block alignment offset
 *
 * Some devices are naturally misaligned to compensate for things like
 * the legacy DOS partition table 63-sector offset. Low-level drivers
 * should call this function for devices whose first sector is not
 * naturally aligned.
 */
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;

/* blk_queue_update_readahead() */
	/*
	 * For read-ahead of large files to be effective, we need to read
	 * ahead at least twice the optimal I/O size.
	 */
	q->backing_dev_info->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
	q->backing_dev_info->io_pages =
		queue_max_sectors(q) >> (PAGE_SHIFT - 9);

/* blk_limits_io_min - set minimum request size for a device */
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;

/* blk_queue_io_min - set minimum request size for the queue */
	blk_limits_io_min(&q->limits, min);

/* blk_limits_io_opt - set optimal request size for a device */
	limits->io_opt = opt;

/* blk_queue_io_opt - set optimal request size for the queue */
	blk_limits_io_opt(&q->limits, opt);
	q->backing_dev_info->ra_pages =
		max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
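
/*
 * Example (hypothetical RAID-5 layout, queue "q"): with 64 KiB chunks
 * across four data disks, io_min is one chunk (the smallest write that
 * avoids a chunk read-modify-write) and io_opt one full stripe.
 */
	blk_queue_io_min(q, 64 * 1024);		/* chunk size */
	blk_queue_io_opt(q, 4 * 64 * 1024);	/* stripe width */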

/*
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 *
 * Description:
 *    Stacking drivers (MD/DM) provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible. The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure they are compatible with the devices underneath. If no
 *    compatible sizes and alignments exist, -1 is returned and the
 *    resulting top queue_limits will have the misaligned flag set.
 */
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(t->max_zone_append_sectors,
					 b->max_zone_append_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment?  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {
		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
		t->chunk_sectors = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}

	t->zoned = max(t->zoned, b->zoned);
	return ret;
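
/*
 * Worked example of the interval check above (illustrative numbers):
 * the top has physical_block_size = io_min = 4096 and alignment_offset
 * = 0, so top = 4096 + 0 = 4096. The bottom device has io_min = 4096
 * but a 512-byte alignment, so bottom = 4096 + 512 = 4608. Then
 * max(4096, 4608) % min(4096, 4608) = 4608 % 4096 = 512 != 0: the two
 * intervals can never line up, so misaligned is set and -1 returned.
 */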

/*
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk: MD/DM gendisk (top)
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
	struct request_queue *t = disk->queue;

	if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + (offset >> 9)) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		pr_notice("%s: Warning: Device %s is misaligned\n",
			  top, bottom);
	}

	blk_queue_update_readahead(disk->queue);
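
/*
 * Example (hypothetical MD/DM-style driver): after initializing the top
 * queue with blk_set_stacking_limits(), merge in every component via
 * disk_stack_limits(). Names prefixed my_ are assumptions.
 */
static void my_target_stack_components(struct gendisk *disk,
				       struct block_device **bdevs, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		disk_stack_limits(disk, bdevs[i], 0);	/* no extra data offset */
}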

/* blk_queue_update_dma_pad - update pad mask */
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;

/* blk_queue_segment_boundary - set boundary rules for segment merging */
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
	}

	q->limits.seg_boundary_mask = mask;

/* blk_queue_virt_boundary - set boundary rules for bio merging */
	q->limits.virt_boundary_mask = mask;

	/*
	 * Devices that require a virtual boundary do not support
	 * scatter/gather I/O natively, so they are not limited by the
	 * usual notion of segment size.
	 */
	if (mask)
		q->limits.max_segment_size = UINT_MAX;

/* blk_queue_dma_alignment - set dma length and memory alignment */
	q->dma_alignment = mask;

/* blk_queue_update_dma_alignment - update dma length and memory alignment */
	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
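
/*
 * Example (hypothetical device): hardware requiring 512-byte alignment
 * of buffer addresses and lengths passes a mask of 511. The "update"
 * variant above only ever grows the mask, so a component added later
 * cannot relax a stricter existing requirement.
 */
	blk_queue_dma_alignment(q, 511);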

/* blk_set_queue_depth - tell the block layer about the device queue depth */
	q->queue_depth = depth;

/* blk_queue_write_cache - configure queue's write cache */
	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
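
/*
 * Example: a device that reports a volatile write cache and native FUA.
 * The block layer will then issue flushes/FUA writes as needed, and
 * wbt_set_write_cache() above keeps the writeback throttle informed.
 */
	blk_queue_write_cache(q, true, true);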

/* blk_queue_required_elevator_features - set required elevator features for the queue */
	q->required_elevator_features = features;

/* blk_queue_can_use_dma_map_merging - configure queue for merging segments */

/* blk_queue_set_zoned - configure a disk queue zoned model */
	disk->queue->limits.zoned = model;
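
/*
 * Example: a host-managed SMR disk registers as BLK_ZONED_HM;
 * host-aware and conventional devices use BLK_ZONED_HA and
 * BLK_ZONED_NONE respectively.
 */
	blk_queue_set_zoned(disk, BLK_ZONED_HM);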

/* blk_settings_init() */
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;