
Lines matching +full:top +full:- +full:level in block/blk-settings.c. Each fragment below keeps its source line number and the enclosing-function annotation emitted by the code-search tool.

16 #include "blk-wbt.h"
24 * blk_queue_prep_rq - set a prepare_request function for queue
36 q->prep_rq_fn = pfn; in blk_queue_prep_rq()
41 * blk_queue_unprep_rq - set an unprepare_request function for queue
53 q->unprep_rq_fn = ufn; in blk_queue_unprep_rq()
59 q->softirq_done_fn = fn; in blk_queue_softirq_done()
65 q->rq_timeout = timeout; in blk_queue_rq_timeout()
71 WARN_ON_ONCE(q->mq_ops); in blk_queue_rq_timed_out()
72 q->rq_timed_out_fn = fn; in blk_queue_rq_timed_out()
78 q->lld_busy_fn = fn; in blk_queue_lld_busy()
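
These first fragments are the legacy (pre-blk-mq) request-path hooks: a prepare_request function, its unprepare counterpart, a softirq completion handler, the request timeout, and the timed-out/busy callbacks. A minimal sketch of registering such hooks at queue-setup time; the mydrv_* name and its body are hypothetical, only the blk_queue_* calls and BLKPREP_OK come from this API:

#include <linux/blkdev.h>

/* hypothetical prepare hook: build per-command state before dispatch */
static int mydrv_prep_rq_fn(struct request_queue *q, struct request *rq)
{
	/* e.g. allocate and attach a driver command structure here */
	return BLKPREP_OK;	/* or BLKPREP_DEFER / BLKPREP_KILL */
}

/* during queue setup (legacy request path): */
blk_queue_prep_rq(q, mydrv_prep_rq_fn);
blk_queue_rq_timeout(q, 30 * HZ);
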
83 * blk_set_default_limits - reset limits to default values
91 lim->max_segments = BLK_MAX_SEGMENTS; in blk_set_default_limits()
92 lim->max_discard_segments = 1; in blk_set_default_limits()
93 lim->max_integrity_segments = 0; in blk_set_default_limits()
94 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; in blk_set_default_limits()
95 lim->virt_boundary_mask = 0; in blk_set_default_limits()
96 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; in blk_set_default_limits()
97 lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS; in blk_set_default_limits()
98 lim->max_dev_sectors = 0; in blk_set_default_limits()
99 lim->chunk_sectors = 0; in blk_set_default_limits()
100 lim->max_write_same_sectors = 0; in blk_set_default_limits()
101 lim->max_write_zeroes_sectors = 0; in blk_set_default_limits()
102 lim->max_discard_sectors = 0; in blk_set_default_limits()
103 lim->max_hw_discard_sectors = 0; in blk_set_default_limits()
104 lim->discard_granularity = 0; in blk_set_default_limits()
105 lim->discard_alignment = 0; in blk_set_default_limits()
106 lim->discard_misaligned = 0; in blk_set_default_limits()
107 lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; in blk_set_default_limits()
108 lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); in blk_set_default_limits()
109 lim->alignment_offset = 0; in blk_set_default_limits()
110 lim->io_opt = 0; in blk_set_default_limits()
111 lim->misaligned = 0; in blk_set_default_limits()
112 lim->cluster = 1; in blk_set_default_limits()
113 lim->zoned = BLK_ZONED_NONE; in blk_set_default_limits()
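
blk_set_default_limits() resets every field above to a conservative baseline: BLK_MAX_SEGMENTS (128) segments, BLK_MAX_SEGMENT_SIZE (64 KiB) per segment, BLK_SAFE_MAX_SECTORS (255) per request, 512-byte logical/physical blocks, and discard/write-same/write-zeroes disabled. A driver typically inherits these defaults at queue setup and then raises only what its hardware supports; a minimal sketch with hypothetical values:

blk_set_default_limits(&q->limits);
blk_queue_logical_block_size(q, 512);
blk_queue_max_hw_sectors(q, 2048);	/* hardware cap: 1 MiB per request */
blk_queue_max_segments(q, 128);
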
118 * blk_set_stacking_limits - set default limits for stacking devices
130 lim->max_segments = USHRT_MAX; in blk_set_stacking_limits()
131 lim->max_discard_segments = USHRT_MAX; in blk_set_stacking_limits()
132 lim->max_hw_sectors = UINT_MAX; in blk_set_stacking_limits()
133 lim->max_segment_size = UINT_MAX; in blk_set_stacking_limits()
134 lim->max_sectors = UINT_MAX; in blk_set_stacking_limits()
135 lim->max_dev_sectors = UINT_MAX; in blk_set_stacking_limits()
136 lim->max_write_same_sectors = UINT_MAX; in blk_set_stacking_limits()
137 lim->max_write_zeroes_sectors = UINT_MAX; in blk_set_stacking_limits()
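
For stacking drivers (MD/DM) the defaults go the other way: blk_set_stacking_limits() starts each field at its maximum (USHRT_MAX segments, UINT_MAX sectors) so that blk_stack_limits() below can only tighten them as bottom devices are folded in. The usual pattern, sketched with a hypothetical bdev variable:

struct queue_limits lim;

blk_set_stacking_limits(&lim);
/* for each component device, fold its limits into ours */
blk_stack_limits(&lim, &bdev_get_queue(bdev)->limits, 0);
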
142 * blk_queue_make_request - define an alternate make_request function for a device
168 q->nr_requests = BLKDEV_MAX_RQ; in blk_queue_make_request()
170 q->make_request_fn = mfn; in blk_queue_make_request()
173 q->nr_batching = BLK_BATCH_REQ; in blk_queue_make_request()
175 blk_set_default_limits(&q->limits); in blk_queue_make_request()
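
blk_queue_make_request() turns the queue into a bio-based one: the driver's function sees each bio directly instead of going through the request queue and I/O scheduler (the pattern used by brd, null_blk in bio mode, and DM/MD tops in this era). A minimal sketch; mydrv_make_request and its trivial body are hypothetical:

static blk_qc_t mydrv_make_request(struct request_queue *q, struct bio *bio)
{
	/* handle the bio directly; no requests, no elevator */
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

/* setup: */
q = blk_alloc_queue(GFP_KERNEL);
blk_queue_make_request(q, mydrv_make_request);
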
180 * blk_queue_bounce_limit - set bounce buffer limit for queue
186 * it can do I/O directly to. A low level driver can call
195 q->bounce_gfp = GFP_NOIO; in blk_queue_bounce_limit()
204 q->limits.bounce_pfn = max(max_low_pfn, b_pfn); in blk_queue_bounce_limit()
208 q->limits.bounce_pfn = b_pfn; in blk_queue_bounce_limit()
212 q->bounce_gfp = GFP_NOIO | GFP_DMA; in blk_queue_bounce_limit()
213 q->limits.bounce_pfn = b_pfn; in blk_queue_bounce_limit()
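
blk_queue_bounce_limit() records the highest physical address the device can DMA to; pages above it are copied ("bounced") through GFP_NOIO allocations, or GFP_DMA ones for ISA-class limits, before the I/O is issued. Sketch:

/* device cannot DMA to highmem; bounce anything above lowmem */
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
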
219 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
224 * Enables a low level driver to set a hard upper limit,
234 * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
239 struct queue_limits *limits = &q->limits; in blk_queue_max_hw_sectors()
243 max_hw_sectors = 1 << (PAGE_SHIFT - 9); in blk_queue_max_hw_sectors()
248 limits->max_hw_sectors = max_hw_sectors; in blk_queue_max_hw_sectors()
249 max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors); in blk_queue_max_hw_sectors()
251 limits->max_sectors = max_sectors; in blk_queue_max_hw_sectors()
252 q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9); in blk_queue_max_hw_sectors()
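
Note the split visible above: max_hw_sectors is the hard cap the driver reports, while max_sectors is the soft cap actually applied to filesystem requests (in this era it is additionally clamped to BLK_DEF_MAX_SECTORS, 2560 sectors = 1280 KiB, on a line not matched by this listing). Sketch:

blk_queue_max_hw_sectors(q, 8192);	/* hardware can take 4 MiB requests */
/* q->limits.max_sectors starts lower; users tune it via
 * /sys/block/<device>/queue/max_sectors_kb */
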
257 * blk_queue_chunk_sectors - set size of the chunk for this queue
264 * must currently be a power-of-2 in sectors. Also note that the block
272 q->limits.chunk_sectors = chunk_sectors; in blk_queue_chunk_sectors()
277 * blk_queue_max_discard_sectors - set max sectors for a single discard
284 q->limits.max_hw_discard_sectors = max_discard_sectors; in blk_queue_max_discard_sectors()
285 q->limits.max_discard_sectors = max_discard_sectors; in blk_queue_max_discard_sectors()
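
To support discard, a driver sets the granularity alongside the cap; the queue-flag plumbing that advertises discard to userspace is omitted here since its helper changed across kernel versions. Hypothetical SSD values:

q->limits.discard_granularity = 4096;	/* device discards in 4 KiB granules */
blk_queue_max_discard_sectors(q, UINT_MAX);
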
290 * blk_queue_max_write_same_sectors - set max sectors for a single write same
297 q->limits.max_write_same_sectors = max_write_same_sectors; in blk_queue_max_write_same_sectors()
302 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
310 q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors; in blk_queue_max_write_zeroes_sectors()
315 * blk_queue_max_segments - set max hw segments for a request for this queue
320 * Enables a low level driver to set an upper limit on the number of
331 q->limits.max_segments = max_segments; in blk_queue_max_segments()
336 * blk_queue_max_discard_segments - set max segments for discard requests
341 * Enables a low level driver to set an upper limit on the number of
347 q->limits.max_discard_segments = max_segments; in blk_queue_max_discard_segments()
352 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
357 * Enables a low level driver to set an upper limit on the size of a
368 q->limits.max_segment_size = max_size; in blk_queue_max_segment_size()
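
A minimal sketch of the two segment knobs together, for a hypothetical HBA with a 256-entry scatter-gather table and 64 KiB per entry:

blk_queue_max_segments(q, 256);
blk_queue_max_segment_size(q, 64 * 1024);
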
373 * blk_queue_logical_block_size - set logical block size for the queue
384 q->limits.logical_block_size = size; in blk_queue_logical_block_size()
386 if (q->limits.physical_block_size < size) in blk_queue_logical_block_size()
387 q->limits.physical_block_size = size; in blk_queue_logical_block_size()
389 if (q->limits.io_min < q->limits.physical_block_size) in blk_queue_logical_block_size()
390 q->limits.io_min = q->limits.physical_block_size; in blk_queue_logical_block_size()
395 * blk_queue_physical_block_size - set physical block size for the queue
401 * hardware can operate on without reverting to read-modify-write
406 q->limits.physical_block_size = size; in blk_queue_physical_block_size()
408 if (q->limits.physical_block_size < q->limits.logical_block_size) in blk_queue_physical_block_size()
409 q->limits.physical_block_size = q->limits.logical_block_size; in blk_queue_physical_block_size()
411 if (q->limits.io_min < q->limits.physical_block_size) in blk_queue_physical_block_size()
412 q->limits.io_min = q->limits.physical_block_size; in blk_queue_physical_block_size()
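
The two setters maintain the invariants visible above: physical_block_size is pushed up to at least the logical size, and io_min up to at least the physical size. A sketch for a 512e drive (512-byte logical sectors emulated on 4 KiB physical sectors):

blk_queue_logical_block_size(q, 512);
blk_queue_physical_block_size(q, 4096);
/* io_min is now at least 4096 as a side effect */
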
417 * blk_queue_alignment_offset - set physical block alignment offset
423 * the legacy DOS partition table 63-sector offset. Low-level drivers
429 q->limits.alignment_offset = in blk_queue_alignment_offset()
430 offset & (q->limits.physical_block_size - 1); in blk_queue_alignment_offset()
431 q->limits.misaligned = 0; in blk_queue_alignment_offset()
436 * blk_limits_io_min - set minimum request size for a device
448 limits->io_min = min; in blk_limits_io_min()
450 if (limits->io_min < limits->logical_block_size) in blk_limits_io_min()
451 limits->io_min = limits->logical_block_size; in blk_limits_io_min()
453 if (limits->io_min < limits->physical_block_size) in blk_limits_io_min()
454 limits->io_min = limits->physical_block_size; in blk_limits_io_min()
459 * blk_queue_io_min - set minimum request size for the queue
474 blk_limits_io_min(&q->limits, min); in blk_queue_io_min()
479 * blk_limits_io_opt - set optimal request size for a device
493 limits->io_opt = opt; in blk_limits_io_opt()
498 * blk_queue_io_opt - set optimal request size for the queue
512 blk_limits_io_opt(&q->limits, opt); in blk_queue_io_opt()
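
io_min and io_opt are how RAID-style devices advertise chunk and stripe geometry to mkfs and friends. A sketch for a hypothetical RAID5 array with a 64 KiB chunk and 3 data disks per stripe:

blk_limits_io_min(&lim, 64 * 1024);	/* smallest efficient request: one chunk */
blk_limits_io_opt(&lim, 3 * 64 * 1024);	/* optimal request: one full stripe */
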
517 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
518 * @t: the stacking driver (top)
523 blk_stack_limits(&t->limits, &b->limits, 0); in blk_queue_stack_limits()
528 * blk_stack_limits - adjust queue_limits for stacked devices
529 * @t: the stacking driver limits (top device)
537 * struct (top) and then iteratively call the stacking function for
541 * Returns 0 if the top and bottom queue_limits are compatible. The
542 * top device's block sizes and alignment offsets may be adjusted to
544 * and alignments exist, -1 is returned and the resulting top
551 unsigned int top, bottom, alignment, ret = 0; in blk_stack_limits() local
553 t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); in blk_stack_limits()
554 t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); in blk_stack_limits()
555 t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors); in blk_stack_limits()
556 t->max_write_same_sectors = min(t->max_write_same_sectors, in blk_stack_limits()
557 b->max_write_same_sectors); in blk_stack_limits()
558 t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors, in blk_stack_limits()
559 b->max_write_zeroes_sectors); in blk_stack_limits()
560 t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); in blk_stack_limits()
562 t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, in blk_stack_limits()
563 b->seg_boundary_mask); in blk_stack_limits()
564 t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask, in blk_stack_limits()
565 b->virt_boundary_mask); in blk_stack_limits()
567 t->max_segments = min_not_zero(t->max_segments, b->max_segments); in blk_stack_limits()
568 t->max_discard_segments = min_not_zero(t->max_discard_segments, in blk_stack_limits()
569 b->max_discard_segments); in blk_stack_limits()
570 t->max_integrity_segments = min_not_zero(t->max_integrity_segments, in blk_stack_limits()
571 b->max_integrity_segments); in blk_stack_limits()
573 t->max_segment_size = min_not_zero(t->max_segment_size, in blk_stack_limits()
574 b->max_segment_size); in blk_stack_limits()
576 t->misaligned |= b->misaligned; in blk_stack_limits()
581 * compatible with the current top alignment. in blk_stack_limits()
583 if (t->alignment_offset != alignment) { in blk_stack_limits()
585 top = max(t->physical_block_size, t->io_min) in blk_stack_limits()
586 + t->alignment_offset; in blk_stack_limits()
587 bottom = max(b->physical_block_size, b->io_min) + alignment; in blk_stack_limits()
589 /* Verify that top and bottom intervals line up */ in blk_stack_limits()
590 if (max(top, bottom) % min(top, bottom)) { in blk_stack_limits()
591 t->misaligned = 1; in blk_stack_limits()
592 ret = -1; in blk_stack_limits()
596 t->logical_block_size = max(t->logical_block_size, in blk_stack_limits()
597 b->logical_block_size); in blk_stack_limits()
599 t->physical_block_size = max(t->physical_block_size, in blk_stack_limits()
600 b->physical_block_size); in blk_stack_limits()
602 t->io_min = max(t->io_min, b->io_min); in blk_stack_limits()
603 t->io_opt = lcm_not_zero(t->io_opt, b->io_opt); in blk_stack_limits()
605 t->cluster &= b->cluster; in blk_stack_limits()
608 if (t->physical_block_size & (t->logical_block_size - 1)) { in blk_stack_limits()
609 t->physical_block_size = t->logical_block_size; in blk_stack_limits()
610 t->misaligned = 1; in blk_stack_limits()
611 ret = -1; in blk_stack_limits()
615 if (t->io_min & (t->physical_block_size - 1)) { in blk_stack_limits()
616 t->io_min = t->physical_block_size; in blk_stack_limits()
617 t->misaligned = 1; in blk_stack_limits()
618 ret = -1; in blk_stack_limits()
622 if (t->io_opt & (t->physical_block_size - 1)) { in blk_stack_limits()
623 t->io_opt = 0; in blk_stack_limits()
624 t->misaligned = 1; in blk_stack_limits()
625 ret = -1; in blk_stack_limits()
628 t->raid_partial_stripes_expensive = in blk_stack_limits()
629 max(t->raid_partial_stripes_expensive, in blk_stack_limits()
630 b->raid_partial_stripes_expensive); in blk_stack_limits()
633 t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment) in blk_stack_limits()
634 % max(t->physical_block_size, t->io_min); in blk_stack_limits()
637 if (t->alignment_offset & (t->logical_block_size - 1)) { in blk_stack_limits()
638 t->misaligned = 1; in blk_stack_limits()
639 ret = -1; in blk_stack_limits()
643 if (b->discard_granularity) { in blk_stack_limits()
646 if (t->discard_granularity != 0 && in blk_stack_limits()
647 t->discard_alignment != alignment) { in blk_stack_limits()
648 top = t->discard_granularity + t->discard_alignment; in blk_stack_limits()
649 bottom = b->discard_granularity + alignment; in blk_stack_limits()
651 /* Verify that top and bottom intervals line up */ in blk_stack_limits()
652 if ((max(top, bottom) % min(top, bottom)) != 0) in blk_stack_limits()
653 t->discard_misaligned = 1; in blk_stack_limits()
656 t->max_discard_sectors = min_not_zero(t->max_discard_sectors, in blk_stack_limits()
657 b->max_discard_sectors); in blk_stack_limits()
658 t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors, in blk_stack_limits()
659 b->max_hw_discard_sectors); in blk_stack_limits()
660 t->discard_granularity = max(t->discard_granularity, in blk_stack_limits()
661 b->discard_granularity); in blk_stack_limits()
662 t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) % in blk_stack_limits()
663 t->discard_granularity; in blk_stack_limits()
666 if (b->chunk_sectors) in blk_stack_limits()
667 t->chunk_sectors = min_not_zero(t->chunk_sectors, in blk_stack_limits()
668 b->chunk_sectors); in blk_stack_limits()
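
A worked example of the interval check near the top of blk_stack_limits(): suppose the top device has already inherited physical_block_size 4096 and alignment_offset 3584 from one member (the legacy 63-sector DOS offset: 63 * 512 = 32256, and 32256 mod 4096 = 3584), and a second member reports physical_block_size 4096 with alignment 0. Then top = 4096 + 3584 = 7680 and bottom = 4096 + 0 = 4096; 7680 mod 4096 = 3584 is nonzero, so the intervals can never line up, t->misaligned is set, and -1 is returned. Callers typically just warn, as sketched here:

if (blk_stack_limits(&t->limits, &b->limits, start) < 0)
	pr_warn("stacked device is misaligned\n");
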
675 * bdev_stack_limits - adjust queue limits for stacked drivers
676 * @t: the stacking driver limits (top device)
681 * Merges queue limits for a top device and a block_device. Returns
682 * 0 if alignment didn't change. Returns -1 if adding the bottom
692 return blk_stack_limits(t, &bq->limits, start); in bdev_stack_limits()
697 * disk_stack_limits - adjust queue limits for stacked drivers
698 * @disk: MD/DM gendisk (top)
703 * Merges the limits for a top level gendisk and a bottom level
709 struct request_queue *t = disk->queue; in disk_stack_limits()
711 if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) { in disk_stack_limits()
712 char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; in disk_stack_limits() local
714 disk_name(disk, 0, top); in disk_stack_limits()
718 top, bottom); in disk_stack_limits()
721 t->backing_dev_info->io_pages = in disk_stack_limits()
722 t->limits.max_sectors >> (PAGE_SHIFT - 9); in disk_stack_limits()
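
This wrapper is the form MD and DM consumers actually call: it folds one member's limits into the top-level gendisk, prints the two bdevnames when that member is misaligned, and keeps the readahead hint (io_pages) in sync with the possibly-reduced max_sectors. For example, the MD driver stacks each member roughly like this (the shift converts the sector offset into the byte offset this function expects):

disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9);
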
727 * blk_queue_dma_pad - set pad mask
738 q->dma_pad_mask = mask; in blk_queue_dma_pad()
743 * blk_queue_update_dma_pad - update pad mask
754 if (mask > q->dma_pad_mask) in blk_queue_update_dma_pad()
755 q->dma_pad_mask = mask; in blk_queue_update_dma_pad()
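
The pad mask rounds up the transfer length the hardware sees, not the buffer alignment; libata, for instance, uses it to pad ATAPI transfer lengths to a 4-byte multiple. Sketch:

/* pad transfer lengths to a multiple of 4 bytes */
blk_queue_update_dma_pad(q, 4 - 1);
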
760 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
762 * @dma_drain_needed: fn which returns non-zero if drain is necessary
785 return -EINVAL; in blk_queue_dma_drain()
787 blk_queue_max_segments(q, queue_max_segments(q) - 1); in blk_queue_dma_drain()
788 q->dma_drain_needed = dma_drain_needed; in blk_queue_dma_drain()
789 q->dma_drain_buffer = buf; in blk_queue_dma_drain()
790 q->dma_drain_size = size; in blk_queue_dma_drain()
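
A drain buffer catches devices that insist on transferring more data than the request asked for (the classic case is ATAPI). A hedged sketch; mydrv_drain_needed and the 256-byte size are hypothetical, and the call fails when the queue cannot spare a segment:

/* drain only for passthrough commands, where the device may overrun */
static int mydrv_drain_needed(struct request *rq)
{
	return blk_rq_is_passthrough(rq);
}

/* during setup: */
if (blk_queue_dma_drain(q, mydrv_drain_needed, drain_buf, 256))
	return -EINVAL;	/* queue had fewer than two segments to give up */
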
797 * blk_queue_segment_boundary - set boundary rules for segment merging
803 if (mask < PAGE_SIZE - 1) { in blk_queue_segment_boundary()
804 mask = PAGE_SIZE - 1; in blk_queue_segment_boundary()
809 q->limits.seg_boundary_mask = mask; in blk_queue_segment_boundary()
814 * blk_queue_virt_boundary - set boundary rules for bio merging
820 q->limits.virt_boundary_mask = mask; in blk_queue_virt_boundary()
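
The two boundary masks constrain how segments may be laid out in memory. A sketch showing two common shapes (values hypothetical):

/* no segment may cross a 64 KiB boundary */
blk_queue_segment_boundary(q, 0xffff);
/* every element must start and end page-aligned (NVMe-PRP style) */
blk_queue_virt_boundary(q, PAGE_SIZE - 1);
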
825 * blk_queue_dma_alignment - set dma length and memory alignment
836 q->dma_alignment = mask; in blk_queue_dma_alignment()
841 * blk_queue_update_dma_alignment - update dma length and memory alignment
858 if (mask > q->dma_alignment) in blk_queue_update_dma_alignment()
859 q->dma_alignment = mask; in blk_queue_update_dma_alignment()
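
The default DMA alignment mask is 511 (512-byte aligned buffers), and as the fragment above shows, the update variant only ever raises the mask, so stacked consumers end up with the strictest requirement. Sketch:

/* hypothetical device: buffers and lengths must be 4-byte aligned */
blk_queue_update_dma_alignment(q, 4 - 1);
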
873 * blk_set_queue_depth - tell the block layer about the device queue depth
880 q->queue_depth = depth; in blk_set_queue_depth()
886 * blk_queue_write_cache - configure queue's write cache
895 spin_lock_irq(q->queue_lock); in blk_queue_write_cache()
904 spin_unlock_irq(q->queue_lock); in blk_queue_write_cache()
906 wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags)); in blk_queue_write_cache()
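
blk_queue_write_cache() sets the QUEUE_FLAG_WC/QUEUE_FLAG_FUA state that the flush machinery consults when deciding whether REQ_PREFLUSH and REQ_FUA need real work, and (per the last line above) informs writeback throttling that a volatile cache is present. Sketch for a hypothetical drive with a volatile write cache, FUA support, and a 64-deep queue:

blk_queue_write_cache(q, true, true);
blk_set_queue_depth(q, 64);
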
912 blk_max_low_pfn = max_low_pfn - 1; in blk_settings_init()
913 blk_max_pfn = max_pfn - 1; in blk_settings_init()