
Lines Matching refs:limits

508 	struct queue_limits	limits;  member
654 #define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
655 #define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
656 #define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX)
657 #define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
673 ((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)
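All these predicates, like the bare condition at 673, reduce to a single bit test against q->limits.features. A minimal sketch of consuming two of them to pick a readahead policy; my_pick_readahead_kb() is a hypothetical helper, not kernel API:

#include <linux/blkdev.h>

/* Sketch: every feature predicate above is one AND against
 * limits.features. Policy numbers are illustrative, not tuned. */
static unsigned int my_pick_readahead_kb(struct request_queue *q)
{
	if (blk_queue_dax(q))
		return 0;	/* DAX bypasses the page cache */
	if (blk_queue_nonrot(q))
		return 128;	/* flash: modest readahead */
	return 512;		/* rotational: read further ahead */
}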
704 (q->limits.features & BLK_FEAT_ZONED); in blk_queue_is_zoned()
781 const sector_t zone_sectors = disk->queue->limits.chunk_sectors; in disk_zone_no()
797 return bdev->bd_disk->queue->limits.max_open_zones; in bdev_max_open_zones()
802 return bdev->bd_disk->queue->limits.max_active_zones; in bdev_max_active_zones()
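blk_queue_is_zoned() keys off BLK_FEAT_ZONED, the zone size sits in limits.chunk_sectors (used by disk_zone_no() at 781), and the open/active caps are plain limit reads. A small sketch combining them; my_zone_report() is an invented name:

#include <linux/blkdev.h>
#include <linux/printk.h>

/* Sketch: map a sector to its zone and print the zone-resource caps,
 * using only the zoned helpers matched above. */
static void my_zone_report(struct block_device *bdev, sector_t sector)
{
	struct gendisk *disk = bdev->bd_disk;

	if (!blk_queue_is_zoned(disk->queue))
		return;

	pr_info("sector %llu -> zone %u (max open %u, max active %u)\n",
		(unsigned long long)sector, disk_zone_no(disk, sector),
		bdev_max_open_zones(bdev), bdev_max_active_zones(bdev));
}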
1022 return q->limits; in queue_limits_start_update()
1052 q->limits.max_discard_sectors = 0; in blk_queue_disable_discard()
1057 q->limits.max_secure_erase_sectors = 0; in blk_queue_disable_secure_erase()
1062 q->limits.max_write_zeroes_sectors = 0; in blk_queue_disable_write_zeroes()
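queue_limits_start_update() at 1022 returns a snapshot of q->limits for the caller to edit, and the blk_queue_disable_*() helpers at 1052-1062 zero one limit each. A sketch of the snapshot-edit-commit pattern, assuming the matching queue_limits_commit_update() from the same API generation; the driver function is made up:

#include <linux/blkdev.h>

/* Sketch: lower max_discard_sectors atomically. commit_update()
 * validates the edited snapshot and publishes it back to q->limits. */
static int my_shrink_discard(struct request_queue *q, unsigned int sectors)
{
	struct queue_limits lim = queue_limits_start_update(q);

	lim.max_discard_sectors = sectors;
	return queue_limits_commit_update(q, &lim);
}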
1252 return q->limits.seg_boundary_mask; in queue_segment_boundary()
1257 return q->limits.virt_boundary_mask; in queue_virt_boundary()
1262 return q->limits.max_sectors; in queue_max_sectors()
1272 return q->limits.max_hw_sectors; in queue_max_hw_sectors()
1277 return q->limits.max_segments; in queue_max_segments()
1282 return q->limits.max_discard_segments; in queue_max_discard_segments()
1287 return q->limits.max_segment_size; in queue_max_segment_size()
1303 return queue_limits_max_zone_append_sectors(&q->limits); in queue_max_zone_append_sectors()
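The accessors at 1252-1303 expose the splitting limits: max_sectors is the soft cap applied to normal I/O, max_hw_sectors the hardware ceiling, and the segment values bound scatter-gather list length and entry size. A caller-side clamp as a sketch; my_clamp_io() is hypothetical:

#include <linux/blkdev.h>
#include <linux/minmax.h>

/* Sketch: keep one I/O within the queue's sector cap and within what
 * a single scatter-gather segment may carry. */
static unsigned int my_clamp_io(struct request_queue *q,
				unsigned int nr_sectors)
{
	unsigned int seg_sectors = queue_max_segment_size(q) >> SECTOR_SHIFT;

	return min3(nr_sectors, queue_max_sectors(q), seg_sectors);
}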
1308 return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors; in queue_emulates_zone_append()
1329 return q->limits.logical_block_size; in queue_logical_block_size()
1339 return q->limits.physical_block_size; in queue_physical_block_size()
1349 return q->limits.io_min; in queue_io_min()
1359 return q->limits.io_opt; in queue_io_opt()
1370 return q->limits.zone_write_granularity; in queue_zone_write_granularity()
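These granularity accessors form a hierarchy: logical_block_size is the smallest addressable unit, physical_block_size the smallest the media updates without read-modify-write, io_min the preferred minimum, and io_opt the (often stripe-derived) optimal size. A sketch of a direct-I/O check against the hard requirement only; the helper name is invented:

#include <linux/blkdev.h>
#include <linux/align.h>

/* Sketch: direct I/O must align to the logical block size; io_min and
 * io_opt are performance hints only and are not enforced here. */
static bool my_dio_aligned(struct request_queue *q, loff_t pos, size_t len)
{
	unsigned int lbs = queue_logical_block_size(q);

	return IS_ALIGNED(pos, lbs) && IS_ALIGNED(len, lbs);
}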
1384 return bdev_get_queue(bdev)->limits.max_discard_sectors; in bdev_max_discard_sectors()
1389 return bdev_get_queue(bdev)->limits.discard_granularity; in bdev_discard_granularity()
1395 return bdev_get_queue(bdev)->limits.max_secure_erase_sectors; in bdev_max_secure_erase_sectors()
1400 return bdev_get_queue(bdev)->limits.max_write_zeroes_sectors; in bdev_write_zeroes_sectors()
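A max_*_sectors value of 0 means the operation is unsupported (or was disabled via the helpers at 1052-1062). A sketch that gates a discard on that convention, assuming the four-argument blkdev_issue_discard() of recent kernels; my_trim() is an invented wrapper:

#include <linux/blkdev.h>

/* Sketch: refuse early when the device advertises no discard support
 * (bdev_max_discard_sectors() == 0), otherwise punt to the core. */
static int my_trim(struct block_device *bdev, sector_t sector,
		   sector_t nr_sects)
{
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL);
}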
1410 return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS; in bdev_synchronous()
1418 q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE) in bdev_stable_writes()
1420 return q->limits.features & BLK_FEAT_STABLE_WRITES; in bdev_stable_writes()
1425 return (q->limits.features & BLK_FEAT_WRITE_CACHE) && in blk_queue_write_cache()
1426 !(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED); in blk_queue_write_cache()
1436 return bdev_get_queue(bdev)->limits.features & BLK_FEAT_FUA; in bdev_fua()
1441 return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT; in bdev_nowait()
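blk_queue_write_cache() at 1425-1426 is true only when a volatile write cache exists and has not been administratively disabled, and bdev_fua() reports forced-unit-access support. A sketch of choosing write flags for a commit record under those assumptions; it deliberately simplifies real journaling ordering:

#include <linux/blkdev.h>
#include <linux/blk_types.h>

/* Sketch: no volatile cache means a plain write is already durable;
 * with a cache, prefer tagging the write FUA over a separate flush. */
static blk_opf_t my_commit_write_flags(struct block_device *bdev)
{
	blk_opf_t opf = REQ_OP_WRITE;

	if (blk_queue_write_cache(bdev_get_queue(bdev)))
		opf |= bdev_fua(bdev) ? REQ_FUA : REQ_PREFLUSH;
	return opf;
}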
1460 return q->limits.chunk_sectors; in bdev_zone_sectors()
1523 return q->limits.dma_alignment; in queue_dma_alignment()
1529 return q->limits.atomic_write_unit_max; in queue_atomic_write_unit_max_bytes()
1535 return q->limits.atomic_write_unit_min; in queue_atomic_write_unit_min_bytes()
1541 return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT; in queue_atomic_write_boundary_bytes()
1547 return q->limits.atomic_write_max_sectors << SECTOR_SHIFT; in queue_atomic_write_max_bytes()
1570 unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits); in blk_rq_aligned()
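queue_dma_alignment() returns a mask (alignment minus one), and blk_rq_aligned() at 1570 applies the combined alignment-and-pad mask to both buffer address and length before passthrough mapping. A simplified restatement that ignores the pad component:

#include <linux/blkdev.h>

/* Sketch: the same test blk_rq_aligned() performs, minus the pad part
 * of blk_lim_dma_alignment_and_pad(). */
static bool my_buf_dma_aligned(struct request_queue *q, unsigned long addr,
			       unsigned int len)
{
	return !((addr | len) & queue_dma_alignment(q));
}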
1811 struct queue_limits *limits = &bd_queue->limits; in bdev_can_atomic_write() local
1813 if (!limits->atomic_write_unit_min) in bdev_can_atomic_write()
1819 max(limits->atomic_write_unit_min, in bdev_can_atomic_write()
1820 limits->atomic_write_hw_boundary); in bdev_can_atomic_write()
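bdev_can_atomic_write() bails out when atomic_write_unit_min is zero, i.e. the device reported no atomic write capability. A sketch of a caller-side length check built on the byte accessors at 1529-1547; it assumes the usual power-of-two and natural-alignment rules, and the function name is invented:

#include <linux/blkdev.h>
#include <linux/log2.h>

/* Sketch: an atomic write must fit between unit_min and unit_max, be a
 * power of two, and be naturally aligned. The boundary rule enforced
 * inside bdev_can_atomic_write() is not rechecked here. */
static bool my_atomic_write_ok(struct block_device *bdev, loff_t pos,
			       unsigned int len)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!bdev_can_atomic_write(bdev))
		return false;
	return len >= queue_atomic_write_unit_min_bytes(q) &&
	       len <= queue_atomic_write_unit_max_bytes(q) &&
	       is_power_of_2(len) && IS_ALIGNED(pos, len);
}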