Lines matching refs:q — cross-reference hits for the request_queue pointer q
646 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
647 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
649 #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
650 #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
651 #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
652 #define blk_queue_noxmerges(q) \
653 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
654 #define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
655 #define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
656 #define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX)
657 #define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
659 #define blk_queue_rq_alloc_time(q) \
660 	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
662 #define blk_queue_rq_alloc_time(q) false
668 #define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
669 #define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
670 #define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
671 #define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
672 #define blk_queue_skip_tagset_quiesce(q) \
673 	((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)
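The setters above take the flag first and the queue second, while the feature-based predicates read q->limits.features directly. A minimal sketch of how a driver might use them, assuming a hypothetical helper name and that <linux/blkdev.h> is included:

/* Hypothetical example: turn off request merging on a queue the driver
 * owns, then confirm it via the predicate macro shown above. */
static void example_disable_merges(struct request_queue *q)
{
	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);

	if (blk_queue_nomerges(q))
		pr_info("request merging disabled\n");
}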
675 extern void blk_set_pm_only(struct request_queue *q);
676 extern void blk_clear_pm_only(struct request_queue *q);
684 static inline bool queue_is_mq(struct request_queue *q)
686 	return q->mq_ops;
690 static inline enum rpm_status queue_rpm_status(struct request_queue *q)
692 	return q->rpm_status;
695 static inline enum rpm_status queue_rpm_status(struct request_queue *q)
701 static inline bool blk_queue_is_zoned(struct request_queue *q)
704 		(q->limits.features & BLK_FEAT_ZONED);
805 static inline unsigned int blk_queue_depth(struct request_queue *q)
807 	if (q->queue_depth)
808 		return q->queue_depth;
810 	return q->nr_requests;
956 extern int blk_lld_busy(struct request_queue *q);
957 extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
958 extern void blk_queue_exit(struct request_queue *q);
959 extern void blk_sync_queue(struct request_queue *q);
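blk_queue_enter()/blk_queue_exit() bracket work that must not race with queue teardown: enter returns 0 on success and an error if the queue is dying (or would block when BLK_MQ_REQ_NOWAIT is passed), exit drops the reference. A sketch of that pattern, with do_work_on_queue() as a hypothetical placeholder:

static int example_with_queue_ref(struct request_queue *q)
{
	int ret;

	/* Take a usage reference; fail fast instead of sleeping. */
	ret = blk_queue_enter(q, BLK_MQ_REQ_NOWAIT);
	if (ret)
		return ret;

	do_work_on_queue(q);	/* hypothetical placeholder */

	blk_queue_exit(q);	/* drop the reference taken above */
	return 0;
}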
1019 queue_limits_start_update(struct request_queue *q)
1021 	mutex_lock(&q->limits_lock);
1022 	return q->limits;
1024 int queue_limits_commit_update_frozen(struct request_queue *q,
1026 int queue_limits_commit_update(struct request_queue *q,
1028 int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
1038 static inline void queue_limits_cancel_update(struct request_queue *q)
1040 	mutex_unlock(&q->limits_lock);
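Together these form a lock/modify/commit cycle on q->limits: start_update() takes limits_lock and returns a snapshot by value, the caller edits the copy, and a commit (or cancel) drops the lock. A sketch of that pattern, under the assumption that the _frozen variant also freezes the queue while the new limits are applied; the particular field changed here is only an example:

static int example_update_max_hw_sectors(struct request_queue *q,
					 unsigned int new_max)
{
	struct queue_limits lim = queue_limits_start_update(q);

	if (!new_max) {
		/* Nothing to apply: drop limits_lock without committing. */
		queue_limits_cancel_update(q);
		return -EINVAL;
	}

	lim.max_hw_sectors = new_max;
	return queue_limits_commit_update_frozen(q, &lim);
}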
1050 static inline void blk_queue_disable_discard(struct request_queue *q)
1052 	q->limits.max_discard_sectors = 0;
1055 static inline void blk_queue_disable_secure_erase(struct request_queue *q)
1057 	q->limits.max_secure_erase_sectors = 0;
1060 static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
1062 	q->limits.max_write_zeroes_sectors = 0;
1068 extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
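The disable helpers above zero the corresponding limit so the block layer stops advertising and issuing that operation type. As an illustrative assumption, a driver could call them from an error path once the device reports the operation is unsupported:

static void example_device_lost_discard(struct request_queue *q)
{
	/* Hypothetical error path: device no longer accepts discard or
	 * secure erase, so clamp both limits to zero. */
	blk_queue_disable_discard(q);
	blk_queue_disable_secure_erase(q);
}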
1250 static inline unsigned long queue_segment_boundary(const struct request_queue *q)
1252 	return q->limits.seg_boundary_mask;
1255 static inline unsigned long queue_virt_boundary(const struct request_queue *q)
1257 	return q->limits.virt_boundary_mask;
1260 static inline unsigned int queue_max_sectors(const struct request_queue *q)
1262 	return q->limits.max_sectors;
1265 static inline unsigned int queue_max_bytes(struct request_queue *q)
1267 	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
1270 static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
1272 	return q->limits.max_hw_sectors;
1275 static inline unsigned short queue_max_segments(const struct request_queue *q)
1277 	return q->limits.max_segments;
1280 static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
1282 	return q->limits.max_discard_segments;
1285 static inline unsigned int queue_max_segment_size(const struct request_queue *q)
1287 	return q->limits.max_segment_size;
1298 static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q)
1300 	if (!blk_queue_is_zoned(q))
1303 	return queue_limits_max_zone_append_sectors(&q->limits);
1306 static inline bool queue_emulates_zone_append(struct request_queue *q)
1308 	return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors;
1327 static inline unsigned queue_logical_block_size(const struct request_queue *q)
1329 	return q->limits.logical_block_size;
1337 static inline unsigned int queue_physical_block_size(const struct request_queue *q)
1339 	return q->limits.physical_block_size;
1347 static inline unsigned int queue_io_min(const struct request_queue *q)
1349 	return q->limits.io_min;
1357 static inline unsigned int queue_io_opt(const struct request_queue *q)
1359 	return q->limits.io_opt;
1368 queue_zone_write_granularity(const struct request_queue *q)
1370 	return q->limits.zone_write_granularity;
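These accessors are thin wrappers around q->limits fields; note that queue_max_bytes() caps the result at INT_MAX before converting sectors to bytes. A small sketch that simply reports a few of them, assuming a kernel context where pr_info() is available:

static void example_log_queue_limits(struct request_queue *q)
{
	pr_info("lbs=%u pbs=%u io_min=%u io_opt=%u max_sectors=%u max_bytes=%u\n",
		queue_logical_block_size(q),
		queue_physical_block_size(q),
		queue_io_min(q),
		queue_io_opt(q),
		queue_max_sectors(q),
		queue_max_bytes(q));
}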
1415 	struct request_queue *q = bdev_get_queue(bdev);	/* in bdev_stable_writes() */
1418 	    q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
1420 	return q->limits.features & BLK_FEAT_STABLE_WRITES;
1423 static inline bool blk_queue_write_cache(struct request_queue *q)
1425 	return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
1426 		!(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
1456 	struct request_queue *q = bdev_get_queue(bdev);	/* in bdev_zone_sectors() */
1458 	if (!blk_queue_is_zoned(q))
1460 	return q->limits.chunk_sectors;
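The bdev helpers resolve the queue from a struct block_device via bdev_get_queue() before reading limits, and bdev_zone_sectors() only reports a zone size for zoned queues. A sketch combining them with the write-cache predicate above (the cast is defensive, since the return type is not visible in this listing):

static void example_query_bdev(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (blk_queue_is_zoned(q))
		pr_info("zone size: %llu sectors\n",
			(unsigned long long)bdev_zone_sectors(bdev));

	if (blk_queue_write_cache(q))
		pr_info("volatile write cache enabled\n");
}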
1521 static inline int queue_dma_alignment(const struct request_queue *q)
1523 	return q->limits.dma_alignment;
1527 queue_atomic_write_unit_max_bytes(const struct request_queue *q)
1529 	return q->limits.atomic_write_unit_max;
1533 queue_atomic_write_unit_min_bytes(const struct request_queue *q)
1535 	return q->limits.atomic_write_unit_min;
1539 queue_atomic_write_boundary_bytes(const struct request_queue *q)
1541 	return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT;
1545 queue_atomic_write_max_bytes(const struct request_queue *q)
1547 	return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
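The atomic write getters report q->limits in bytes (the boundary and max values are stored in sectors and shifted by SECTOR_SHIFT). Purely as an illustrative assumption, a submitter would keep each atomic write within the advertised unit_min/unit_max window; a hypothetical bounds check:

static bool example_atomic_write_len_ok(struct request_queue *q,
					unsigned int len)
{
	return len >= queue_atomic_write_unit_min_bytes(q) &&
	       len <= queue_atomic_write_unit_max_bytes(q);
}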
1567 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
1570 	unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);
1592 struct request_queue *q);
1597 				struct request_queue *q)	/* in blk_crypto_register() */