Lines matching references to q:
33 void blk_free_flush_queue(struct blk_flush_queue *q);
42 void blk_disable_sub_page_limits(struct queue_limits *q);
44 void blk_freeze_queue(struct request_queue *q);
45 void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
46 void blk_queue_start_drain(struct request_queue *q);
47 int __bio_queue_enter(struct request_queue *q, struct bio *bio);
50 static inline bool blk_try_enter_queue(struct request_queue *q, bool pm) in blk_try_enter_queue() argument
53 if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter)) in blk_try_enter_queue()
60 if (blk_queue_pm_only(q) && in blk_try_enter_queue()
61 (!pm || queue_rpm_status(q) == RPM_SUSPENDED)) in blk_try_enter_queue()
68 blk_queue_exit(q); in blk_try_enter_queue()
76 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in bio_queue_enter() local
78 if (blk_try_enter_queue(q, false)) in bio_queue_enter()
80 return __bio_queue_enter(q, bio); in bio_queue_enter()
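
The matches above only show the lines that mention q, so the queue-enter fast path appears in fragments. As orientation, here is a hedged reconstruction of how blk_try_enter_queue() and bio_queue_enter() fit together; the RCU read lock, the smp_rmb(), the label names, and bio_queue_enter()'s int return are filled in from the usual mainline layout and may differ in the tree this listing was taken from.

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * A queue in PM-only mode only admits callers that are themselves
	 * part of runtime PM (pm == true), and not while the device is
	 * suspended.
	 */
	smp_rmb();
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);	/* drop the usage-counter reference again */
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;			/* fast path: queue entered */
	return __bio_queue_enter(q, bio);	/* slow path: may wait for unfreeze */
}
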
106 bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
110 static inline bool biovec_phys_mergeable(struct request_queue *q, in biovec_phys_mergeable() argument
113 unsigned long mask = queue_segment_boundary(q); in biovec_phys_mergeable()
185 queue_max_discard_segments(req->q) > 1) in blk_discard_mergable()
193 return queue_max_discard_segments(rq->q); in blk_rq_get_max_segments()
194 return queue_max_segments(rq->q); in blk_rq_get_max_segments()
197 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, in blk_queue_get_max_sectors() argument
201 return min(q->limits.max_discard_sectors, in blk_queue_get_max_sectors()
205 return q->limits.max_write_zeroes_sectors; in blk_queue_get_max_sectors()
207 return q->limits.max_sectors; in blk_queue_get_max_sectors()
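
blk_discard_mergable(), blk_rq_get_max_segments() and blk_queue_get_max_sectors() are short per-operation limit lookups. A hedged sketch of all three, reconstructed from the matched lines; the unlikely() hints, the REQ_OP_SECURE_ERASE branch, the UINT_MAX >> SECTOR_SHIFT clamp on the discard limit, and the exact type of the op parameter are assumptions taken from the common mainline form.

static inline bool blk_discard_mergable(struct request *req)
{
	/* discards merge only if the queue accepts more than one discard segment */
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     enum req_op op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}
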
232 return bvec_gap_to_prev(&req->q->limits, in integrity_req_gap_back_merge()
243 return bvec_gap_to_prev(&req->q->limits, in integrity_req_gap_front_merge()
286 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
288 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
304 int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
305 void elevator_disable(struct request_queue *q);
306 void elevator_exit(struct request_queue *q);
307 int elv_register_queue(struct request_queue *q, bool uevent);
308 void elv_unregister_queue(struct request_queue *q);
352 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
376 static inline void req_set_nomerge(struct request_queue *q, struct request *req) in req_set_nomerge() argument
379 if (req == q->last_merge) in req_set_nomerge()
380 q->last_merge = NULL; in req_set_nomerge()
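
req_set_nomerge() is only two statements long; the first one does not mention q and therefore does not show up in the matches. For completeness, a reconstruction in which the REQ_NOMERGE flag assignment is the assumed missing statement:

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;	/* stop further merge attempts on this request */
	if (req == q->last_merge)
		q->last_merge = NULL;	/* drop the cached merge candidate */
}
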
386 struct io_cq *ioc_find_get_icq(struct request_queue *q);
387 struct io_cq *ioc_lookup_icq(struct request_queue *q);
389 void ioc_clear_queue(struct request_queue *q);
391 static inline void ioc_clear_queue(struct request_queue *q) in ioc_clear_queue() argument
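
The prototype of ioc_clear_queue() and the empty inline version coexist because they sit on opposite sides of a config conditional; in current mainline that conditional is CONFIG_BLK_ICQ, which is assumed (it does not appear in the matches) in the sketch below.

#ifdef CONFIG_BLK_ICQ
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */
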
397 extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
398 extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
407 struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);
409 static inline bool blk_queue_may_bounce(struct request_queue *q) in blk_queue_may_bounce() argument
412 q->limits.bounce == BLK_BOUNCE_HIGH && in blk_queue_may_bounce()
417 struct request_queue *q) in blk_queue_bounce() argument
419 if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio))) in blk_queue_bounce()
420 return __blk_queue_bounce(bio, q); in blk_queue_bounce()
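
blk_queue_may_bounce() and blk_queue_bounce() gate the legacy highmem bounce-buffer path. A hedged reconstruction from the matched lines: the IS_ENABLED(CONFIG_BOUNCE) part is filled in from the usual mainline definition, and mainline adds one more condition (a max_low_pfn/max_pfn comparison checking that highmem pages exist at all) which is omitted here because only the BLK_BOUNCE_HIGH line appears in the matches.

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH;
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	/* only data-carrying bios to a bounce-limited queue are rewritten */
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}
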
463 struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
466 int bio_add_hw_page(struct request_queue *q, struct bio *bio,