Lines Matching refs:q
48 static void blk_mq_poll_stats_start(struct request_queue *q);
117 unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part) in blk_mq_in_flight() argument
121 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); in blk_mq_in_flight()
126 void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part, in blk_mq_in_flight_rw() argument
131 blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi); in blk_mq_in_flight_rw()
136 void blk_freeze_queue_start(struct request_queue *q) in blk_freeze_queue_start() argument
138 mutex_lock(&q->mq_freeze_lock); in blk_freeze_queue_start()
139 if (++q->mq_freeze_depth == 1) { in blk_freeze_queue_start()
140 percpu_ref_kill(&q->q_usage_counter); in blk_freeze_queue_start()
141 mutex_unlock(&q->mq_freeze_lock); in blk_freeze_queue_start()
142 if (queue_is_mq(q)) in blk_freeze_queue_start()
143 blk_mq_run_hw_queues(q, false); in blk_freeze_queue_start()
145 mutex_unlock(&q->mq_freeze_lock); in blk_freeze_queue_start()
150 void blk_mq_freeze_queue_wait(struct request_queue *q) in blk_mq_freeze_queue_wait() argument
152 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); in blk_mq_freeze_queue_wait()
156 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, in blk_mq_freeze_queue_wait_timeout() argument
159 return wait_event_timeout(q->mq_freeze_wq, in blk_mq_freeze_queue_wait_timeout()
160 percpu_ref_is_zero(&q->q_usage_counter), in blk_mq_freeze_queue_wait_timeout()
169 void blk_freeze_queue(struct request_queue *q) in blk_freeze_queue() argument
178 blk_freeze_queue_start(q); in blk_freeze_queue()
179 blk_mq_freeze_queue_wait(q); in blk_freeze_queue()
182 void blk_mq_freeze_queue(struct request_queue *q) in blk_mq_freeze_queue() argument
188 blk_freeze_queue(q); in blk_mq_freeze_queue()
192 void blk_mq_unfreeze_queue(struct request_queue *q) in blk_mq_unfreeze_queue() argument
194 mutex_lock(&q->mq_freeze_lock); in blk_mq_unfreeze_queue()
195 q->mq_freeze_depth--; in blk_mq_unfreeze_queue()
196 WARN_ON_ONCE(q->mq_freeze_depth < 0); in blk_mq_unfreeze_queue()
197 if (!q->mq_freeze_depth) { in blk_mq_unfreeze_queue()
198 percpu_ref_resurrect(&q->q_usage_counter); in blk_mq_unfreeze_queue()
199 wake_up_all(&q->mq_freeze_wq); in blk_mq_unfreeze_queue()
201 mutex_unlock(&q->mq_freeze_lock); in blk_mq_unfreeze_queue()
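
The freeze path above (blk_freeze_queue_start() kills q_usage_counter, blk_mq_freeze_queue_wait() sleeps on mq_freeze_wq until the counter drops to zero, blk_mq_unfreeze_queue() resurrects it) is what drivers use to drain a queue before changing something that in-flight requests might observe. A minimal sketch of that usage follows; mydrv_update_limits() is hypothetical, and blk_queue_max_hw_sectors() is a real helper that simply does not appear in this listing.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Hypothetical example: drain all I/O on @q while a queue limit is
 * changed.  blk_mq_freeze_queue() blocks new submissions and waits for
 * every in-flight request; blk_mq_unfreeze_queue() reopens the queue.
 */
static void mydrv_update_limits(struct request_queue *q, unsigned int new_max)
{
	blk_mq_freeze_queue(q);			/* block new entries, drain old ones */
	blk_queue_max_hw_sectors(q, new_max);
	blk_mq_unfreeze_queue(q);		/* allow submissions again */
}
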
209 void blk_mq_quiesce_queue_nowait(struct request_queue *q) in blk_mq_quiesce_queue_nowait() argument
211 blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q); in blk_mq_quiesce_queue_nowait()
224 void blk_mq_quiesce_queue(struct request_queue *q) in blk_mq_quiesce_queue() argument
230 blk_mq_quiesce_queue_nowait(q); in blk_mq_quiesce_queue()
232 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_quiesce_queue()
250 void blk_mq_unquiesce_queue(struct request_queue *q) in blk_mq_unquiesce_queue() argument
252 blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q); in blk_mq_unquiesce_queue()
255 blk_mq_run_hw_queues(q, true); in blk_mq_unquiesce_queue()
259 void blk_mq_wake_waiters(struct request_queue *q) in blk_mq_wake_waiters() argument
264 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_wake_waiters()
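
Quiescing, by contrast, only guarantees that no ->queue_rq() call is in progress once blk_mq_quiesce_queue() returns (QUEUE_FLAG_QUIESCED plus a per-hctx RCU/SRCU sync); it does not drain requests that were already dispatched. A hedged sketch of a reset path, with struct mydrv_ctrl, mydrv_hw_reset() and mydrv_reset() all hypothetical:

#include <linux/blk-mq.h>

struct mydrv_ctrl {				/* hypothetical driver state */
	struct request_queue *queue;
};

static void mydrv_hw_reset(struct mydrv_ctrl *ctrl)
{
	/* hypothetical: reprogram the controller */
}

/*
 * Stop ->queue_rq() from being invoked while the hardware is reset,
 * without waiting for outstanding requests (that is what freezing does).
 */
static void mydrv_reset(struct mydrv_ctrl *ctrl)
{
	blk_mq_quiesce_queue(ctrl->queue);	/* sets QUEUE_FLAG_QUIESCED */
	mydrv_hw_reset(ctrl);
	blk_mq_unquiesce_queue(ctrl->queue);	/* clears the flag, re-runs hctxs */
}
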
275 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator; in blk_mq_need_time_stamp()
284 if (data->q->elevator) { in blk_mq_rq_ctx_init()
293 rq->q = data->q; in blk_mq_rq_ctx_init()
300 if (blk_queue_io_stat(data->q)) in blk_mq_rq_ctx_init()
333 struct elevator_queue *e = data->q->elevator; in blk_mq_rq_ctx_init()
352 struct request_queue *q = data->q; in __blk_mq_alloc_request() local
353 struct elevator_queue *e = q->elevator; in __blk_mq_alloc_request()
359 if (blk_queue_rq_alloc_time(q)) in __blk_mq_alloc_request()
378 data->ctx = blk_mq_get_ctx(q); in __blk_mq_alloc_request()
379 data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx); in __blk_mq_alloc_request()
406 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, in blk_mq_alloc_request() argument
410 .q = q, in blk_mq_alloc_request()
417 ret = blk_queue_enter(q, flags); in blk_mq_alloc_request()
429 blk_queue_exit(q); in blk_mq_alloc_request()
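
blk_mq_alloc_request() enters the queue via blk_queue_enter(), picks a ctx/hctx in __blk_mq_alloc_request() and hands back a tagged request that the caller owns until it is freed or executed. A small sketch of allocating a driver-internal, passthrough-style request; mydrv_send_admin_cmd() is hypothetical, while REQ_OP_DRV_IN and BLK_MQ_REQ_NOWAIT are real flags that simply are not part of this listing.

#include <linux/blk-mq.h>

/*
 * Hypothetical helper: allocate a driver-internal request on @q, e.g.
 * for an admin command.  BLK_MQ_REQ_NOWAIT fails instead of sleeping
 * when no tag is available.
 */
static int mydrv_send_admin_cmd(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... fill in the driver-specific payload here ... */

	blk_mq_free_request(rq);	/* release the tag and exit the queue */
	return 0;
}
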
434 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, in blk_mq_alloc_request_hctx() argument
438 .q = q, in blk_mq_alloc_request_hctx()
448 if (blk_queue_rq_alloc_time(q)) in blk_mq_alloc_request_hctx()
461 if (hctx_idx >= q->nr_hw_queues) in blk_mq_alloc_request_hctx()
464 ret = blk_queue_enter(q, flags); in blk_mq_alloc_request_hctx()
473 data.hctx = q->queue_hw_ctx[hctx_idx]; in blk_mq_alloc_request_hctx()
479 data.ctx = __blk_mq_get_ctx(q, cpu); in blk_mq_alloc_request_hctx()
481 if (!q->elevator) in blk_mq_alloc_request_hctx()
491 blk_queue_exit(q); in blk_mq_alloc_request_hctx()
498 struct request_queue *q = rq->q; in __blk_mq_free_request() local
516 blk_queue_exit(q); in __blk_mq_free_request()
521 struct request_queue *q = rq->q; in blk_mq_free_request() local
522 struct elevator_queue *e = q->elevator; in blk_mq_free_request()
540 laptop_io_completion(q->backing_dev_info); in blk_mq_free_request()
542 rq_qos_done(q, rq); in blk_mq_free_request()
558 blk_mq_poll_stats_start(rq->q); in __blk_mq_end_request()
567 rq_qos_done(rq->q, rq); in __blk_mq_end_request()
601 rq->q->mq_ops->complete(rq); in blk_done_softirq()
653 if (rq->q->nr_hw_queues == 1) in __blk_mq_complete_request_remote()
656 rq->q->mq_ops->complete(rq); in __blk_mq_complete_request_remote()
664 !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) in blk_mq_complete_need_ipi()
669 (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) && in blk_mq_complete_need_ipi()
694 if (rq->q->nr_hw_queues > 1) in blk_mq_complete_request_remote()
718 rq->q->mq_ops->complete(rq); in blk_mq_complete_request()
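
The completion lines show that ->complete() may run directly, through the blk_done_softirq() list, or via IPI, depending on QUEUE_FLAG_SAME_COMP/SAME_FORCE and which CPU submitted the request. From the driver's side the pattern is simply: call blk_mq_complete_request() from the interrupt handler and end the request in the ->complete() callback. A sketch, with mydrv_irq() and mydrv_complete_rq() hypothetical and blk_mq_end_request() taken from the wider blk-mq API rather than from this listing:

#include <linux/blk-mq.h>
#include <linux/interrupt.h>

/* Driver's blk_mq_ops.complete callback: actually finish the request. */
static void mydrv_complete_rq(struct request *rq)
{
	blk_mq_end_request(rq, BLK_STS_OK);
}

static irqreturn_t mydrv_irq(int irq, void *data)
{
	struct request *rq = data;	/* hypothetical: one request per vector */

	/* blk-mq picks local, softirq or IPI delivery of ->complete(). */
	blk_mq_complete_request(rq);
	return IRQ_HANDLED;
}
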
752 struct request_queue *q = rq->q; in blk_mq_start_request() local
754 trace_block_rq_issue(q, rq); in blk_mq_start_request()
756 if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { in blk_mq_start_request()
760 rq_qos_issue(q, rq); in blk_mq_start_request()
770 q->integrity.profile->prepare_fn(rq); in blk_mq_start_request()
777 struct request_queue *q = rq->q; in __blk_mq_requeue_request() local
781 trace_block_rq_requeue(q, rq); in __blk_mq_requeue_request()
782 rq_qos_requeue(q, rq); in __blk_mq_requeue_request()
803 struct request_queue *q = in blk_mq_requeue_work() local
808 spin_lock_irq(&q->requeue_lock); in blk_mq_requeue_work()
809 list_splice_init(&q->requeue_list, &rq_list); in blk_mq_requeue_work()
810 spin_unlock_irq(&q->requeue_lock); in blk_mq_requeue_work()
835 blk_mq_run_hw_queues(q, false); in blk_mq_requeue_work()
841 struct request_queue *q = rq->q; in blk_mq_add_to_requeue_list() local
855 spin_lock_irqsave(&q->requeue_lock, flags); in blk_mq_add_to_requeue_list()
858 list_add(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
860 list_add_tail(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
862 spin_unlock_irqrestore(&q->requeue_lock, flags); in blk_mq_add_to_requeue_list()
865 blk_mq_kick_requeue_list(q); in blk_mq_add_to_requeue_list()
868 void blk_mq_kick_requeue_list(struct request_queue *q) in blk_mq_kick_requeue_list() argument
870 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0); in blk_mq_kick_requeue_list()
874 void blk_mq_delay_kick_requeue_list(struct request_queue *q, in blk_mq_delay_kick_requeue_list() argument
877 kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, in blk_mq_delay_kick_requeue_list()
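
Requests that should be retried later are parked on q->requeue_list; blk_mq_requeue_work() splices that list and re-inserts the requests, while blk_mq_kick_requeue_list() / blk_mq_delay_kick_requeue_list() schedule the work. A hedged sketch of a retry helper: mydrv_retry_request() is hypothetical, and blk_mq_requeue_request() is the exported wrapper (not shown above) that ends up in blk_mq_add_to_requeue_list().

#include <linux/blk-mq.h>

/*
 * Hypothetical retry path: instead of failing a request that hit a
 * transient error, park it on q->requeue_list and arm q->requeue_work
 * after a short backoff.
 */
static void mydrv_retry_request(struct request *rq, unsigned long delay_ms)
{
	/* Put the request back without kicking the work item yet ... */
	blk_mq_requeue_request(rq, false);
	/* ... then schedule the delayed requeue_work (blk_mq_requeue_work()). */
	blk_mq_delay_kick_requeue_list(rq->q, delay_ms);
}
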
900 if (blk_mq_request_started(rq) && rq->q == hctx->queue) { in blk_mq_rq_inflight()
910 bool blk_mq_queue_inflight(struct request_queue *q) in blk_mq_queue_inflight() argument
914 blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy); in blk_mq_queue_inflight()
922 if (req->q->mq_ops->timeout) { in blk_mq_rq_timed_out()
925 ret = req->q->mq_ops->timeout(req, reserved); in blk_mq_rq_timed_out()
981 struct request_queue *q = in blk_mq_timeout_work() local
1000 if (!percpu_ref_tryget(&q->q_usage_counter)) in blk_mq_timeout_work()
1003 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next); in blk_mq_timeout_work()
1006 mod_timer(&q->timeout, next); in blk_mq_timeout_work()
1014 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_timeout_work()
1020 blk_queue_exit(q); in blk_mq_timeout_work()
1321 if (need_budget && !blk_mq_get_dispatch_budget(rq->q)) { in blk_mq_prep_dispatch_rq()
1340 blk_mq_put_dispatch_budget(rq->q); in blk_mq_prep_dispatch_rq()
1349 static void blk_mq_release_budgets(struct request_queue *q, in blk_mq_release_budgets() argument
1355 blk_mq_put_dispatch_budget(q); in blk_mq_release_budgets()
1365 struct request_queue *q = hctx->queue; in blk_mq_dispatch_rq_list() local
1410 ret = q->mq_ops->queue_rq(hctx, &bd); in blk_mq_dispatch_rq_list()
1445 ret == BLK_STS_DEV_RESOURCE) && q->mq_ops->commit_rqs && queued) in blk_mq_dispatch_rq_list()
1446 q->mq_ops->commit_rqs(hctx); in blk_mq_dispatch_rq_list()
1457 blk_mq_release_budgets(q, nr_budgets); in blk_mq_dispatch_rq_list()
1704 static bool blk_mq_has_sqsched(struct request_queue *q) in blk_mq_has_sqsched() argument
1706 struct elevator_queue *e = q->elevator; in blk_mq_has_sqsched()
1718 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) in blk_mq_get_sq_hctx() argument
1720 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); in blk_mq_get_sq_hctx()
1728 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx); in blk_mq_get_sq_hctx()
1740 void blk_mq_run_hw_queues(struct request_queue *q, bool async) in blk_mq_run_hw_queues() argument
1746 if (blk_mq_has_sqsched(q)) in blk_mq_run_hw_queues()
1747 sq_hctx = blk_mq_get_sq_hctx(q); in blk_mq_run_hw_queues()
1748 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_run_hw_queues()
1768 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) in blk_mq_delay_run_hw_queues() argument
1774 if (blk_mq_has_sqsched(q)) in blk_mq_delay_run_hw_queues()
1775 sq_hctx = blk_mq_get_sq_hctx(q); in blk_mq_delay_run_hw_queues()
1776 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_delay_run_hw_queues()
1798 bool blk_mq_queue_stopped(struct request_queue *q) in blk_mq_queue_stopped() argument
1803 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_queue_stopped()
1837 void blk_mq_stop_hw_queues(struct request_queue *q) in blk_mq_stop_hw_queues() argument
1842 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_stop_hw_queues()
1855 void blk_mq_start_hw_queues(struct request_queue *q) in blk_mq_start_hw_queues() argument
1860 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_start_hw_queues()
1875 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) in blk_mq_start_stopped_hw_queues() argument
1880 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_start_stopped_hw_queues()
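
Stopping hardware queues parks dispatch per hctx (BLK_MQ_S_STOPPED) without the drain semantics of freezing, which suits transient conditions such as a lost transport link. A minimal sketch, with the mydrv_link_*() helpers hypothetical:

#include <linux/blk-mq.h>

static void mydrv_link_down(struct request_queue *q)
{
	blk_mq_stop_hw_queues(q);		 /* stop dispatch on every hctx */
}

static void mydrv_link_up(struct request_queue *q)
{
	blk_mq_start_stopped_hw_queues(q, true); /* re-run queues from kblockd */
}
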
2016 BUG_ON(!rq->q); in blk_mq_flush_plug_list()
2023 trace_block_unplug(head_rq->q, depth, !from_schedule); in blk_mq_flush_plug_list()
2052 struct request_queue *q = rq->q; in __blk_mq_issue_directly() local
2067 ret = q->mq_ops->queue_rq(hctx, &bd); in __blk_mq_issue_directly()
2092 struct request_queue *q = rq->q; in __blk_mq_try_issue_directly() local
2102 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { in __blk_mq_try_issue_directly()
2108 if (q->elevator && !bypass_insert) in __blk_mq_try_issue_directly()
2111 if (!blk_mq_get_dispatch_budget(q)) in __blk_mq_try_issue_directly()
2115 blk_mq_put_dispatch_budget(q); in __blk_mq_try_issue_directly()
2218 if (tmp->q != rq->q) in blk_add_rq_to_plug()
2252 struct request_queue *q = bio->bi_disk->queue; in blk_mq_submit_bio() local
2256 .q = q, in blk_mq_submit_bio()
2265 blk_queue_bounce(q, &bio); in blk_mq_submit_bio()
2271 if (!is_flush_fua && !blk_queue_nomerges(q) && in blk_mq_submit_bio()
2272 blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq)) in blk_mq_submit_bio()
2275 if (blk_mq_sched_bio_merge(q, bio, nr_segs)) in blk_mq_submit_bio()
2278 rq_qos_throttle(q, bio); in blk_mq_submit_bio()
2283 rq_qos_cleanup(q, bio); in blk_mq_submit_bio()
2289 trace_block_getrq(q, bio, bio->bi_opf); in blk_mq_submit_bio()
2291 rq_qos_track(q, rq, bio); in blk_mq_submit_bio()
2305 plug = blk_mq_plug(q, bio); in blk_mq_submit_bio()
2310 } else if (plug && (q->nr_hw_queues == 1 || in blk_mq_submit_bio()
2312 q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) { in blk_mq_submit_bio()
2324 trace_block_plug(q); in blk_mq_submit_bio()
2331 trace_block_plug(q); in blk_mq_submit_bio()
2335 } else if (q->elevator) { in blk_mq_submit_bio()
2338 } else if (plug && !blk_queue_nomerges(q)) { in blk_mq_submit_bio()
2353 trace_block_plug(q); in blk_mq_submit_bio()
2357 trace_block_unplug(q, 1, true); in blk_mq_submit_bio()
2361 } else if ((q->nr_hw_queues > 1 && is_sync) || in blk_mq_submit_bio()
2375 blk_queue_exit(q); in blk_mq_submit_bio()
2749 static void blk_mq_exit_hctx(struct request_queue *q, in blk_mq_exit_hctx() argument
2768 spin_lock(&q->unused_hctx_lock); in blk_mq_exit_hctx()
2769 list_add(&hctx->hctx_list, &q->unused_hctx_list); in blk_mq_exit_hctx()
2770 spin_unlock(&q->unused_hctx_lock); in blk_mq_exit_hctx()
2773 static void blk_mq_exit_hw_queues(struct request_queue *q, in blk_mq_exit_hw_queues() argument
2779 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_exit_hw_queues()
2783 blk_mq_exit_hctx(q, set, hctx, i); in blk_mq_exit_hw_queues()
2801 static int blk_mq_init_hctx(struct request_queue *q, in blk_mq_init_hctx() argument
2832 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, in blk_mq_alloc_hctx() argument
2853 hctx->queue = q; in blk_mq_alloc_hctx()
2898 static void blk_mq_init_cpu_queues(struct request_queue *q, in blk_mq_init_cpu_queues() argument
2901 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_init_cpu_queues()
2905 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); in blk_mq_init_cpu_queues()
2914 __ctx->queue = q; in blk_mq_init_cpu_queues()
2921 hctx = blk_mq_map_queue_type(q, j, i); in blk_mq_init_cpu_queues()
2961 static void blk_mq_map_swqueue(struct request_queue *q) in blk_mq_map_swqueue() argument
2966 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_map_swqueue()
2968 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
2981 ctx = per_cpu_ptr(q->queue_ctx, i); in blk_mq_map_swqueue()
2984 ctx->hctxs[j] = blk_mq_map_queue_type(q, in blk_mq_map_swqueue()
3001 hctx = blk_mq_map_queue_type(q, j, i); in blk_mq_map_swqueue()
3024 ctx->hctxs[j] = blk_mq_map_queue_type(q, in blk_mq_map_swqueue()
3028 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
3067 static void queue_set_hctx_shared(struct request_queue *q, bool shared) in queue_set_hctx_shared() argument
3072 queue_for_each_hw_ctx(q, hctx, i) { in queue_set_hctx_shared()
3083 struct request_queue *q; in blk_mq_update_tag_set_shared() local
3087 list_for_each_entry(q, &set->tag_list, tag_set_list) { in blk_mq_update_tag_set_shared()
3088 blk_mq_freeze_queue(q); in blk_mq_update_tag_set_shared()
3089 queue_set_hctx_shared(q, shared); in blk_mq_update_tag_set_shared()
3090 blk_mq_unfreeze_queue(q); in blk_mq_update_tag_set_shared()
3094 static void blk_mq_del_queue_tag_set(struct request_queue *q) in blk_mq_del_queue_tag_set() argument
3096 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_del_queue_tag_set()
3099 list_del(&q->tag_set_list); in blk_mq_del_queue_tag_set()
3107 INIT_LIST_HEAD(&q->tag_set_list); in blk_mq_del_queue_tag_set()
3111 struct request_queue *q) in blk_mq_add_queue_tag_set() argument
3125 queue_set_hctx_shared(q, true); in blk_mq_add_queue_tag_set()
3126 list_add_tail(&q->tag_set_list, &set->tag_list); in blk_mq_add_queue_tag_set()
3132 static int blk_mq_alloc_ctxs(struct request_queue *q) in blk_mq_alloc_ctxs() argument
3150 q->mq_kobj = &ctxs->kobj; in blk_mq_alloc_ctxs()
3151 q->queue_ctx = ctxs->queue_ctx; in blk_mq_alloc_ctxs()
3165 void blk_mq_release(struct request_queue *q) in blk_mq_release() argument
3170 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_release()
3174 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { in blk_mq_release()
3179 kfree(q->queue_hw_ctx); in blk_mq_release()
3185 blk_mq_sysfs_deinit(q); in blk_mq_release()
3191 struct request_queue *uninit_q, *q; in blk_mq_init_queue_data() local
3202 q = blk_mq_init_allocated_queue(set, uninit_q, false); in blk_mq_init_queue_data()
3203 if (IS_ERR(q)) in blk_mq_init_queue_data()
3206 return q; in blk_mq_init_queue_data()
3225 struct request_queue *q; in blk_mq_init_sq_queue() local
3240 q = blk_mq_init_queue(set); in blk_mq_init_sq_queue()
3241 if (IS_ERR(q)) { in blk_mq_init_sq_queue()
3243 return q; in blk_mq_init_sq_queue()
3246 return q; in blk_mq_init_sq_queue()
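
blk_mq_init_queue() and blk_mq_init_sq_queue() build a request_queue on top of a blk_mq_tag_set and end up in blk_mq_init_allocated_queue() below. The following sketch shows the usual driver-side setup under stated assumptions: struct mydrv_dev, mydrv_queue_rq() and mydrv_create_queue() are hypothetical, and the ->queue_rq() shown completes every request synchronously just to keep the example self-contained.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/numa.h>

struct mydrv_dev {			/* hypothetical driver state */
	struct blk_mq_tag_set	tag_set;
	struct request_queue	*queue;
};

/* Minimal ->queue_rq(): start, "do the work", end immediately. */
static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	blk_mq_start_request(bd->rq);
	/* ... a real driver would program the hardware here ... */
	blk_mq_end_request(bd->rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops mydrv_mq_ops = {
	.queue_rq = mydrv_queue_rq,
};

static int mydrv_create_queue(struct mydrv_dev *dev)
{
	struct blk_mq_tag_set *set = &dev->tag_set;
	int ret;

	set->ops	  = &mydrv_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth  = 64;
	set->numa_node	  = NUMA_NO_NODE;
	set->flags	  = BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ret;

	dev->queue = blk_mq_init_queue(set);	/* see blk_mq_init_queue_data() */
	if (IS_ERR(dev->queue)) {
		blk_mq_free_tag_set(set);
		return PTR_ERR(dev->queue);
	}
	return 0;
}
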
3251 struct blk_mq_tag_set *set, struct request_queue *q, in blk_mq_alloc_and_init_hctx() argument
3257 spin_lock(&q->unused_hctx_lock); in blk_mq_alloc_and_init_hctx()
3258 list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) { in blk_mq_alloc_and_init_hctx()
3266 spin_unlock(&q->unused_hctx_lock); in blk_mq_alloc_and_init_hctx()
3269 hctx = blk_mq_alloc_hctx(q, set, node); in blk_mq_alloc_and_init_hctx()
3273 if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) in blk_mq_alloc_and_init_hctx()
3285 struct request_queue *q) in blk_mq_realloc_hw_ctxs() argument
3288 struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; in blk_mq_realloc_hw_ctxs()
3290 if (q->nr_hw_queues < set->nr_hw_queues) { in blk_mq_realloc_hw_ctxs()
3299 memcpy(new_hctxs, hctxs, q->nr_hw_queues * in blk_mq_realloc_hw_ctxs()
3301 q->queue_hw_ctx = new_hctxs; in blk_mq_realloc_hw_ctxs()
3307 mutex_lock(&q->sysfs_lock); in blk_mq_realloc_hw_ctxs()
3321 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node); in blk_mq_realloc_hw_ctxs()
3324 blk_mq_exit_hctx(q, set, hctxs[i], i); in blk_mq_realloc_hw_ctxs()
3340 j = q->nr_hw_queues; in blk_mq_realloc_hw_ctxs()
3344 end = q->nr_hw_queues; in blk_mq_realloc_hw_ctxs()
3345 q->nr_hw_queues = set->nr_hw_queues; in blk_mq_realloc_hw_ctxs()
3354 blk_mq_exit_hctx(q, set, hctx, j); in blk_mq_realloc_hw_ctxs()
3358 mutex_unlock(&q->sysfs_lock); in blk_mq_realloc_hw_ctxs()
3362 struct request_queue *q, in blk_mq_init_allocated_queue() argument
3366 q->mq_ops = set->ops; in blk_mq_init_allocated_queue()
3368 q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn, in blk_mq_init_allocated_queue()
3370 BLK_MQ_POLL_STATS_BKTS, q); in blk_mq_init_allocated_queue()
3371 if (!q->poll_cb) in blk_mq_init_allocated_queue()
3374 if (blk_mq_alloc_ctxs(q)) in blk_mq_init_allocated_queue()
3378 blk_mq_sysfs_init(q); in blk_mq_init_allocated_queue()
3380 INIT_LIST_HEAD(&q->unused_hctx_list); in blk_mq_init_allocated_queue()
3381 spin_lock_init(&q->unused_hctx_lock); in blk_mq_init_allocated_queue()
3383 blk_mq_realloc_hw_ctxs(set, q); in blk_mq_init_allocated_queue()
3384 if (!q->nr_hw_queues) in blk_mq_init_allocated_queue()
3387 INIT_WORK(&q->timeout_work, blk_mq_timeout_work); in blk_mq_init_allocated_queue()
3388 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); in blk_mq_init_allocated_queue()
3390 q->tag_set = set; in blk_mq_init_allocated_queue()
3392 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; in blk_mq_init_allocated_queue()
3395 blk_queue_flag_set(QUEUE_FLAG_POLL, q); in blk_mq_init_allocated_queue()
3397 q->sg_reserved_size = INT_MAX; in blk_mq_init_allocated_queue()
3399 INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); in blk_mq_init_allocated_queue()
3400 INIT_LIST_HEAD(&q->requeue_list); in blk_mq_init_allocated_queue()
3401 spin_lock_init(&q->requeue_lock); in blk_mq_init_allocated_queue()
3403 q->nr_requests = set->queue_depth; in blk_mq_init_allocated_queue()
3408 q->poll_nsec = BLK_MQ_POLL_CLASSIC; in blk_mq_init_allocated_queue()
3410 blk_mq_init_cpu_queues(q, set->nr_hw_queues); in blk_mq_init_allocated_queue()
3411 blk_mq_add_queue_tag_set(set, q); in blk_mq_init_allocated_queue()
3412 blk_mq_map_swqueue(q); in blk_mq_init_allocated_queue()
3414 trace_android_rvh_blk_mq_init_allocated_queue(q); in blk_mq_init_allocated_queue()
3417 elevator_init_mq(q); in blk_mq_init_allocated_queue()
3419 return q; in blk_mq_init_allocated_queue()
3422 kfree(q->queue_hw_ctx); in blk_mq_init_allocated_queue()
3423 q->nr_hw_queues = 0; in blk_mq_init_allocated_queue()
3424 blk_mq_sysfs_deinit(q); in blk_mq_init_allocated_queue()
3426 blk_stat_free_callback(q->poll_cb); in blk_mq_init_allocated_queue()
3427 q->poll_cb = NULL; in blk_mq_init_allocated_queue()
3429 q->mq_ops = NULL; in blk_mq_init_allocated_queue()
3435 void blk_mq_exit_queue(struct request_queue *q) in blk_mq_exit_queue() argument
3437 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_exit_queue()
3439 trace_android_vh_blk_mq_exit_queue(q); in blk_mq_exit_queue()
3441 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); in blk_mq_exit_queue()
3443 blk_mq_del_queue_tag_set(q); in blk_mq_exit_queue()
3684 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) in blk_mq_update_nr_requests() argument
3686 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_update_nr_requests()
3693 if (q->nr_requests == nr) in blk_mq_update_nr_requests()
3696 blk_mq_freeze_queue(q); in blk_mq_update_nr_requests()
3697 blk_mq_quiesce_queue(q); in blk_mq_update_nr_requests()
3700 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_update_nr_requests()
3718 if (q->elevator && q->elevator->type->ops.depth_updated) in blk_mq_update_nr_requests()
3719 q->elevator->type->ops.depth_updated(hctx); in blk_mq_update_nr_requests()
3723 q->nr_requests = nr; in blk_mq_update_nr_requests()
3725 blk_mq_unquiesce_queue(q); in blk_mq_update_nr_requests()
3726 blk_mq_unfreeze_queue(q); in blk_mq_update_nr_requests()
3738 struct request_queue *q; member
3747 struct request_queue *q) in blk_mq_elv_switch_none() argument
3751 if (!q->elevator) in blk_mq_elv_switch_none()
3759 qe->q = q; in blk_mq_elv_switch_none()
3760 qe->type = q->elevator->type; in blk_mq_elv_switch_none()
3763 mutex_lock(&q->sysfs_lock); in blk_mq_elv_switch_none()
3772 elevator_switch_mq(q, NULL); in blk_mq_elv_switch_none()
3773 mutex_unlock(&q->sysfs_lock); in blk_mq_elv_switch_none()
3779 struct request_queue *q) in blk_mq_elv_switch_back() argument
3785 if (qe->q == q) { in blk_mq_elv_switch_back()
3796 mutex_lock(&q->sysfs_lock); in blk_mq_elv_switch_back()
3797 elevator_switch_mq(q, t); in blk_mq_elv_switch_back()
3798 mutex_unlock(&q->sysfs_lock); in blk_mq_elv_switch_back()
3804 struct request_queue *q; in __blk_mq_update_nr_hw_queues() local
3817 list_for_each_entry(q, &set->tag_list, tag_set_list) in __blk_mq_update_nr_hw_queues()
3818 blk_mq_freeze_queue(q); in __blk_mq_update_nr_hw_queues()
3824 list_for_each_entry(q, &set->tag_list, tag_set_list) in __blk_mq_update_nr_hw_queues()
3825 if (!blk_mq_elv_switch_none(&head, q)) in __blk_mq_update_nr_hw_queues()
3828 list_for_each_entry(q, &set->tag_list, tag_set_list) { in __blk_mq_update_nr_hw_queues()
3829 blk_mq_debugfs_unregister_hctxs(q); in __blk_mq_update_nr_hw_queues()
3830 blk_mq_sysfs_unregister(q); in __blk_mq_update_nr_hw_queues()
3841 list_for_each_entry(q, &set->tag_list, tag_set_list) { in __blk_mq_update_nr_hw_queues()
3842 blk_mq_realloc_hw_ctxs(set, q); in __blk_mq_update_nr_hw_queues()
3843 if (q->nr_hw_queues != set->nr_hw_queues) { in __blk_mq_update_nr_hw_queues()
3850 blk_mq_map_swqueue(q); in __blk_mq_update_nr_hw_queues()
3854 list_for_each_entry(q, &set->tag_list, tag_set_list) { in __blk_mq_update_nr_hw_queues()
3855 blk_mq_sysfs_register(q); in __blk_mq_update_nr_hw_queues()
3856 blk_mq_debugfs_register_hctxs(q); in __blk_mq_update_nr_hw_queues()
3860 list_for_each_entry(q, &set->tag_list, tag_set_list) in __blk_mq_update_nr_hw_queues()
3861 blk_mq_elv_switch_back(&head, q); in __blk_mq_update_nr_hw_queues()
3863 list_for_each_entry(q, &set->tag_list, tag_set_list) in __blk_mq_update_nr_hw_queues()
3864 blk_mq_unfreeze_queue(q); in __blk_mq_update_nr_hw_queues()
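
__blk_mq_update_nr_hw_queues() freezes every queue that shares the tag set, parks their elevators, reallocates hardware contexts and remaps the software queues. Drivers reach it through the exported blk_mq_update_nr_hw_queues() wrapper (not part of this listing), typically after discovering a new interrupt-vector or channel count; a one-line sketch with a hypothetical mydrv_set_channels():

#include <linux/blk-mq.h>

/*
 * Hypothetical reconfiguration: resize the hardware-queue count on the
 * whole tag set; every attached request_queue is frozen and remapped.
 */
static void mydrv_set_channels(struct blk_mq_tag_set *set, unsigned int nr)
{
	blk_mq_update_nr_hw_queues(set, nr);
}
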
3876 static bool blk_poll_stats_enable(struct request_queue *q) in blk_poll_stats_enable() argument
3878 if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) || in blk_poll_stats_enable()
3879 blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q)) in blk_poll_stats_enable()
3881 blk_stat_add_callback(q, q->poll_cb); in blk_poll_stats_enable()
3885 static void blk_mq_poll_stats_start(struct request_queue *q) in blk_mq_poll_stats_start() argument
3891 if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) || in blk_mq_poll_stats_start()
3892 blk_stat_is_active(q->poll_cb)) in blk_mq_poll_stats_start()
3895 blk_stat_activate_msecs(q->poll_cb, 100); in blk_mq_poll_stats_start()
3900 struct request_queue *q = cb->data; in blk_mq_poll_stats_fn() local
3905 q->poll_stat[bucket] = cb->stat[bucket]; in blk_mq_poll_stats_fn()
3909 static unsigned long blk_mq_poll_nsecs(struct request_queue *q, in blk_mq_poll_nsecs() argument
3919 if (!blk_poll_stats_enable(q)) in blk_mq_poll_nsecs()
3935 if (q->poll_stat[bucket].nr_samples) in blk_mq_poll_nsecs()
3936 ret = (q->poll_stat[bucket].mean + 1) / 2; in blk_mq_poll_nsecs()
3941 static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, in blk_mq_poll_hybrid_sleep() argument
3958 if (q->poll_nsec > 0) in blk_mq_poll_hybrid_sleep()
3959 nsecs = q->poll_nsec; in blk_mq_poll_hybrid_sleep()
3961 nsecs = blk_mq_poll_nsecs(q, rq); in blk_mq_poll_hybrid_sleep()
3994 static bool blk_mq_poll_hybrid(struct request_queue *q, in blk_mq_poll_hybrid() argument
3999 if (q->poll_nsec == BLK_MQ_POLL_CLASSIC) in blk_mq_poll_hybrid()
4016 return blk_mq_poll_hybrid_sleep(q, rq); in blk_mq_poll_hybrid()
4031 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) in blk_poll() argument
4037 !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) in blk_poll()
4043 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; in blk_poll()
4052 if (blk_mq_poll_hybrid(q, hctx, cookie)) in blk_poll()
4063 ret = q->mq_ops->poll(hctx); in blk_poll()
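
blk_poll() resolves the cookie to a hardware queue, optionally sleeps first via the hybrid estimate (roughly half the mean completion time taken from q->poll_stat), and then calls ->poll() until something completes. A hedged sketch of a caller loop modelled on the polled direct-I/O pattern; mydrv_submit_and_poll() is hypothetical, and it assumes a bi_end_io handler (not shown) that clears bio->bi_private on completion.

#include <linux/blkdev.h>
#include <linux/sched.h>

/*
 * Hypothetical synchronous polled submission: submit_bio() returns the
 * queue cookie, then blk_poll() spins the owning hardware queue until
 * the (assumed) completion handler clears bio->bi_private.
 */
static void mydrv_submit_and_poll(struct request_queue *q, struct bio *bio)
{
	blk_qc_t cookie = submit_bio(bio);

	while (READ_ONCE(bio->bi_private)) {
		if (!blk_poll(q, cookie, true))	/* nothing completed this pass */
			cond_resched();
	}
}
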