
Lines matching references to q (struct request_queue *q) in block/blk-mq.c:

81 void blk_mq_freeze_queue_start(struct request_queue *q) in blk_mq_freeze_queue_start() argument
85 freeze_depth = atomic_inc_return(&q->mq_freeze_depth); in blk_mq_freeze_queue_start()
87 percpu_ref_kill(&q->q_usage_counter); in blk_mq_freeze_queue_start()
88 blk_mq_run_hw_queues(q, false); in blk_mq_freeze_queue_start()
93 static void blk_mq_freeze_queue_wait(struct request_queue *q) in blk_mq_freeze_queue_wait() argument
95 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); in blk_mq_freeze_queue_wait()
102 void blk_freeze_queue(struct request_queue *q) in blk_freeze_queue() argument
111 blk_mq_freeze_queue_start(q); in blk_freeze_queue()
112 blk_mq_freeze_queue_wait(q); in blk_freeze_queue()
115 void blk_mq_freeze_queue(struct request_queue *q) in blk_mq_freeze_queue() argument
121 blk_freeze_queue(q); in blk_mq_freeze_queue()
125 void blk_mq_unfreeze_queue(struct request_queue *q) in blk_mq_unfreeze_queue() argument
129 freeze_depth = atomic_dec_return(&q->mq_freeze_depth); in blk_mq_unfreeze_queue()
132 percpu_ref_reinit(&q->q_usage_counter); in blk_mq_unfreeze_queue()
133 wake_up_all(&q->mq_freeze_wq); in blk_mq_unfreeze_queue()
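The freeze path above (blk_mq_freeze_queue_start() kills q_usage_counter, blk_mq_freeze_queue_wait() waits for it to drain) pairs with blk_mq_unfreeze_queue(), which reinitializes the ref and wakes mq_freeze_wq. A minimal driver-side sketch of how these are typically used around a reconfiguration; my_dev and my_dev_apply_settings() are hypothetical names, not from this file:

        /* Hedged sketch: quiesce all I/O on the queue while per-queue state changes. */
        static void my_dev_reconfigure(struct my_dev *dev)
        {
                blk_mq_freeze_queue(dev->queue);        /* kill q_usage_counter, wait for drain */
                my_dev_apply_settings(dev);             /* no request can enter the queue here */
                blk_mq_unfreeze_queue(dev->queue);      /* reinit the ref, wake q->mq_freeze_wq */
        }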
138 void blk_mq_wake_waiters(struct request_queue *q) in blk_mq_wake_waiters() argument
143 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_wake_waiters()
152 wake_up_all(&q->mq_freeze_wq); in blk_mq_wake_waiters()
161 static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, in blk_mq_rq_ctx_init() argument
164 if (blk_queue_io_stat(q)) in blk_mq_rq_ctx_init()
169 rq->q = q; in blk_mq_rq_ctx_init()
225 blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw); in __blk_mq_alloc_request()
232 struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, in blk_mq_alloc_request() argument
241 ret = blk_queue_enter(q, gfp); in blk_mq_alloc_request()
245 ctx = blk_mq_get_ctx(q); in blk_mq_alloc_request()
246 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_alloc_request()
247 blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_DIRECT_RECLAIM, in blk_mq_alloc_request()
255 ctx = blk_mq_get_ctx(q); in blk_mq_alloc_request()
256 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_alloc_request()
257 blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx, in blk_mq_alloc_request()
264 blk_queue_exit(q); in blk_mq_alloc_request()
275 struct request_queue *q = rq->q; in __blk_mq_free_request() local
283 blk_queue_exit(q); in __blk_mq_free_request()
299 struct request_queue *q = rq->q; in blk_mq_free_request() local
301 hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); in blk_mq_free_request()
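blk_mq_alloc_request() enters the queue via blk_queue_enter(), maps the current CPU's software context to a hardware context and pulls a tagged request; blk_mq_free_request() hands it back and drops the queue reference via blk_queue_exit(). A hedged caller-side sketch, assuming the (rw, gfp, reserved) signature of this kernel generation and a hypothetical driver-internal command:

        static int my_dev_send_internal_cmd(struct request_queue *q)
        {
                struct request *rq;

                rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false); /* false: ordinary, non-reserved tag */
                if (IS_ERR(rq))
                        return PTR_ERR(rq);

                /* ... fill in the driver-specific payload and issue it ... */

                blk_mq_free_request(rq);        /* returns the tag and drops the q_usage_counter reference */
                return 0;
        }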
332 rq->q->softirq_done_fn(rq); in __blk_mq_complete_request_remote()
341 if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) { in blk_mq_ipi_complete_request()
342 rq->q->softirq_done_fn(rq); in blk_mq_ipi_complete_request()
347 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) in blk_mq_ipi_complete_request()
356 rq->q->softirq_done_fn(rq); in blk_mq_ipi_complete_request()
363 struct request_queue *q = rq->q; in __blk_mq_complete_request() local
365 if (!q->softirq_done_fn) in __blk_mq_complete_request()
381 struct request_queue *q = rq->q; in blk_mq_complete_request() local
383 if (unlikely(blk_should_fake_timeout(q))) in blk_mq_complete_request()
400 struct request_queue *q = rq->q; in blk_mq_start_request() local
402 trace_block_rq_issue(q, rq); in blk_mq_start_request()
427 if (q->dma_drain_size && blk_rq_bytes(rq)) { in blk_mq_start_request()
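Taken together, the issue and completion hooks above bracket a request's life in a driver: ->queue_rq() must call blk_mq_start_request() before handing the request to hardware (so the timeout machinery and the dma_drain accounting seen here take effect), and completions funnel through blk_mq_complete_request() into q->softirq_done_fn. A hedged ->queue_rq() sketch for a trivial synchronous device; my_handle_cmd() is hypothetical, and the BLK_MQ_RQ_QUEUE_* return codes match this kernel generation:

        static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
                               const struct blk_mq_queue_data *bd)
        {
                struct request *rq = bd->rq;
                int err;

                blk_mq_start_request(rq);       /* arms the timeout, fires trace_block_rq_issue() */

                err = my_handle_cmd(rq);        /* hypothetical: process the request synchronously */

                blk_mq_end_request(rq, err);    /* complete all bytes and free the tag */
                return BLK_MQ_RQ_QUEUE_OK;
        }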
440 struct request_queue *q = rq->q; in __blk_mq_requeue_request() local
442 trace_block_rq_requeue(q, rq); in __blk_mq_requeue_request()
445 if (q->dma_drain_size && blk_rq_bytes(rq)) in __blk_mq_requeue_request()
461 struct request_queue *q = in blk_mq_requeue_work() local
467 spin_lock_irqsave(&q->requeue_lock, flags); in blk_mq_requeue_work()
468 list_splice_init(&q->requeue_list, &rq_list); in blk_mq_requeue_work()
469 spin_unlock_irqrestore(&q->requeue_lock, flags); in blk_mq_requeue_work()
490 blk_mq_start_hw_queues(q); in blk_mq_requeue_work()
495 struct request_queue *q = rq->q; in blk_mq_add_to_requeue_list() local
504 spin_lock_irqsave(&q->requeue_lock, flags); in blk_mq_add_to_requeue_list()
507 list_add(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
509 list_add_tail(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
511 spin_unlock_irqrestore(&q->requeue_lock, flags); in blk_mq_add_to_requeue_list()
515 void blk_mq_cancel_requeue_work(struct request_queue *q) in blk_mq_cancel_requeue_work() argument
517 cancel_work_sync(&q->requeue_work); in blk_mq_cancel_requeue_work()
521 void blk_mq_kick_requeue_list(struct request_queue *q) in blk_mq_kick_requeue_list() argument
523 kblockd_schedule_work(&q->requeue_work); in blk_mq_kick_requeue_list()
527 void blk_mq_abort_requeue_list(struct request_queue *q) in blk_mq_abort_requeue_list() argument
532 spin_lock_irqsave(&q->requeue_lock, flags); in blk_mq_abort_requeue_list()
533 list_splice_init(&q->requeue_list, &rq_list); in blk_mq_abort_requeue_list()
534 spin_unlock_irqrestore(&q->requeue_lock, flags); in blk_mq_abort_requeue_list()
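The requeue list is how drivers push back requests they could not service: blk_mq_add_to_requeue_list() queues them under q->requeue_lock, and blk_mq_kick_requeue_list() schedules blk_mq_requeue_work(), which splices the list off, re-inserts the requests and restarts the hardware queues. A hedged caller-side sketch, assuming the single-argument blk_mq_requeue_request() of this kernel generation (the exported wrapper around __blk_mq_requeue_request(), not shown in this listing):

        /* Hedged sketch: bounce a request back when the device reports "busy". */
        static void my_dev_requeue(struct request *rq)
        {
                struct request_queue *q = rq->q;

                blk_mq_requeue_request(rq);     /* un-start rq and put it on q->requeue_list */
                blk_mq_kick_requeue_list(q);    /* schedule blk_mq_requeue_work() to re-insert it */
        }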
560 struct blk_mq_ops *ops = req->q->mq_ops; in blk_mq_rq_timed_out()
604 if (unlikely(blk_queue_dying(rq->q))) { in blk_mq_check_expired()
624 struct request_queue *q = (struct request_queue *)priv; in blk_mq_rq_timer() local
644 if (!percpu_ref_tryget(&q->q_usage_counter)) in blk_mq_rq_timer()
647 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data); in blk_mq_rq_timer()
651 mod_timer(&q->timeout, data.next); in blk_mq_rq_timer()
655 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_rq_timer()
661 blk_queue_exit(q); in blk_mq_rq_timer()
669 static bool blk_mq_attempt_merge(struct request_queue *q, in blk_mq_attempt_merge() argument
686 if (bio_attempt_back_merge(q, rq, bio)) { in blk_mq_attempt_merge()
692 if (bio_attempt_front_merge(q, rq, bio)) { in blk_mq_attempt_merge()
745 struct request_queue *q = hctx->queue; in __blk_mq_run_hw_queue() local
796 ret = q->mq_ops->queue_rq(hctx, &bd); in __blk_mq_run_hw_queue()
896 void blk_mq_run_hw_queues(struct request_queue *q, bool async) in blk_mq_run_hw_queues() argument
901 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_run_hw_queues()
920 void blk_mq_stop_hw_queues(struct request_queue *q) in blk_mq_stop_hw_queues() argument
925 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_stop_hw_queues()
938 void blk_mq_start_hw_queues(struct request_queue *q) in blk_mq_start_hw_queues() argument
943 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_start_hw_queues()
948 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) in blk_mq_start_stopped_hw_queues() argument
953 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_start_stopped_hw_queues()
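The run/stop/start helpers above give drivers flow control: stop a hardware queue when the device cannot accept more commands, then restart the stopped queues (typically asynchronously, from interrupt context) once space frees up. A hedged flow-control variant of the ->queue_rq() sketch above; my_dev_ring_full() and the my_dev layout are hypothetical:

        static int my_flowctl_queue_rq(struct blk_mq_hw_ctx *hctx,
                                       const struct blk_mq_queue_data *bd)
        {
                struct my_dev *dev = hctx->queue->queuedata;

                if (my_dev_ring_full(dev)) {            /* hypothetical ring-space check */
                        blk_mq_stop_hw_queue(hctx);     /* stop just this hardware context */
                        return BLK_MQ_RQ_QUEUE_BUSY;    /* the core will retry the request */
                }

                blk_mq_start_request(bd->rq);
                /* ... post bd->rq to the hardware ring; completion arrives via IRQ ... */
                return BLK_MQ_RQ_QUEUE_OK;
        }

        static void my_dev_irq_done(struct my_dev *dev)
        {
                /* ... reap completions from the ring ... */
                blk_mq_start_stopped_hw_queues(dev->queue, true);       /* true: run async via kblockd */
        }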
1017 struct request_queue *q = rq->q; in blk_mq_insert_request() local
1021 current_ctx = blk_mq_get_ctx(q); in blk_mq_insert_request()
1025 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_insert_request()
1037 static void blk_mq_insert_requests(struct request_queue *q, in blk_mq_insert_requests() argument
1047 trace_block_unplug(q, depth, !from_schedule); in blk_mq_insert_requests()
1049 current_ctx = blk_mq_get_ctx(q); in blk_mq_insert_requests()
1053 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_insert_requests()
1105 BUG_ON(!rq->q); in blk_mq_flush_plug_list()
1114 this_q = rq->q; in blk_mq_flush_plug_list()
1158 struct request_queue *q = hctx->queue; in blk_mq_merge_queue_io() local
1161 if (!blk_mq_attempt_merge(q, ctx, bio)) { in blk_mq_merge_queue_io()
1177 static struct request *blk_mq_map_request(struct request_queue *q, in blk_mq_map_request() argument
1187 blk_queue_enter_live(q); in blk_mq_map_request()
1188 ctx = blk_mq_get_ctx(q); in blk_mq_map_request()
1189 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_map_request()
1194 trace_block_getrq(q, bio, rw); in blk_mq_map_request()
1195 blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx, in blk_mq_map_request()
1201 trace_block_sleeprq(q, bio, rw); in blk_mq_map_request()
1203 ctx = blk_mq_get_ctx(q); in blk_mq_map_request()
1204 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_map_request()
1205 blk_mq_set_alloc_data(&alloc_data, q, in blk_mq_map_request()
1221 struct request_queue *q = rq->q; in blk_mq_direct_issue_request() local
1222 struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, in blk_mq_direct_issue_request()
1236 ret = q->mq_ops->queue_rq(hctx, &bd); in blk_mq_direct_issue_request()
1259 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) in blk_mq_make_request() argument
1270 blk_queue_bounce(q, &bio); in blk_mq_make_request()
1272 blk_queue_split(q, &bio, q->bio_split); in blk_mq_make_request()
1279 if (!is_flush_fua && !blk_queue_nomerges(q) && in blk_mq_make_request()
1280 blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq)) in blk_mq_make_request()
1283 rq = blk_mq_map_request(q, bio, &data); in blk_mq_make_request()
1301 if (((plug && !blk_queue_nomerges(q)) || is_sync) && in blk_mq_make_request()
1353 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) in blk_sq_make_request() argument
1363 blk_queue_bounce(q, &bio); in blk_sq_make_request()
1370 blk_queue_split(q, &bio, q->bio_split); in blk_sq_make_request()
1372 if (!is_flush_fua && !blk_queue_nomerges(q)) { in blk_sq_make_request()
1373 if (blk_attempt_plug_merge(q, bio, &request_count, NULL)) in blk_sq_make_request()
1376 request_count = blk_plug_queued_count(q); in blk_sq_make_request()
1378 rq = blk_mq_map_request(q, bio, &data); in blk_sq_make_request()
1399 trace_block_plug(q); in blk_sq_make_request()
1405 trace_block_plug(q); in blk_sq_make_request()
1430 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu) in blk_mq_map_queue() argument
1432 return q->queue_hw_ctx[q->mq_map[cpu]]; in blk_mq_map_queue()
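blk_mq_map_queue() is the default CPU-to-hardware-context mapping (a lookup through q->mq_map[cpu]); in this kernel generation drivers typically point the .map_queue hook of their blk_mq_ops at this helper rather than calling it directly. A hedged ops-table sketch reusing the hypothetical my_queue_rq from above; the optional .complete callback is what blk_queue_softirq_done() turns into q->softirq_done_fn further down:

        static struct blk_mq_ops my_mq_ops = {
                .queue_rq       = my_queue_rq,          /* dispatch one request to the device */
                .map_queue      = blk_mq_map_queue,     /* default cpu -> hctx mapping shown above */
        };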
1591 struct request_queue *q = hctx->queue; in blk_mq_hctx_cpu_offline() local
1598 ctx = __blk_mq_get_ctx(q, cpu); in blk_mq_hctx_cpu_offline()
1610 ctx = blk_mq_get_ctx(q); in blk_mq_hctx_cpu_offline()
1621 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_mq_hctx_cpu_offline()
1648 static void blk_mq_exit_hctx(struct request_queue *q, in blk_mq_exit_hctx() argument
1670 static void blk_mq_exit_hw_queues(struct request_queue *q, in blk_mq_exit_hw_queues() argument
1676 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_exit_hw_queues()
1679 blk_mq_exit_hctx(q, set, hctx, i); in blk_mq_exit_hw_queues()
1683 static void blk_mq_free_hw_queues(struct request_queue *q, in blk_mq_free_hw_queues() argument
1689 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_free_hw_queues()
1693 static int blk_mq_init_hctx(struct request_queue *q, in blk_mq_init_hctx() argument
1708 hctx->queue = q; in blk_mq_init_hctx()
1736 hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); in blk_mq_init_hctx()
1763 static int blk_mq_init_hw_queues(struct request_queue *q, in blk_mq_init_hw_queues() argument
1772 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_init_hw_queues()
1773 if (blk_mq_init_hctx(q, set, hctx, i)) in blk_mq_init_hw_queues()
1777 if (i == q->nr_hw_queues) in blk_mq_init_hw_queues()
1783 blk_mq_exit_hw_queues(q, set, i); in blk_mq_init_hw_queues()
1788 static void blk_mq_init_cpu_queues(struct request_queue *q, in blk_mq_init_cpu_queues() argument
1794 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); in blk_mq_init_cpu_queues()
1801 __ctx->queue = q; in blk_mq_init_cpu_queues()
1807 hctx = q->mq_ops->map_queue(q, i); in blk_mq_init_cpu_queues()
1818 static void blk_mq_map_swqueue(struct request_queue *q, in blk_mq_map_swqueue() argument
1824 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_map_swqueue()
1829 mutex_lock(&q->sysfs_lock); in blk_mq_map_swqueue()
1831 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
1839 queue_for_each_ctx(q, ctx, i) { in blk_mq_map_swqueue()
1844 hctx = q->mq_ops->map_queue(q, i); in blk_mq_map_swqueue()
1850 mutex_unlock(&q->sysfs_lock); in blk_mq_map_swqueue()
1852 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
1888 queue_for_each_ctx(q, ctx, i) { in blk_mq_map_swqueue()
1892 hctx = q->mq_ops->map_queue(q, i); in blk_mq_map_swqueue()
1897 static void queue_set_hctx_shared(struct request_queue *q, bool shared) in queue_set_hctx_shared() argument
1902 queue_for_each_hw_ctx(q, hctx, i) { in queue_set_hctx_shared()
1912 struct request_queue *q; in blk_mq_update_tag_set_depth() local
1914 list_for_each_entry(q, &set->tag_list, tag_set_list) { in blk_mq_update_tag_set_depth()
1915 blk_mq_freeze_queue(q); in blk_mq_update_tag_set_depth()
1916 queue_set_hctx_shared(q, shared); in blk_mq_update_tag_set_depth()
1917 blk_mq_unfreeze_queue(q); in blk_mq_update_tag_set_depth()
1921 static void blk_mq_del_queue_tag_set(struct request_queue *q) in blk_mq_del_queue_tag_set() argument
1923 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_del_queue_tag_set()
1926 list_del_init(&q->tag_set_list); in blk_mq_del_queue_tag_set()
1937 struct request_queue *q) in blk_mq_add_queue_tag_set() argument
1939 q->tag_set = set; in blk_mq_add_queue_tag_set()
1950 queue_set_hctx_shared(q, true); in blk_mq_add_queue_tag_set()
1951 list_add_tail(&q->tag_set_list, &set->tag_list); in blk_mq_add_queue_tag_set()
1962 void blk_mq_release(struct request_queue *q) in blk_mq_release() argument
1968 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_release()
1975 kfree(q->mq_map); in blk_mq_release()
1976 q->mq_map = NULL; in blk_mq_release()
1978 kfree(q->queue_hw_ctx); in blk_mq_release()
1981 free_percpu(q->queue_ctx); in blk_mq_release()
1986 struct request_queue *uninit_q, *q; in blk_mq_init_queue() local
1992 q = blk_mq_init_allocated_queue(set, uninit_q); in blk_mq_init_queue()
1993 if (IS_ERR(q)) in blk_mq_init_queue()
1996 return q; in blk_mq_init_queue()
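blk_mq_init_queue() is the usual driver entry point: it allocates an empty queue and hands it to blk_mq_init_allocated_queue(), which wires up the timeout timer, requeue work and make_request function seen below. A hedged setup sketch; the tag-set values and struct my_cmd are illustrative, not taken from this file:

        static struct blk_mq_tag_set my_tag_set;

        static int my_dev_init_queue(struct my_dev *dev)
        {
                struct request_queue *q;
                int ret;

                my_tag_set.ops          = &my_mq_ops;           /* the ops sketch above */
                my_tag_set.nr_hw_queues = 1;                    /* single hardware context */
                my_tag_set.queue_depth  = 64;                   /* illustrative depth */
                my_tag_set.numa_node    = NUMA_NO_NODE;
                my_tag_set.cmd_size     = sizeof(struct my_cmd);/* per-request driver payload */
                my_tag_set.flags        = BLK_MQ_F_SHOULD_MERGE;

                ret = blk_mq_alloc_tag_set(&my_tag_set);
                if (ret)
                        return ret;

                q = blk_mq_init_queue(&my_tag_set);             /* ends up in blk_mq_init_allocated_queue() */
                if (IS_ERR(q)) {
                        blk_mq_free_tag_set(&my_tag_set);
                        return PTR_ERR(q);
                }

                q->queuedata = dev;
                dev->queue = q;
                return 0;
        }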
2001 struct request_queue *q) in blk_mq_init_allocated_queue() argument
2039 setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); in blk_mq_init_allocated_queue()
2040 blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); in blk_mq_init_allocated_queue()
2042 q->nr_queues = nr_cpu_ids; in blk_mq_init_allocated_queue()
2043 q->nr_hw_queues = set->nr_hw_queues; in blk_mq_init_allocated_queue()
2044 q->mq_map = map; in blk_mq_init_allocated_queue()
2046 q->queue_ctx = ctx; in blk_mq_init_allocated_queue()
2047 q->queue_hw_ctx = hctxs; in blk_mq_init_allocated_queue()
2049 q->mq_ops = set->ops; in blk_mq_init_allocated_queue()
2050 q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; in blk_mq_init_allocated_queue()
2053 q->queue_flags |= 1 << QUEUE_FLAG_NO_SG_MERGE; in blk_mq_init_allocated_queue()
2055 q->sg_reserved_size = INT_MAX; in blk_mq_init_allocated_queue()
2057 INIT_WORK(&q->requeue_work, blk_mq_requeue_work); in blk_mq_init_allocated_queue()
2058 INIT_LIST_HEAD(&q->requeue_list); in blk_mq_init_allocated_queue()
2059 spin_lock_init(&q->requeue_lock); in blk_mq_init_allocated_queue()
2061 if (q->nr_hw_queues > 1) in blk_mq_init_allocated_queue()
2062 blk_queue_make_request(q, blk_mq_make_request); in blk_mq_init_allocated_queue()
2064 blk_queue_make_request(q, blk_sq_make_request); in blk_mq_init_allocated_queue()
2069 q->nr_requests = set->queue_depth; in blk_mq_init_allocated_queue()
2072 blk_queue_softirq_done(q, set->ops->complete); in blk_mq_init_allocated_queue()
2074 blk_mq_init_cpu_queues(q, set->nr_hw_queues); in blk_mq_init_allocated_queue()
2076 if (blk_mq_init_hw_queues(q, set)) in blk_mq_init_allocated_queue()
2082 list_add_tail(&q->all_q_node, &all_q_list); in blk_mq_init_allocated_queue()
2083 blk_mq_add_queue_tag_set(set, q); in blk_mq_init_allocated_queue()
2084 blk_mq_map_swqueue(q, cpu_online_mask); in blk_mq_init_allocated_queue()
2089 return q; in blk_mq_init_allocated_queue()
2107 void blk_mq_free_queue(struct request_queue *q) in blk_mq_free_queue() argument
2109 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_free_queue()
2112 list_del_init(&q->all_q_node); in blk_mq_free_queue()
2115 blk_mq_del_queue_tag_set(q); in blk_mq_free_queue()
2117 blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); in blk_mq_free_queue()
2118 blk_mq_free_hw_queues(q, set); in blk_mq_free_queue()
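blk_mq_free_queue() runs from the queue-release path rather than being called by drivers directly; in this generation the driver-visible teardown is blk_cleanup_queue(), which drains the queue and reaches blk_mq_free_queue() for mq queues, followed by blk_mq_free_tag_set(). A hedged counterpart to the init sketch above:

        static void my_dev_exit_queue(struct my_dev *dev)
        {
                blk_cleanup_queue(dev->queue);          /* drain; ends up in blk_mq_free_queue() */
                blk_mq_free_tag_set(&my_tag_set);       /* release tags and per-request storage */
        }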
2122 static void blk_mq_queue_reinit(struct request_queue *q, in blk_mq_queue_reinit() argument
2125 WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth)); in blk_mq_queue_reinit()
2127 blk_mq_sysfs_unregister(q); in blk_mq_queue_reinit()
2129 blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask); in blk_mq_queue_reinit()
2137 blk_mq_map_swqueue(q, online_mask); in blk_mq_queue_reinit()
2139 blk_mq_sysfs_register(q); in blk_mq_queue_reinit()
2145 struct request_queue *q; in blk_mq_queue_reinit_notify() local
2193 list_for_each_entry(q, &all_q_list, all_q_node) in blk_mq_queue_reinit_notify()
2194 blk_mq_freeze_queue_start(q); in blk_mq_queue_reinit_notify()
2195 list_for_each_entry(q, &all_q_list, all_q_node) { in blk_mq_queue_reinit_notify()
2196 blk_mq_freeze_queue_wait(q); in blk_mq_queue_reinit_notify()
2202 del_timer_sync(&q->timeout); in blk_mq_queue_reinit_notify()
2205 list_for_each_entry(q, &all_q_list, all_q_node) in blk_mq_queue_reinit_notify()
2206 blk_mq_queue_reinit(q, &online_new); in blk_mq_queue_reinit_notify()
2208 list_for_each_entry(q, &all_q_list, all_q_node) in blk_mq_queue_reinit_notify()
2209 blk_mq_unfreeze_queue(q); in blk_mq_queue_reinit_notify()
2345 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) in blk_mq_update_nr_requests() argument
2347 struct blk_mq_tag_set *set = q->tag_set; in blk_mq_update_nr_requests()
2355 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_update_nr_requests()
2362 q->nr_requests = nr; in blk_mq_update_nr_requests()