Lines Matching refs:hctx

Cross-reference hits for the hardware dispatch context pointer (struct blk_mq_hw_ctx *hctx) in the block layer's multi-queue core, block/blk-mq.c (an Android common kernel tree, as the trace_android_vh_*/trace_android_rvh_* vendor hooks indicate). Each entry shows the source line number, the matching code fragment, and the enclosing function; declaration lines are additionally tagged "argument", "local", or "member".

72 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)  in blk_mq_hctx_has_pending()  argument
74 return !list_empty_careful(&hctx->dispatch) || in blk_mq_hctx_has_pending()
75 sbitmap_any_bit_set(&hctx->ctx_map) || in blk_mq_hctx_has_pending()
76 blk_mq_sched_has_work(hctx); in blk_mq_hctx_has_pending()
82 static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_mark_pending() argument
85 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_mark_pending()
87 if (!sbitmap_test_bit(&hctx->ctx_map, bit)) in blk_mq_hctx_mark_pending()
88 sbitmap_set_bit(&hctx->ctx_map, bit); in blk_mq_hctx_mark_pending()
91 static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx, in blk_mq_hctx_clear_pending() argument
94 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_clear_pending()
96 sbitmap_clear_bit(&hctx->ctx_map, bit); in blk_mq_hctx_clear_pending()
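
The two helpers above track which software queues hold work: every blk_mq_ctx owns one bit in hctx->ctx_map, indexed by ctx->index_hw[hctx->type], and the bit is tested before being set to keep the common "already pending" case read-only. A minimal sketch of the same bookkeeping using only the generic sbitmap API, with hypothetical names (my_hw_queue, my_mark_pending, my_clear_pending):

#include <linux/sbitmap.h>

/* Hypothetical per-"hardware queue" pending map, one bit per source queue,
 * mirroring the hctx->ctx_map usage shown above. */
struct my_hw_queue {
        struct sbitmap pending;
};

static void my_mark_pending(struct my_hw_queue *hq, unsigned int src_idx)
{
        /* Test first so the hot path does not dirty a shared cache line. */
        if (!sbitmap_test_bit(&hq->pending, src_idx))
                sbitmap_set_bit(&hq->pending, src_idx);
}

static void my_clear_pending(struct my_hw_queue *hq, unsigned int src_idx)
{
        sbitmap_clear_bit(&hq->pending, src_idx);
}
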
104 static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx, in blk_mq_check_inflight() argument
226 struct blk_mq_hw_ctx *hctx; in blk_mq_quiesce_queue() local
232 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_quiesce_queue()
233 if (hctx->flags & BLK_MQ_F_BLOCKING) in blk_mq_quiesce_queue()
234 synchronize_srcu(hctx->srcu); in blk_mq_quiesce_queue()
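
blk_mq_quiesce_queue() waits for every in-flight ->queue_rq invocation to finish, using synchronize_srcu() on hctx->srcu for BLK_MQ_F_BLOCKING queues and plain RCU otherwise. Drivers typically bracket state changes that ->queue_rq reads with a quiesce/unquiesce pair; a hedged sketch, where mydrv_reconfigure and the elided body are hypothetical and the two blk-mq calls are the real exported API:

#include <linux/blk-mq.h>

static void mydrv_reconfigure(struct request_queue *q)
{
        blk_mq_quiesce_queue(q);        /* returns after queue_rq calls drain */
        /* ... update driver state that ->queue_rq consults ... */
        blk_mq_unquiesce_queue(q);      /* allow dispatch again */
}
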
261 struct blk_mq_hw_ctx *hctx; in blk_mq_wake_waiters() local
264 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_wake_waiters()
265 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_wake_waiters()
266 blk_mq_tag_wakeup_all(hctx->tags, true); in blk_mq_wake_waiters()
295 rq->mq_hctx = data->hctx; in blk_mq_rq_ctx_init()
345 data->hctx->queued++; in blk_mq_rq_ctx_init()
379 data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx); in __blk_mq_alloc_request()
381 blk_mq_tag_busy(data->hctx); in __blk_mq_alloc_request()
473 data.hctx = q->queue_hw_ctx[hctx_idx]; in blk_mq_alloc_request_hctx()
474 if (!blk_mq_hw_queue_mapped(data.hctx)) in blk_mq_alloc_request_hctx()
476 cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask); in blk_mq_alloc_request_hctx()
482 blk_mq_tag_busy(data.hctx); in blk_mq_alloc_request_hctx()
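
blk_mq_alloc_request_hctx() pins an allocation to one specific hardware queue and then picks an online CPU from that hctx's cpumask, which fabrics-style drivers use for per-queue internal commands. A hedged sketch of a caller, where mydrv_alloc_private_cmd is hypothetical and the flags simply illustrate a reserved, non-sleeping allocation:

#include <linux/blk-mq.h>

/* Allocate a driver-internal request bound to hardware queue @hctx_idx.
 * The return value is an ERR_PTR on failure, so callers must IS_ERR() it. */
static struct request *mydrv_alloc_private_cmd(struct request_queue *q,
                                               unsigned int hctx_idx)
{
        return blk_mq_alloc_request_hctx(q, REQ_OP_DRV_OUT,
                                         BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED,
                                         hctx_idx);
}
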
500 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in __blk_mq_free_request() local
508 trace_android_vh_internal_blk_mq_free_request(&skip, rq, hctx); in __blk_mq_free_request()
511 blk_mq_put_tag(hctx->tags, ctx, rq->tag); in __blk_mq_free_request()
514 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag); in __blk_mq_free_request()
515 blk_mq_sched_restart(hctx); in __blk_mq_free_request()
524 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_free_request() local
537 __blk_mq_dec_active_requests(hctx); in blk_mq_free_request()
722 static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) in hctx_unlock() argument
723 __releases(hctx->srcu) in hctx_unlock()
725 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) in hctx_unlock()
728 srcu_read_unlock(hctx->srcu, srcu_idx); in hctx_unlock()
731 static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx) in hctx_lock() argument
732 __acquires(hctx->srcu) in hctx_lock()
734 if (!(hctx->flags & BLK_MQ_F_BLOCKING)) { in hctx_lock()
739 *srcu_idx = srcu_read_lock(hctx->srcu); in hctx_lock()
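
hctx_lock()/hctx_unlock() give the dispatch path a read-side critical section that the quiesce path can wait on: plain RCU for non-blocking queues, the per-hctx SRCU domain when BLK_MQ_F_BLOCKING is set (because ->queue_rq may sleep). A standalone sketch of that choice, with demo_srcu and demo_dispatch as hypothetical stand-ins:

#include <linux/rcupdate.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(demo_srcu);

static void demo_dispatch(bool blocking)
{
        int idx = 0;

        if (!blocking)
                rcu_read_lock();
        else
                idx = srcu_read_lock(&demo_srcu);

        /* ... dispatch work the quiesce side must be able to drain ... */

        if (!blocking)
                rcu_read_unlock();
        else
                srcu_read_unlock(&demo_srcu, idx);
}
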
893 static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, in blk_mq_rq_inflight() argument
900 if (blk_mq_request_started(rq) && rq->q == hctx->queue) { in blk_mq_rq_inflight()
962 static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, in blk_mq_check_expired() argument
984 struct blk_mq_hw_ctx *hctx; in blk_mq_timeout_work() local
1014 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_timeout_work()
1016 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_timeout_work()
1017 blk_mq_tag_idle(hctx); in blk_mq_timeout_work()
1024 struct blk_mq_hw_ctx *hctx; member
1031 struct blk_mq_hw_ctx *hctx = flush_data->hctx; in flush_busy_ctx() local
1032 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; in flush_busy_ctx()
1033 enum hctx_type type = hctx->type; in flush_busy_ctx()
1046 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list) in blk_mq_flush_busy_ctxs() argument
1049 .hctx = hctx, in blk_mq_flush_busy_ctxs()
1053 sbitmap_for_each_set(&hctx->ctx_map, flush_busy_ctx, &data); in blk_mq_flush_busy_ctxs()
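
blk_mq_flush_busy_ctxs() walks every set bit in hctx->ctx_map via sbitmap_for_each_set() and lets the flush_busy_ctx() callback drain that software queue onto a list. The callback contract is the generic sbitmap one: return true to continue the walk, false to stop. A small sketch with hypothetical names (walk_data, count_busy_bit, count_busy):

#include <linux/sbitmap.h>

struct walk_data {
        int count;
};

static bool count_busy_bit(struct sbitmap *sb, unsigned int bitnr, void *data)
{
        struct walk_data *wd = data;

        wd->count++;            /* a real callback would drain ctx "bitnr" here */
        return true;            /* keep iterating */
}

static int count_busy(struct sbitmap *sb)
{
        struct walk_data wd = { .count = 0 };

        sbitmap_for_each_set(sb, count_busy_bit, &wd);
        return wd.count;
}
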
1058 struct blk_mq_hw_ctx *hctx; member
1066 struct blk_mq_hw_ctx *hctx = dispatch_data->hctx; in dispatch_rq_from_ctx() local
1067 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; in dispatch_rq_from_ctx()
1068 enum hctx_type type = hctx->type; in dispatch_rq_from_ctx()
1082 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx, in blk_mq_dequeue_from_ctx() argument
1085 unsigned off = start ? start->index_hw[hctx->type] : 0; in blk_mq_dequeue_from_ctx()
1087 .hctx = hctx, in blk_mq_dequeue_from_ctx()
1091 __sbitmap_for_each_set(&hctx->ctx_map, off, in blk_mq_dequeue_from_ctx()
1131 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_get_driver_tag() local
1136 if ((hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED) && in blk_mq_get_driver_tag()
1139 __blk_mq_inc_active_requests(hctx); in blk_mq_get_driver_tag()
1141 hctx->tags->rqs[rq->tag] = rq; in blk_mq_get_driver_tag()
1148 struct blk_mq_hw_ctx *hctx; in blk_mq_dispatch_wake() local
1150 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait); in blk_mq_dispatch_wake()
1152 spin_lock(&hctx->dispatch_wait_lock); in blk_mq_dispatch_wake()
1157 sbq = hctx->tags->bitmap_tags; in blk_mq_dispatch_wake()
1160 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_dispatch_wake()
1162 blk_mq_run_hw_queue(hctx, true); in blk_mq_dispatch_wake()
1172 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx, in blk_mq_mark_tag_wait() argument
1175 struct sbitmap_queue *sbq = hctx->tags->bitmap_tags; in blk_mq_mark_tag_wait()
1180 if (!(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) { in blk_mq_mark_tag_wait()
1181 blk_mq_sched_mark_restart_hctx(hctx); in blk_mq_mark_tag_wait()
1194 wait = &hctx->dispatch_wait; in blk_mq_mark_tag_wait()
1198 wq = &bt_wait_ptr(sbq, hctx)->wait; in blk_mq_mark_tag_wait()
1201 spin_lock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
1203 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
1235 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
1246 spin_unlock(&hctx->dispatch_wait_lock); in blk_mq_mark_tag_wait()
1261 static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy) in blk_mq_update_dispatch_busy() argument
1265 ewma = hctx->dispatch_busy; in blk_mq_update_dispatch_busy()
1275 hctx->dispatch_busy = ewma; in blk_mq_update_dispatch_busy()
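
blk_mq_update_dispatch_busy() keeps hctx->dispatch_busy as a small exponentially weighted moving average of recent "driver was busy" outcomes; the submit path later checks !data.hctx->dispatch_busy to decide whether direct issue is worthwhile. The update has the usual EWMA shape, sketched below with illustrative DEMO_* constants standing in for the kernel's BLK_MQ_DISPATCH_BUSY_EWMA_* values:

#include <linux/types.h>

/* new = (old * (W - 1) + sample) / W, sample scaled by 1 << F so a single
 * busy event does not immediately round back down to zero. */
#define DEMO_EWMA_WEIGHT        8
#define DEMO_EWMA_FACTOR        4

static unsigned int demo_update_busy(unsigned int ewma, bool busy)
{
        if (!ewma && !busy)
                return 0;                       /* already idle, stay idle */

        ewma *= DEMO_EWMA_WEIGHT - 1;
        if (busy)
                ewma += 1 << DEMO_EWMA_FACTOR;
        ewma /= DEMO_EWMA_WEIGHT;
        return ewma;
}
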
1319 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_prep_dispatch_rq() local
1334 if (!blk_mq_mark_tag_wait(hctx, rq)) { in blk_mq_prep_dispatch_rq()
1361 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, in blk_mq_dispatch_rq_list() argument
1365 struct request_queue *q = hctx->queue; in blk_mq_dispatch_rq_list()
1384 WARN_ON_ONCE(hctx != rq->mq_hctx); in blk_mq_dispatch_rq_list()
1410 ret = q->mq_ops->queue_rq(hctx, &bd); in blk_mq_dispatch_rq_list()
1439 hctx->dispatched[queued_to_index(queued)]++; in blk_mq_dispatch_rq_list()
1446 q->mq_ops->commit_rqs(hctx); in blk_mq_dispatch_rq_list()
1455 (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED); in blk_mq_dispatch_rq_list()
1459 spin_lock(&hctx->lock); in blk_mq_dispatch_rq_list()
1460 list_splice_tail_init(list, &hctx->dispatch); in blk_mq_dispatch_rq_list()
1461 spin_unlock(&hctx->lock); in blk_mq_dispatch_rq_list()
1498 needs_restart = blk_mq_sched_needs_restart(hctx); in blk_mq_dispatch_rq_list()
1502 (no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) in blk_mq_dispatch_rq_list()
1503 blk_mq_run_hw_queue(hctx, true); in blk_mq_dispatch_rq_list()
1505 blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); in blk_mq_dispatch_rq_list()
1507 blk_mq_update_dispatch_busy(hctx, true); in blk_mq_dispatch_rq_list()
1510 blk_mq_update_dispatch_busy(hctx, false); in blk_mq_dispatch_rq_list()
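
blk_mq_dispatch_rq_list() is where ->queue_rq finally runs; when the driver answers BLK_STS_RESOURCE or BLK_STS_DEV_RESOURCE, the leftovers are spliced back onto hctx->dispatch and the queue is re-run via the restart mechanism or after BLK_MQ_RESOURCE_DELAY. A hedged driver-side sketch of such a ->queue_rq, where struct mydrv_queue, mydrv_slot_available() and mydrv_submit() are hypothetical:

#include <linux/blk-mq.h>

struct mydrv_queue;                                             /* hypothetical */
static bool mydrv_slot_available(struct mydrv_queue *dq);       /* hypothetical */
static void mydrv_submit(struct mydrv_queue *dq, struct request *rq); /* hypothetical */

static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
{
        struct mydrv_queue *dq = hctx->driver_data;
        struct request *rq = bd->rq;

        /* Out of device slots: blk-mq keeps rq on hctx->dispatch and retries. */
        if (!mydrv_slot_available(dq))
                return BLK_STS_DEV_RESOURCE;

        blk_mq_start_request(rq);
        mydrv_submit(dq, rq);
        return BLK_STS_OK;
}
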
1521 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) in __blk_mq_run_hw_queue() argument
1542 if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) && in __blk_mq_run_hw_queue()
1543 cpu_online(hctx->next_cpu)) { in __blk_mq_run_hw_queue()
1546 cpumask_empty(hctx->cpumask) ? "inactive": "active"); in __blk_mq_run_hw_queue()
1556 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); in __blk_mq_run_hw_queue()
1558 hctx_lock(hctx, &srcu_idx); in __blk_mq_run_hw_queue()
1559 blk_mq_sched_dispatch_requests(hctx); in __blk_mq_run_hw_queue()
1560 hctx_unlock(hctx, srcu_idx); in __blk_mq_run_hw_queue()
1563 static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx) in blk_mq_first_mapped_cpu() argument
1565 int cpu = cpumask_first_and(hctx->cpumask, cpu_online_mask); in blk_mq_first_mapped_cpu()
1568 cpu = cpumask_first(hctx->cpumask); in blk_mq_first_mapped_cpu()
1578 static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_next_cpu() argument
1581 int next_cpu = hctx->next_cpu; in blk_mq_hctx_next_cpu()
1583 if (hctx->queue->nr_hw_queues == 1) in blk_mq_hctx_next_cpu()
1586 if (--hctx->next_cpu_batch <= 0) { in blk_mq_hctx_next_cpu()
1588 next_cpu = cpumask_next_and(next_cpu, hctx->cpumask, in blk_mq_hctx_next_cpu()
1591 next_cpu = blk_mq_first_mapped_cpu(hctx); in blk_mq_hctx_next_cpu()
1592 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; in blk_mq_hctx_next_cpu()
1609 hctx->next_cpu = next_cpu; in blk_mq_hctx_next_cpu()
1610 hctx->next_cpu_batch = 1; in blk_mq_hctx_next_cpu()
1614 hctx->next_cpu = next_cpu; in blk_mq_hctx_next_cpu()
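
blk_mq_hctx_next_cpu() round-robins hctx->run_work across the online CPUs mapped to the hctx, advancing with cpumask_next_and() and wrapping back through blk_mq_first_mapped_cpu() once a work batch is used up. The wrap-around walk in isolation, as a hypothetical helper:

#include <linux/cpumask.h>

static int next_mapped_cpu(int prev, const struct cpumask *mask)
{
        int cpu = cpumask_next_and(prev, mask, cpu_online_mask);

        if (cpu >= nr_cpu_ids)                  /* ran off the end: wrap */
                cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu >= nr_cpu_ids)                  /* nothing in @mask is online */
                cpu = cpumask_first(mask);
        return cpu;
}
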
1627 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async, in __blk_mq_delay_run_hw_queue() argument
1632 if (unlikely(blk_mq_hctx_stopped(hctx))) in __blk_mq_delay_run_hw_queue()
1635 trace_android_rvh_blk_mq_delay_run_hw_queue(&skip, hctx, async); in __blk_mq_delay_run_hw_queue()
1639 if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { in __blk_mq_delay_run_hw_queue()
1641 if (cpumask_test_cpu(cpu, hctx->cpumask)) { in __blk_mq_delay_run_hw_queue()
1642 __blk_mq_run_hw_queue(hctx); in __blk_mq_delay_run_hw_queue()
1650 kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work, in __blk_mq_delay_run_hw_queue()
1661 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs) in blk_mq_delay_run_hw_queue() argument
1663 __blk_mq_delay_run_hw_queue(hctx, true, msecs); in blk_mq_delay_run_hw_queue()
1676 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) in blk_mq_run_hw_queue() argument
1689 hctx_lock(hctx, &srcu_idx); in blk_mq_run_hw_queue()
1690 need_run = !blk_queue_quiesced(hctx->queue) && in blk_mq_run_hw_queue()
1691 blk_mq_hctx_has_pending(hctx); in blk_mq_run_hw_queue()
1692 hctx_unlock(hctx, srcu_idx); in blk_mq_run_hw_queue()
1694 trace_android_vh_blk_mq_run_hw_queue(&need_run, hctx); in blk_mq_run_hw_queue()
1696 __blk_mq_delay_run_hw_queue(hctx, async, 0); in blk_mq_run_hw_queue()
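
blk_mq_run_hw_queue() only kicks dispatch when the queue is not quiesced and blk_mq_hctx_has_pending() sees queued work, so it is cheap to call speculatively. Drivers commonly call it from completion or resource-release paths to restart a queue that previously hit BLK_STS_*RESOURCE; a hedged sketch where mydrv_slot_released is hypothetical:

#include <linux/blk-mq.h>

static void mydrv_slot_released(struct request *rq)
{
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

        /* A device slot freed up: re-run the hardware queue asynchronously. */
        blk_mq_run_hw_queue(hctx, true);
}
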
1728 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx); in blk_mq_get_sq_hctx() local
1730 if (!blk_mq_hctx_stopped(hctx)) in blk_mq_get_sq_hctx()
1731 return hctx; in blk_mq_get_sq_hctx()
1742 struct blk_mq_hw_ctx *hctx, *sq_hctx; in blk_mq_run_hw_queues() local
1748 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_run_hw_queues()
1749 if (blk_mq_hctx_stopped(hctx)) in blk_mq_run_hw_queues()
1756 if (!sq_hctx || sq_hctx == hctx || in blk_mq_run_hw_queues()
1757 !list_empty_careful(&hctx->dispatch)) in blk_mq_run_hw_queues()
1758 blk_mq_run_hw_queue(hctx, async); in blk_mq_run_hw_queues()
1770 struct blk_mq_hw_ctx *hctx, *sq_hctx; in blk_mq_delay_run_hw_queues() local
1776 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_delay_run_hw_queues()
1777 if (blk_mq_hctx_stopped(hctx)) in blk_mq_delay_run_hw_queues()
1784 if (!sq_hctx || sq_hctx == hctx || in blk_mq_delay_run_hw_queues()
1785 !list_empty_careful(&hctx->dispatch)) in blk_mq_delay_run_hw_queues()
1786 blk_mq_delay_run_hw_queue(hctx, msecs); in blk_mq_delay_run_hw_queues()
1800 struct blk_mq_hw_ctx *hctx; in blk_mq_queue_stopped() local
1803 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_queue_stopped()
1804 if (blk_mq_hctx_stopped(hctx)) in blk_mq_queue_stopped()
1820 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_stop_hw_queue() argument
1822 cancel_delayed_work(&hctx->run_work); in blk_mq_stop_hw_queue()
1824 set_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_stop_hw_queue()
1839 struct blk_mq_hw_ctx *hctx; in blk_mq_stop_hw_queues() local
1842 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_stop_hw_queues()
1843 blk_mq_stop_hw_queue(hctx); in blk_mq_stop_hw_queues()
1847 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx) in blk_mq_start_hw_queue() argument
1849 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_start_hw_queue()
1851 blk_mq_run_hw_queue(hctx, false); in blk_mq_start_hw_queue()
1857 struct blk_mq_hw_ctx *hctx; in blk_mq_start_hw_queues() local
1860 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_start_hw_queues()
1861 blk_mq_start_hw_queue(hctx); in blk_mq_start_hw_queues()
1865 void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) in blk_mq_start_stopped_hw_queue() argument
1867 if (!blk_mq_hctx_stopped(hctx)) in blk_mq_start_stopped_hw_queue()
1870 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); in blk_mq_start_stopped_hw_queue()
1871 blk_mq_run_hw_queue(hctx, async); in blk_mq_start_stopped_hw_queue()
1877 struct blk_mq_hw_ctx *hctx; in blk_mq_start_stopped_hw_queues() local
1880 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_start_stopped_hw_queues()
1881 blk_mq_start_stopped_hw_queue(hctx, async); in blk_mq_start_stopped_hw_queues()
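
The stop/start helpers above park dispatch entirely by setting BLK_MQ_S_STOPPED and cancelling run_work; unlike quiescing, a stopped queue simply is not run again until a start variant clears the bit. Drivers often use them as an error-recovery bracket; a hedged sketch where mydrv_recover and the reset step are hypothetical:

#include <linux/blk-mq.h>

static void mydrv_recover(struct request_queue *q)
{
        blk_mq_stop_hw_queues(q);               /* park every hardware queue */
        /* ... hypothetical controller reset ... */
        blk_mq_start_stopped_hw_queues(q, true);        /* restart, re-run async */
}
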
1887 struct blk_mq_hw_ctx *hctx; in blk_mq_run_work_fn() local
1889 hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work); in blk_mq_run_work_fn()
1894 if (blk_mq_hctx_stopped(hctx)) in blk_mq_run_work_fn()
1897 __blk_mq_run_hw_queue(hctx); in blk_mq_run_work_fn()
1900 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx, in __blk_mq_insert_req_list() argument
1905 enum hctx_type type = hctx->type; in __blk_mq_insert_req_list()
1909 trace_block_rq_insert(hctx->queue, rq); in __blk_mq_insert_req_list()
1917 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in __blk_mq_insert_request() argument
1925 trace_android_vh_blk_mq_insert_request(&skip, hctx, rq); in __blk_mq_insert_request()
1929 __blk_mq_insert_req_list(hctx, rq, at_head); in __blk_mq_insert_request()
1930 blk_mq_hctx_mark_pending(hctx, ctx); in __blk_mq_insert_request()
1945 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_request_bypass_insert() local
1947 spin_lock(&hctx->lock); in blk_mq_request_bypass_insert()
1949 list_add(&rq->queuelist, &hctx->dispatch); in blk_mq_request_bypass_insert()
1951 list_add_tail(&rq->queuelist, &hctx->dispatch); in blk_mq_request_bypass_insert()
1952 spin_unlock(&hctx->lock); in blk_mq_request_bypass_insert()
1955 blk_mq_run_hw_queue(hctx, false); in blk_mq_request_bypass_insert()
1958 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, in blk_mq_insert_requests() argument
1963 enum hctx_type type = hctx->type; in blk_mq_insert_requests()
1971 trace_block_rq_insert(hctx->queue, rq); in blk_mq_insert_requests()
1976 blk_mq_hctx_mark_pending(hctx, ctx); in blk_mq_insert_requests()
2048 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx, in __blk_mq_issue_directly() argument
2060 new_cookie = request_to_qc_t(hctx, rq); in __blk_mq_issue_directly()
2067 ret = q->mq_ops->queue_rq(hctx, &bd); in __blk_mq_issue_directly()
2070 blk_mq_update_dispatch_busy(hctx, false); in __blk_mq_issue_directly()
2075 blk_mq_update_dispatch_busy(hctx, true); in __blk_mq_issue_directly()
2079 blk_mq_update_dispatch_busy(hctx, false); in __blk_mq_issue_directly()
2087 static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, in __blk_mq_try_issue_directly() argument
2102 if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { in __blk_mq_try_issue_directly()
2119 return __blk_mq_issue_directly(hctx, rq, cookie, last); in __blk_mq_try_issue_directly()
2140 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx, in blk_mq_try_issue_directly() argument
2146 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); in blk_mq_try_issue_directly()
2148 hctx_lock(hctx, &srcu_idx); in blk_mq_try_issue_directly()
2150 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); in blk_mq_try_issue_directly()
2156 hctx_unlock(hctx, srcu_idx); in blk_mq_try_issue_directly()
2164 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_request_issue_directly() local
2166 hctx_lock(hctx, &srcu_idx); in blk_mq_request_issue_directly()
2167 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); in blk_mq_request_issue_directly()
2168 hctx_unlock(hctx, srcu_idx); in blk_mq_request_issue_directly()
2173 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, in blk_mq_try_issue_list_directly() argument
2205 hctx->queue->mq_ops->commit_rqs && queued) in blk_mq_try_issue_list_directly()
2206 hctx->queue->mq_ops->commit_rqs(hctx); in blk_mq_try_issue_list_directly()
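
Both the list-dispatch path (blk_mq_dispatch_rq_list() above) and the direct-issue paths call ->commit_rqs once per batch, so a driver can post many requests and ring a single doorbell. A hedged sketch of providing that hook, reusing the hypothetical mydrv_queue_rq from the earlier sketch plus a hypothetical mydrv_ring_doorbell():

#include <linux/blk-mq.h>

struct mydrv_queue;                                             /* hypothetical */
static void mydrv_ring_doorbell(struct mydrv_queue *dq);        /* hypothetical */
static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd);

/* ->queue_rq sees bd->last == false for all but the final request of a batch
 * and may defer the doorbell; ->commit_rqs flushes whatever was deferred. */
static void mydrv_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
        mydrv_ring_doorbell(hctx->driver_data);
}

static const struct blk_mq_ops mydrv_mq_ops = {
        .queue_rq       = mydrv_queue_rq,
        .commit_rqs     = mydrv_commit_rqs,
};
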
2293 cookie = request_to_qc_t(data.hctx, rq); in blk_mq_submit_bio()
2309 blk_mq_run_hw_queue(data.hctx, true); in blk_mq_submit_bio()
2356 data.hctx = same_queue_rq->mq_hctx; in blk_mq_submit_bio()
2358 blk_mq_try_issue_directly(data.hctx, same_queue_rq, in blk_mq_submit_bio()
2362 !data.hctx->dispatch_busy) { in blk_mq_submit_bio()
2367 blk_mq_try_issue_directly(data.hctx, rq, &cookie); in blk_mq_submit_bio()
2594 struct blk_mq_hw_ctx *hctx; member
2602 if (rq->mq_hctx != iter_data->hctx) in blk_mq_has_request()
2608 static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx) in blk_mq_hctx_has_requests() argument
2610 struct blk_mq_tags *tags = hctx->sched_tags ? in blk_mq_hctx_has_requests()
2611 hctx->sched_tags : hctx->tags; in blk_mq_hctx_has_requests()
2613 .hctx = hctx, in blk_mq_hctx_has_requests()
2621 struct blk_mq_hw_ctx *hctx) in blk_mq_last_cpu_in_hctx() argument
2623 if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu) in blk_mq_last_cpu_in_hctx()
2625 if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids) in blk_mq_last_cpu_in_hctx()
2632 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, in blk_mq_hctx_notify_offline() local
2635 if (!cpumask_test_cpu(cpu, hctx->cpumask) || in blk_mq_hctx_notify_offline()
2636 !blk_mq_last_cpu_in_hctx(cpu, hctx)) in blk_mq_hctx_notify_offline()
2646 set_bit(BLK_MQ_S_INACTIVE, &hctx->state); in blk_mq_hctx_notify_offline()
2654 if (percpu_ref_tryget(&hctx->queue->q_usage_counter)) { in blk_mq_hctx_notify_offline()
2655 while (blk_mq_hctx_has_requests(hctx)) in blk_mq_hctx_notify_offline()
2657 percpu_ref_put(&hctx->queue->q_usage_counter); in blk_mq_hctx_notify_offline()
2665 struct blk_mq_hw_ctx *hctx = hlist_entry_safe(node, in blk_mq_hctx_notify_online() local
2668 if (cpumask_test_cpu(cpu, hctx->cpumask)) in blk_mq_hctx_notify_online()
2669 clear_bit(BLK_MQ_S_INACTIVE, &hctx->state); in blk_mq_hctx_notify_online()
2680 struct blk_mq_hw_ctx *hctx; in blk_mq_hctx_notify_dead() local
2685 hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead); in blk_mq_hctx_notify_dead()
2686 if (!cpumask_test_cpu(cpu, hctx->cpumask)) in blk_mq_hctx_notify_dead()
2689 ctx = __blk_mq_get_ctx(hctx->queue, cpu); in blk_mq_hctx_notify_dead()
2690 type = hctx->type; in blk_mq_hctx_notify_dead()
2695 blk_mq_hctx_clear_pending(hctx, ctx); in blk_mq_hctx_notify_dead()
2702 spin_lock(&hctx->lock); in blk_mq_hctx_notify_dead()
2703 list_splice_tail_init(&tmp, &hctx->dispatch); in blk_mq_hctx_notify_dead()
2704 spin_unlock(&hctx->lock); in blk_mq_hctx_notify_dead()
2706 blk_mq_run_hw_queue(hctx, true); in blk_mq_hctx_notify_dead()
2710 static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx) in blk_mq_remove_cpuhp() argument
2712 if (!(hctx->flags & BLK_MQ_F_STACKING)) in blk_mq_remove_cpuhp()
2714 &hctx->cpuhp_online); in blk_mq_remove_cpuhp()
2716 &hctx->cpuhp_dead); in blk_mq_remove_cpuhp()
2751 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx) in blk_mq_exit_hctx() argument
2753 struct request *flush_rq = hctx->fq->flush_rq; in blk_mq_exit_hctx()
2755 if (blk_mq_hw_queue_mapped(hctx)) in blk_mq_exit_hctx()
2756 blk_mq_tag_idle(hctx); in blk_mq_exit_hctx()
2764 set->ops->exit_hctx(hctx, hctx_idx); in blk_mq_exit_hctx()
2766 blk_mq_remove_cpuhp(hctx); in blk_mq_exit_hctx()
2769 list_add(&hctx->hctx_list, &q->unused_hctx_list); in blk_mq_exit_hctx()
2776 struct blk_mq_hw_ctx *hctx; in blk_mq_exit_hw_queues() local
2779 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_exit_hw_queues()
2782 blk_mq_debugfs_unregister_hctx(hctx); in blk_mq_exit_hw_queues()
2783 blk_mq_exit_hctx(q, set, hctx, i); in blk_mq_exit_hw_queues()
2803 struct blk_mq_hw_ctx *hctx, unsigned hctx_idx) in blk_mq_init_hctx() argument
2805 hctx->queue_num = hctx_idx; in blk_mq_init_hctx()
2807 if (!(hctx->flags & BLK_MQ_F_STACKING)) in blk_mq_init_hctx()
2809 &hctx->cpuhp_online); in blk_mq_init_hctx()
2810 cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); in blk_mq_init_hctx()
2812 hctx->tags = set->tags[hctx_idx]; in blk_mq_init_hctx()
2815 set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) in blk_mq_init_hctx()
2818 if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, in blk_mq_init_hctx()
2819 hctx->numa_node)) in blk_mq_init_hctx()
2825 set->ops->exit_hctx(hctx, hctx_idx); in blk_mq_init_hctx()
2827 blk_mq_remove_cpuhp(hctx); in blk_mq_init_hctx()
2835 struct blk_mq_hw_ctx *hctx; in blk_mq_alloc_hctx() local
2838 hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node); in blk_mq_alloc_hctx()
2839 if (!hctx) in blk_mq_alloc_hctx()
2842 if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node)) in blk_mq_alloc_hctx()
2845 atomic_set(&hctx->nr_active, 0); in blk_mq_alloc_hctx()
2848 hctx->numa_node = node; in blk_mq_alloc_hctx()
2850 INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn); in blk_mq_alloc_hctx()
2851 spin_lock_init(&hctx->lock); in blk_mq_alloc_hctx()
2852 INIT_LIST_HEAD(&hctx->dispatch); in blk_mq_alloc_hctx()
2853 hctx->queue = q; in blk_mq_alloc_hctx()
2854 hctx->flags = set->flags & ~BLK_MQ_F_TAG_QUEUE_SHARED; in blk_mq_alloc_hctx()
2856 INIT_LIST_HEAD(&hctx->hctx_list); in blk_mq_alloc_hctx()
2862 hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *), in blk_mq_alloc_hctx()
2864 if (!hctx->ctxs) in blk_mq_alloc_hctx()
2867 if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), in blk_mq_alloc_hctx()
2870 hctx->nr_ctx = 0; in blk_mq_alloc_hctx()
2872 spin_lock_init(&hctx->dispatch_wait_lock); in blk_mq_alloc_hctx()
2873 init_waitqueue_func_entry(&hctx->dispatch_wait, blk_mq_dispatch_wake); in blk_mq_alloc_hctx()
2874 INIT_LIST_HEAD(&hctx->dispatch_wait.entry); in blk_mq_alloc_hctx()
2876 hctx->fq = blk_alloc_flush_queue(hctx->numa_node, set->cmd_size, gfp); in blk_mq_alloc_hctx()
2877 if (!hctx->fq) in blk_mq_alloc_hctx()
2880 if (hctx->flags & BLK_MQ_F_BLOCKING) in blk_mq_alloc_hctx()
2881 init_srcu_struct(hctx->srcu); in blk_mq_alloc_hctx()
2882 blk_mq_hctx_kobj_init(hctx); in blk_mq_alloc_hctx()
2884 return hctx; in blk_mq_alloc_hctx()
2887 sbitmap_free(&hctx->ctx_map); in blk_mq_alloc_hctx()
2889 kfree(hctx->ctxs); in blk_mq_alloc_hctx()
2891 free_cpumask_var(hctx->cpumask); in blk_mq_alloc_hctx()
2893 kfree(hctx); in blk_mq_alloc_hctx()
2906 struct blk_mq_hw_ctx *hctx; in blk_mq_init_cpu_queues() local
2921 hctx = blk_mq_map_queue_type(q, j, i); in blk_mq_init_cpu_queues()
2922 if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE) in blk_mq_init_cpu_queues()
2923 hctx->numa_node = cpu_to_node(i); in blk_mq_init_cpu_queues()
2964 struct blk_mq_hw_ctx *hctx; in blk_mq_map_swqueue() local
2968 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
2969 cpumask_clear(hctx->cpumask); in blk_mq_map_swqueue()
2970 hctx->nr_ctx = 0; in blk_mq_map_swqueue()
2971 hctx->dispatch_from = NULL; in blk_mq_map_swqueue()
3001 hctx = blk_mq_map_queue_type(q, j, i); in blk_mq_map_swqueue()
3002 ctx->hctxs[j] = hctx; in blk_mq_map_swqueue()
3008 if (cpumask_test_cpu(i, hctx->cpumask)) in blk_mq_map_swqueue()
3011 cpumask_set_cpu(i, hctx->cpumask); in blk_mq_map_swqueue()
3012 hctx->type = j; in blk_mq_map_swqueue()
3013 ctx->index_hw[hctx->type] = hctx->nr_ctx; in blk_mq_map_swqueue()
3014 hctx->ctxs[hctx->nr_ctx++] = ctx; in blk_mq_map_swqueue()
3020 BUG_ON(!hctx->nr_ctx); in blk_mq_map_swqueue()
3028 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_map_swqueue()
3033 if (!hctx->nr_ctx) { in blk_mq_map_swqueue()
3041 hctx->tags = NULL; in blk_mq_map_swqueue()
3045 hctx->tags = set->tags[i]; in blk_mq_map_swqueue()
3046 WARN_ON(!hctx->tags); in blk_mq_map_swqueue()
3053 sbitmap_resize(&hctx->ctx_map, hctx->nr_ctx); in blk_mq_map_swqueue()
3058 hctx->next_cpu = blk_mq_first_mapped_cpu(hctx); in blk_mq_map_swqueue()
3059 hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH; in blk_mq_map_swqueue()
3069 struct blk_mq_hw_ctx *hctx; in queue_set_hctx_shared() local
3072 queue_for_each_hw_ctx(q, hctx, i) { in queue_set_hctx_shared()
3074 hctx->flags |= BLK_MQ_F_TAG_QUEUE_SHARED; in queue_set_hctx_shared()
3076 hctx->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; in queue_set_hctx_shared()
3167 struct blk_mq_hw_ctx *hctx, *next; in blk_mq_release() local
3170 queue_for_each_hw_ctx(q, hctx, i) in blk_mq_release()
3171 WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list)); in blk_mq_release()
3174 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { in blk_mq_release()
3175 list_del_init(&hctx->hctx_list); in blk_mq_release()
3176 kobject_put(&hctx->kobj); in blk_mq_release()
3254 struct blk_mq_hw_ctx *hctx = NULL, *tmp; in blk_mq_alloc_and_init_hctx() local
3260 hctx = tmp; in blk_mq_alloc_and_init_hctx()
3264 if (hctx) in blk_mq_alloc_and_init_hctx()
3265 list_del_init(&hctx->hctx_list); in blk_mq_alloc_and_init_hctx()
3268 if (!hctx) in blk_mq_alloc_and_init_hctx()
3269 hctx = blk_mq_alloc_hctx(q, set, node); in blk_mq_alloc_and_init_hctx()
3270 if (!hctx) in blk_mq_alloc_and_init_hctx()
3273 if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) in blk_mq_alloc_and_init_hctx()
3276 return hctx; in blk_mq_alloc_and_init_hctx()
3279 kobject_put(&hctx->kobj); in blk_mq_alloc_and_init_hctx()
3310 struct blk_mq_hw_ctx *hctx; in blk_mq_realloc_hw_ctxs() local
3321 hctx = blk_mq_alloc_and_init_hctx(set, q, i, node); in blk_mq_realloc_hw_ctxs()
3322 if (hctx) { in blk_mq_realloc_hw_ctxs()
3325 hctxs[i] = hctx; in blk_mq_realloc_hw_ctxs()
3349 struct blk_mq_hw_ctx *hctx = hctxs[j]; in blk_mq_realloc_hw_ctxs() local
3351 if (hctx) { in blk_mq_realloc_hw_ctxs()
3352 if (hctx->tags) in blk_mq_realloc_hw_ctxs()
3354 blk_mq_exit_hctx(q, set, hctx, j); in blk_mq_realloc_hw_ctxs()
3687 struct blk_mq_hw_ctx *hctx; in blk_mq_update_nr_requests() local
3700 queue_for_each_hw_ctx(q, hctx, i) { in blk_mq_update_nr_requests()
3701 if (!hctx->tags) in blk_mq_update_nr_requests()
3707 if (!hctx->sched_tags) { in blk_mq_update_nr_requests()
3708 ret = blk_mq_tag_update_depth(hctx, &hctx->tags, nr, in blk_mq_update_nr_requests()
3713 ret = blk_mq_tag_update_depth(hctx, &hctx->sched_tags, in blk_mq_update_nr_requests()
3719 q->elevator->type->ops.depth_updated(hctx); in blk_mq_update_nr_requests()
3995 struct blk_mq_hw_ctx *hctx, blk_qc_t cookie) in blk_mq_poll_hybrid() argument
4003 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); in blk_mq_poll_hybrid()
4005 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); in blk_mq_poll_hybrid()
4033 struct blk_mq_hw_ctx *hctx; in blk_poll() local
4043 hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; in blk_poll()
4052 if (blk_mq_poll_hybrid(q, hctx, cookie)) in blk_poll()
4055 hctx->poll_considered++; in blk_poll()
4061 hctx->poll_invoked++; in blk_poll()
4063 ret = q->mq_ops->poll(hctx); in blk_poll()
4065 hctx->poll_success++; in blk_poll()
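
blk_poll() resolves the qc cookie to an hctx, optionally sleeps in blk_mq_poll_hybrid(), then loops calling the driver's ->poll until the awaited request completes; a non-zero return is counted as poll_success. A hedged sketch of a driver-side ->poll, where struct mydrv_queue and mydrv_reap_completions() are hypothetical:

#include <linux/blk-mq.h>

struct mydrv_queue;                                             /* hypothetical */
static int mydrv_reap_completions(struct mydrv_queue *dq);      /* hypothetical */

/* Reap the completion queue bound to this hctx and report how many
 * requests were completed during this call. */
static int mydrv_poll(struct blk_mq_hw_ctx *hctx)
{
        return mydrv_reap_completions(hctx->driver_data);
}
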