Lines matching references to ctx in block/blk-mq.c. Each entry gives the source line number, the matching fragment, and the enclosing function; the "argument" and "local" annotations mark lines where ctx is declared rather than merely referenced.
82 struct blk_mq_ctx *ctx) in blk_mq_hctx_mark_pending() argument
84 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_mark_pending()
91 struct blk_mq_ctx *ctx) in blk_mq_hctx_clear_pending() argument
93 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_clear_pending()
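The two helpers above implement pending-work tracking between software and hardware queues: each blk_mq_ctx owns one bit in hctx->ctx_map (an sbitmap), and ctx->index_hw[hctx->type] is that bit's position. A sketch of what these matches imply, mirroring the mainline implementation around v5.10 (details vary across kernel versions):

/* Flag this software queue as having queued work on the hctx. */
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
                                     struct blk_mq_ctx *ctx)
{
        const int bit = ctx->index_hw[hctx->type];

        /* Test first so an already-set bit doesn't dirty the cacheline. */
        if (!sbitmap_test_bit(&hctx->ctx_map, bit))
                sbitmap_set_bit(&hctx->ctx_map, bit);
}

/* Drop the flag once the ctx's request lists have been drained. */
static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
                                      struct blk_mq_ctx *ctx)
{
        const int bit = ctx->index_hw[hctx->type];

        sbitmap_clear_bit(&hctx->ctx_map, bit);
}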
301 rq->mq_ctx = data->ctx; in blk_mq_rq_ctx_init()
336 data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++; in blk_mq_rq_ctx_init()
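blk_mq_rq_ctx_init() stamps a freshly allocated request with the software queue it came from and bumps a per-ctx allocation counter used for statistics. An annotated fragment, reconstructed from the two matches above:

/* Bind the new request to the software queue that allocated it; the
 * rest of blk_mq_rq_ctx_init() fills in timestamps, flags, etc. */
rq->mq_ctx = data->ctx;

/* Per-ctx accounting, indexed 0 = async, 1 = sync. */
data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;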
384 data->ctx = blk_mq_get_ctx(q); in __blk_mq_alloc_request()
385 data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx); in __blk_mq_alloc_request()
483 data.ctx = __blk_mq_get_ctx(q, cpu); in blk_mq_alloc_request_hctx()
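The two allocation paths differ only in how the ctx is chosen: the common path takes whatever CPU the caller happens to run on, while blk_mq_alloc_request_hctx() (used for requests that must reach a specific hardware queue) picks an online CPU served by that hctx and uses its ctx. A fragment mirroring the mainline code:

/* __blk_mq_alloc_request(): current CPU's software queue, then map
 * it together with the command flags onto a hardware queue. */
data->ctx = blk_mq_get_ctx(q);
data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);

/* blk_mq_alloc_request_hctx(): 'cpu' was picked from the target
 * hctx's cpumask, so this ctx is guaranteed to map to that hctx. */
data.ctx = __blk_mq_get_ctx(q, cpu);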
503 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_free_request() local
511 blk_mq_put_tag(hctx->tags, ctx, rq->tag); in __blk_mq_free_request()
513 blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag); in __blk_mq_free_request()
522 struct blk_mq_ctx *ctx = rq->mq_ctx; in blk_mq_free_request() local
534 ctx->rq_completed[rq_is_sync(rq)]++; in blk_mq_free_request()
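On the free side, both the driver tag and the scheduler tag are returned with the owning ctx as a hint so that tag caching stays CPU-local, and blk_mq_free_request() keeps a per-ctx completion counter symmetrical to rq_dispatched. A fragment mirroring the mainline code:

/* __blk_mq_free_request(): release tags back to their sets, passing
 * the ctx so the tag sbitmap can prefer the same CPU next time. */
if (rq->tag != BLK_MQ_NO_TAG)
        blk_mq_put_tag(hctx->tags, ctx, rq->tag);
if (sched_tag != BLK_MQ_NO_TAG)
        blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);

/* blk_mq_free_request(): per-ctx stat, indexed 0 = async, 1 = sync. */
ctx->rq_completed[rq_is_sync(rq)]++;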
1005 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; in flush_busy_ctx() local
1008 spin_lock(&ctx->lock); in flush_busy_ctx()
1009 list_splice_tail_init(&ctx->rq_lists[type], flush_data->list); in flush_busy_ctx()
1011 spin_unlock(&ctx->lock); in flush_busy_ctx()
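flush_busy_ctx() is the callback run by sbitmap_for_each_set() over hctx->ctx_map: for every ctx whose pending bit is set, it splices that ctx's request list onto a dispatch list and clears the bit. A sketch mirroring the mainline implementation:

struct flush_busy_ctx_data {
        struct blk_mq_hw_ctx *hctx;
        struct list_head *list;
};

/* Invoked once per set bit; bitnr indexes hctx->ctxs[]. */
static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
{
        struct flush_busy_ctx_data *flush_data = data;
        struct blk_mq_hw_ctx *hctx = flush_data->hctx;
        struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
        enum hctx_type type = hctx->type;

        spin_lock(&ctx->lock);
        list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
        sbitmap_clear_bit(sb, bitnr);   /* list drained: ctx not pending */
        spin_unlock(&ctx->lock);
        return true;                    /* continue the bitmap walk */
}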
1040 struct blk_mq_ctx *ctx = hctx->ctxs[bitnr]; in dispatch_rq_from_ctx() local
1043 spin_lock(&ctx->lock); in dispatch_rq_from_ctx()
1044 if (!list_empty(&ctx->rq_lists[type])) { in dispatch_rq_from_ctx()
1045 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next); in dispatch_rq_from_ctx()
1047 if (list_empty(&ctx->rq_lists[type])) in dispatch_rq_from_ctx()
1050 spin_unlock(&ctx->lock); in dispatch_rq_from_ctx()
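dispatch_rq_from_ctx() is the finer-grained sibling: instead of draining a whole ctx it pops a single request, which lets blk_mq_dequeue_from_ctx() round-robin across software queues when the device imposes a dispatch budget. A sketch mirroring the mainline implementation:

/* Take one request from this ctx; clear the pending bit only if that
 * emptied the list. */
static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
                                 void *data)
{
        struct dispatch_rq_data *dispatch_data = data;
        struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
        struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
        enum hctx_type type = hctx->type;

        spin_lock(&ctx->lock);
        if (!list_empty(&ctx->rq_lists[type])) {
                dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
                list_del_init(&dispatch_data->rq->queuelist);
                if (list_empty(&ctx->rq_lists[type]))
                        sbitmap_clear_bit(sb, bitnr);
        }
        spin_unlock(&ctx->lock);

        /* Returning false stops the walk once a request was found. */
        return !dispatch_data->rq;
}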
1665 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); in blk_mq_get_sq_hctx() local
1673 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx); in blk_mq_get_sq_hctx()
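blk_mq_get_sq_hctx() supports single-queue I/O schedulers on multi-queue hardware: rather than spreading dispatch over every hardware queue (which only causes scheduler lock contention and cache bouncing), it funnels everything through the hctx that the current CPU's ctx maps to. A sketch mirroring the mainline implementation:

static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
{
        struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
        /* Map type 0 (HCTX_TYPE_DEFAULT) for the current CPU's ctx. */
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, 0, ctx);

        if (!blk_mq_hctx_stopped(hctx))
                return hctx;
        return NULL;
}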
1850 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_insert_req_list() local
1853 lockdep_assert_held(&ctx->lock); in __blk_mq_insert_req_list()
1858 list_add(&rq->queuelist, &ctx->rq_lists[type]); in __blk_mq_insert_req_list()
1860 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]); in __blk_mq_insert_req_list()
1866 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_insert_request() local
1868 lockdep_assert_held(&ctx->lock); in __blk_mq_insert_request()
1871 blk_mq_hctx_mark_pending(hctx, ctx); in __blk_mq_insert_request()
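__blk_mq_insert_req_list() places one request on its ctx's list for the hctx's type, at the head or tail, and __blk_mq_insert_request() wraps it with the pending-bit update; both rely on the caller holding ctx->lock, which the lockdep assertions enforce. A sketch mirroring the mainline implementation (tracepoints omitted):

static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
                                            struct request *rq, bool at_head)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        enum hctx_type type = hctx->type;

        lockdep_assert_held(&ctx->lock);

        if (at_head)
                list_add(&rq->queuelist, &ctx->rq_lists[type]);
        else
                list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
}

void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                             bool at_head)
{
        struct blk_mq_ctx *ctx = rq->mq_ctx;

        lockdep_assert_held(&ctx->lock);

        __blk_mq_insert_req_list(hctx, rq, at_head);
        /* Make the dispatch side see this ctx as having work. */
        blk_mq_hctx_mark_pending(hctx, ctx);
}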
1899 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, in blk_mq_insert_requests() argument
1911 BUG_ON(rq->mq_ctx != ctx); in blk_mq_insert_requests()
1915 spin_lock(&ctx->lock); in blk_mq_insert_requests()
1916 list_splice_tail_init(list, &ctx->rq_lists[type]); in blk_mq_insert_requests()
1917 blk_mq_hctx_mark_pending(hctx, ctx); in blk_mq_insert_requests()
1918 spin_unlock(&ctx->lock); in blk_mq_insert_requests()
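blk_mq_insert_requests() is the batched variant used when flushing a plug list: every request on the list already belongs to the same ctx (the BUG_ON asserts this), so a single lock round-trip splices them all and marks the ctx pending once. A sketch mirroring the mainline implementation (tracepoints omitted):

void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
                            struct list_head *list)
{
        struct request *rq;
        enum hctx_type type = hctx->type;

        list_for_each_entry(rq, list, queuelist)
                BUG_ON(rq->mq_ctx != ctx);

        spin_lock(&ctx->lock);
        list_splice_tail_init(list, &ctx->rq_lists[type]);
        blk_mq_hctx_mark_pending(hctx, ctx);
        spin_unlock(&ctx->lock);
}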
2614 struct blk_mq_ctx *ctx; in blk_mq_hctx_notify_dead() local
2622 ctx = __blk_mq_get_ctx(hctx->queue, cpu); in blk_mq_hctx_notify_dead()
2625 spin_lock(&ctx->lock); in blk_mq_hctx_notify_dead()
2626 if (!list_empty(&ctx->rq_lists[type])) { in blk_mq_hctx_notify_dead()
2627 list_splice_init(&ctx->rq_lists[type], &tmp); in blk_mq_hctx_notify_dead()
2628 blk_mq_hctx_clear_pending(hctx, ctx); in blk_mq_hctx_notify_dead()
2630 spin_unlock(&ctx->lock); in blk_mq_hctx_notify_dead()
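blk_mq_hctx_notify_dead() is the CPU-hotplug teardown hook: when a CPU dies, its software queue can no longer be drained by normal dispatch, so any requests still sitting there are stolen onto a temporary list (clearing the pending bit along the way) and later spliced onto hctx->dispatch to be run from another CPU. A fragment mirroring the mainline code; tmp is a local LIST_HEAD:

ctx = __blk_mq_get_ctx(hctx->queue, cpu);
type = hctx->type;

spin_lock(&ctx->lock);
if (!list_empty(&ctx->rq_lists[type])) {
        /* Move everything to 'tmp'; the ctx is now empty. */
        list_splice_init(&ctx->rq_lists[type], &tmp);
        blk_mq_hctx_clear_pending(hctx, ctx);
}
spin_unlock(&ctx->lock);

/* Later: splice 'tmp' onto hctx->dispatch and rerun the hw queue. */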
2898 struct blk_mq_ctx *ctx; in blk_mq_map_swqueue() local
2914 ctx = per_cpu_ptr(q->queue_ctx, i); in blk_mq_map_swqueue()
2917 ctx->hctxs[j] = blk_mq_map_queue_type(q, in blk_mq_map_swqueue()
2935 ctx->hctxs[j] = hctx; in blk_mq_map_swqueue()
2946 ctx->index_hw[hctx->type] = hctx->nr_ctx; in blk_mq_map_swqueue()
2947 hctx->ctxs[hctx->nr_ctx++] = ctx; in blk_mq_map_swqueue()
2957 ctx->hctxs[j] = blk_mq_map_queue_type(q, in blk_mq_map_swqueue()
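blk_mq_map_swqueue() rebuilds the ctx-to-hctx mapping whenever queues are (re)initialized: for every possible CPU and every queue-map type it records which hardware queue serves that ctx (ctx->hctxs[j]), registers the ctx in the hctx's ctxs[] array, and stores the array index in ctx->index_hw so the pending-bit helpers above know which bit belongs to this ctx. A simplified sketch reconstructed from the matches above (error handling and several details omitted):

for_each_possible_cpu(i) {
        ctx = per_cpu_ptr(q->queue_ctx, i);
        for (j = 0; j < set->nr_maps; j++) {
                hctx = blk_mq_map_queue_type(q, j, i);
                ctx->hctxs[j] = hctx;
                if (cpumask_test_cpu(i, hctx->cpumask))
                        continue;       /* already mapped via another type */

                cpumask_set_cpu(i, hctx->cpumask);
                /* This ctx's slot in hctx->ctxs[] doubles as its bit
                 * number in hctx->ctx_map. */
                ctx->index_hw[hctx->type] = hctx->nr_ctx;
                hctx->ctxs[hctx->nr_ctx++] = ctx;
        }
        /* Unused map types fall back to the default hardware queue. */
        for (; j < HCTX_MAX_TYPES; j++)
                ctx->hctxs[j] = blk_mq_map_queue_type(q,
                                HCTX_TYPE_DEFAULT, i);
}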
3081 struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu); in blk_mq_alloc_ctxs() local
3082 ctx->ctxs = ctxs; in blk_mq_alloc_ctxs()
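Finally, blk_mq_alloc_ctxs() creates the per-CPU software queues themselves and gives each one a back-pointer to the shared blk_mq_ctxs container, used for kobject lifetime management. A sketch mirroring the mainline implementation:

static int blk_mq_alloc_ctxs(struct request_queue *q)
{
        struct blk_mq_ctxs *ctxs;
        int cpu;

        ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
        if (!ctxs)
                return -ENOMEM;

        ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
        if (!ctxs->queue_ctx)
                goto fail;

        for_each_possible_cpu(cpu) {
                struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
                ctx->ctxs = ctxs;       /* back-pointer to the container */
        }

        q->queue_ctx = ctxs->queue_ctx;
        return 0;

fail:
        kfree(ctxs);
        return -ENOMEM;
}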