Searched refs:ctx (Results 1 – 16 of 16) sorted by relevance

/block/
blk-mq-sysfs.c
28 struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj); in blk_mq_ctx_sysfs_release() local
31 kobject_put(&ctx->ctxs->kobj); in blk_mq_ctx_sysfs_release()
64 struct blk_mq_ctx *ctx; in blk_mq_sysfs_show() local
69 ctx = container_of(kobj, struct blk_mq_ctx, kobj); in blk_mq_sysfs_show()
70 q = ctx->queue; in blk_mq_sysfs_show()
76 res = entry->show(ctx, page); in blk_mq_sysfs_show()
85 struct blk_mq_ctx *ctx; in blk_mq_sysfs_store() local
90 ctx = container_of(kobj, struct blk_mq_ctx, kobj); in blk_mq_sysfs_store()
91 q = ctx->queue; in blk_mq_sysfs_store()
97 res = entry->store(ctx, page, length); in blk_mq_sysfs_store()
[all …]
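The blk-mq-sysfs.c hits all follow the standard kobject pattern: each callback receives the embedded kobject and recovers the enclosing blk_mq_ctx with container_of(). A minimal userspace sketch of that pointer arithmetic (the ctx/kobj struct names below are illustrative stand-ins, not the kernel types):

#include <stddef.h>
#include <stdio.h>

/* container_of: recover the address of the enclosing struct from a
 * pointer to one of its members, exactly as blk_mq_ctx_sysfs_release()
 * does with the kobj embedded in struct blk_mq_ctx. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kobj { int refcount; };               /* stand-in for struct kobject */
struct ctx  { int cpu; struct kobj kobj; };  /* stand-in for blk_mq_ctx */

int main(void)
{
	struct ctx c = { .cpu = 3 };
	struct kobj *kp = &c.kobj;          /* what the callback receives */
	struct ctx *back = container_of(kp, struct ctx, kobj);

	printf("cpu = %d\n", back->cpu);    /* prints 3 */
	return 0;
}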
blk-mq-sched.c
212 struct blk_mq_ctx *ctx) in blk_mq_next_ctx() argument
214 unsigned short idx = ctx->index_hw[hctx->type]; in blk_mq_next_ctx()
234 struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from); in blk_mq_do_dispatch_ctx() local
250 rq = blk_mq_dequeue_from_ctx(hctx, ctx); in blk_mq_do_dispatch_ctx()
272 ctx = blk_mq_next_ctx(hctx, rq->mq_ctx); in blk_mq_do_dispatch_ctx()
276 WRITE_ONCE(hctx->dispatch_from, ctx); in blk_mq_do_dispatch_ctx()
357 struct blk_mq_ctx *ctx; in __blk_mq_sched_bio_merge() local
365 ctx = blk_mq_get_ctx(q); in __blk_mq_sched_bio_merge()
366 hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); in __blk_mq_sched_bio_merge()
369 list_empty_careful(&ctx->rq_lists[type])) in __blk_mq_sched_bio_merge()
[all …]
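blk_mq_do_dispatch_ctx() walks the software contexts mapped to a hardware queue round-robin, caching where it stopped in hctx->dispatch_from so the next dispatch resumes there and no ctx starves. A hypothetical model of that cursor (sw_ctx, hw_ctx, and next_ctx are invented names):

#include <stdio.h>

/* Each hardware queue owns a ring of software contexts and remembers
 * where the last dispatch stopped, like hctx->dispatch_from. */
struct sw_ctx { int id; };

struct hw_ctx {
	struct sw_ctx *ctxs;
	unsigned int nr_ctx;
	unsigned int dispatch_from;   /* round-robin cursor */
};

static struct sw_ctx *next_ctx(struct hw_ctx *h)
{
	struct sw_ctx *c = &h->ctxs[h->dispatch_from];

	h->dispatch_from = (h->dispatch_from + 1) % h->nr_ctx;
	return c;
}

int main(void)
{
	struct sw_ctx ctxs[3] = { {0}, {1}, {2} };
	struct hw_ctx h = { ctxs, 3, 0 };

	for (int i = 0; i < 5; i++)
		printf("dispatch from ctx %d\n", next_ctx(&h)->id);
	return 0;
}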
blk-mq-debugfs.c
633 __acquires(&ctx->lock) \
635 struct blk_mq_ctx *ctx = m->private; \
637 spin_lock(&ctx->lock); \
638 return seq_list_start(&ctx->rq_lists[type], *pos); \
644 struct blk_mq_ctx *ctx = m->private; \
646 return seq_list_next(v, &ctx->rq_lists[type], pos); \
650 __releases(&ctx->lock) \
652 struct blk_mq_ctx *ctx = m->private; \
654 spin_unlock(&ctx->lock); \
670 struct blk_mq_ctx *ctx = data; in ctx_dispatched_show() local
[all …]
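The blk-mq-debugfs.c hits are a seq_file start/next/stop triple: start() takes ctx->lock, next() advances under it, and stop() releases it, with __acquires/__releases annotating the locking for sparse. A rough userspace analogue of that contract, using a pthread mutex in place of the spinlock (all names here are invented):

#include <pthread.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t lock;
	int rqs[4];
	int nr;
};

static int *rq_start(struct ctx *c, int *pos)
{
	pthread_mutex_lock(&c->lock);             /* __acquires(&ctx->lock) */
	return *pos < c->nr ? &c->rqs[*pos] : NULL;
}

static int *rq_next(struct ctx *c, int *pos)
{
	return ++*pos < c->nr ? &c->rqs[*pos] : NULL;
}

static void rq_stop(struct ctx *c)
{
	pthread_mutex_unlock(&c->lock);           /* __releases(&ctx->lock) */
}

int main(void)
{
	struct ctx c = { PTHREAD_MUTEX_INITIALIZER, {10, 11, 12}, 3 };
	int pos = 0;

	/* the whole walk over the request list happens under the lock */
	for (int *rq = rq_start(&c, &pos); rq; rq = rq_next(&c, &pos))
		printf("rq %d\n", *rq);
	rq_stop(&c);
	return 0;
}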
blk-mq.c
83 struct blk_mq_ctx *ctx) in blk_mq_hctx_mark_pending() argument
85 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_mark_pending()
92 struct blk_mq_ctx *ctx) in blk_mq_hctx_clear_pending() argument
94 const int bit = ctx->index_hw[hctx->type]; in blk_mq_hctx_clear_pending()
294 rq->mq_ctx = data->ctx; in blk_mq_rq_ctx_init()
329 data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++; in blk_mq_rq_ctx_init()
378 data->ctx = blk_mq_get_ctx(q); in __blk_mq_alloc_request()
379 data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx); in __blk_mq_alloc_request()
479 data.ctx = __blk_mq_get_ctx(q, cpu); in blk_mq_alloc_request_hctx()
499 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_free_request() local
[all …]
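blk_mq_hctx_mark_pending() and blk_mq_hctx_clear_pending() give each software context one bit, its index_hw for the hctx type, in the hardware queue's pending bitmap. A toy model using a plain word where the kernel uses an sbitmap:

#include <stdio.h>

struct hw_ctx { unsigned long pending; };

static void mark_pending(struct hw_ctx *h, unsigned int bit)
{
	h->pending |= 1UL << bit;
}

static void clear_pending(struct hw_ctx *h, unsigned int bit)
{
	h->pending &= ~(1UL << bit);
}

int main(void)
{
	struct hw_ctx h = { 0 };

	mark_pending(&h, 2);                      /* ctx with index_hw == 2 */
	printf("pending = %#lx\n", h.pending);    /* 0x4 */
	clear_pending(&h, 2);
	printf("pending = %#lx\n", h.pending);    /* 0 */
	return 0;
}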
blk-mq.h
75 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
109 struct blk_mq_ctx *ctx) in blk_mq_map_queue() argument
121 return ctx->hctxs[type]; in blk_mq_map_queue()
161 struct blk_mq_ctx *ctx; member
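The blk-mq.h hit shows the heart of the ctx-to-hctx mapping: blk_mq_map_queue() is just an array lookup, ctx->hctxs[type], keyed by the queue type derived from the bio flags. A minimal sketch under invented names:

#include <stdio.h>

enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

struct hw_ctx { int queue_num; };

/* one cached hardware-queue pointer per queue type, as in blk_mq_ctx */
struct sw_ctx { struct hw_ctx *hctxs[HCTX_MAX_TYPES]; };

static struct hw_ctx *map_queue(struct sw_ctx *ctx, enum hctx_type type)
{
	return ctx->hctxs[type];   /* mirrors: return ctx->hctxs[type]; */
}

int main(void)
{
	struct hw_ctx def = { 0 }, poll = { 1 };
	struct sw_ctx ctx = { { &def, &def, &poll } };

	printf("poll maps to hw queue %d\n",
	       map_queue(&ctx, HCTX_TYPE_POLL)->queue_num);
	return 0;
}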
blk-mq-tag.c
149 data->ctx = blk_mq_get_ctx(data->q); in blk_mq_get_tag()
151 data->ctx); in blk_mq_get_tag()
177 blk_mq_put_tag(tags, data->ctx, tag + tag_offset); in blk_mq_get_tag()
183 void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx, in blk_mq_put_tag() argument
190 sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu); in blk_mq_put_tag()
193 sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu); in blk_mq_put_tag()
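blk_mq_put_tag() keeps reserved and normal tags in separate bitmaps: tags below the reserved count clear a bit in breserved_tags, anything else clears bit tag - nr_reserved_tags ("real_tag") in bitmap_tags. A hypothetical single-word model of that split:

#include <stdio.h>

#define NR_RESERVED 2

struct tags {
	unsigned long bitmap;       /* normal tags   (kernel: sbitmap_queue) */
	unsigned long breserved;    /* reserved tags (kernel: sbitmap_queue) */
};

static void put_tag(struct tags *t, unsigned int tag)
{
	if (tag >= NR_RESERVED)                        /* "real_tag" offset */
		t->bitmap &= ~(1UL << (tag - NR_RESERVED));
	else
		t->breserved &= ~(1UL << tag);
}

int main(void)
{
	struct tags t = { ~0UL, ~0UL };   /* pretend everything is allocated */

	put_tag(&t, 5);   /* clears bit 3 of the normal bitmap */
	put_tag(&t, 1);   /* clears bit 1 of the reserved bitmap */
	printf("bitmap=%#lx breserved=%#lx\n", t.bitmap, t.breserved);
	return 0;
}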
blk-iolatency.c
835 struct blkg_conf_ctx ctx; in iolatency_set_limit() local
842 ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx); in iolatency_set_limit()
846 iolat = blkg_to_lat(ctx.blkg); in iolatency_set_limit()
847 p = ctx.body; in iolatency_set_limit()
872 blkg = ctx.blkg; in iolatency_set_limit()
880 blkg_conf_finish(&ctx); in iolatency_set_limit()
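iolatency_set_limit() shows the blkg_conf_prep()/blkg_conf_finish() bracket that also recurs in blk-cgroup.c, bfq-cgroup.c, and blk-throttle.c below: prep resolves and pins the target device/cgroup pair and hands back the unparsed remainder in ctx.body; finish drops whatever prep took, on success and error paths alike. A simplified userspace sketch of the pattern (conf_prep/conf_finish are invented stand-ins):

#include <stdio.h>
#include <string.h>

struct conf_ctx {
	const char *dev;    /* stands in for ctx->blkg / ctx->disk */
	char *body;         /* rest of the input after the device name */
};

static int conf_prep(char *input, struct conf_ctx *ctx)
{
	char *space = strchr(input, ' ');

	if (!space)
		return -1;
	*space = '\0';
	ctx->dev = input;        /* kernel: look up and reference the blkg */
	ctx->body = space + 1;
	return 0;
}

static void conf_finish(struct conf_ctx *ctx)
{
	/* kernel: unlock the queue and drop the disk reference */
	ctx->dev = NULL;
}

int main(void)
{
	char input[] = "8:0 target=75";
	struct conf_ctx ctx;

	if (conf_prep(input, &ctx))
		return 1;
	printf("dev=%s body=%s\n", ctx.dev, ctx.body);
	conf_finish(&ctx);       /* always paired with a successful prep */
	return 0;
}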
blk-iocost.c
1404 struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key; in iocg_wake_fn() local
1405 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse); in iocg_wake_fn()
1407 ctx->vbudget -= cost; in iocg_wake_fn()
1409 if (ctx->vbudget < 0) in iocg_wake_fn()
1412 iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost); in iocg_wake_fn()
1436 struct iocg_wake_ctx ctx = { .iocg = iocg }; in iocg_kick_waitq() local
1479 ctx.vbudget = vbudget; in iocg_kick_waitq()
1480 current_hweight(iocg, NULL, &ctx.hw_inuse); in iocg_kick_waitq()
1482 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx); in iocg_kick_waitq()
1495 if (WARN_ON_ONCE(ctx.vbudget >= 0)) in iocg_kick_waitq()
[all …]
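iocg_kick_waitq() passes a budget in the wake key; iocg_wake_fn() deducts each waiter's cost from ctx->vbudget and aborts the walk once the budget goes negative, so waiters past that point stay asleep. A hypothetical model of that budget-limited wakeup (wake_ctx/waiter/wake_fn are invented names):

#include <stdio.h>

struct wake_ctx { long vbudget; };

struct waiter { long cost; int woken; };

/* Returns nonzero to continue the walk, 0 to stop it, mirroring how
 * iocg_wake_fn() aborts the waitqueue walk when ctx->vbudget < 0. */
static int wake_fn(struct waiter *w, struct wake_ctx *ctx)
{
	ctx->vbudget -= w->cost;
	if (ctx->vbudget < 0)
		return 0;            /* out of budget: stop waking */
	w->woken = 1;
	return 1;
}

int main(void)
{
	struct waiter waiters[3] = { { 40, 0 }, { 40, 0 }, { 40, 0 } };
	struct wake_ctx ctx = { .vbudget = 100 };

	for (int i = 0; i < 3 && wake_fn(&waiters[i], &ctx); i++)
		;
	for (int i = 0; i < 3; i++)
		printf("waiter %d woken=%d\n", i, waiters[i].woken);
	return 0;                /* first two wake, third stays asleep */
}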
blk-cgroup.c
610 char *input, struct blkg_conf_ctx *ctx) in blkg_conf_prep() argument
693 ctx->disk = disk; in blkg_conf_prep()
694 ctx->blkg = blkg; in blkg_conf_prep()
695 ctx->body = input; in blkg_conf_prep()
726 void blkg_conf_finish(struct blkg_conf_ctx *ctx) in blkg_conf_finish() argument
727 __releases(&ctx->disk->queue->queue_lock) __releases(rcu) in blkg_conf_finish()
729 spin_unlock_irq(&ctx->disk->queue->queue_lock); in blkg_conf_finish()
731 put_disk_and_module(ctx->disk); in blkg_conf_finish()
bfq-cgroup.c
1083 struct blkg_conf_ctx ctx; in bfq_io_set_device_weight() local
1088 ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx); in bfq_io_set_device_weight()
1092 if (sscanf(ctx.body, "%llu", &v) == 1) { in bfq_io_set_device_weight()
1097 } else if (!strcmp(strim(ctx.body), "default")) { in bfq_io_set_device_weight()
1104 bfqg = blkg_to_bfqg(ctx.blkg); in bfq_io_set_device_weight()
1112 blkg_conf_finish(&ctx); in bfq_io_set_device_weight()
blk-throttle.c
1463 struct blkg_conf_ctx ctx; in tg_set_conf() local
1468 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx); in tg_set_conf()
1473 if (sscanf(ctx.body, "%llu", &v) != 1) in tg_set_conf()
1478 tg = blkg_to_tg(ctx.blkg); in tg_set_conf()
1488 blkg_conf_finish(&ctx); in tg_set_conf()
1652 struct blkg_conf_ctx ctx; in tg_set_limit() local
1660 ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx); in tg_set_limit()
1664 tg = blkg_to_tg(ctx.blkg); in tg_set_limit()
1679 if (sscanf(ctx.body, "%26s%n", tok, &len) != 1) in tg_set_limit()
1683 ctx.body += len; in tg_set_limit()
[all …]
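tg_set_limit() tokenizes ctx.body with sscanf("%26s%n"): %26s pulls one whitespace-delimited token (capped to fit a 27-byte tok buffer) and %n reports how many input bytes were consumed, so ctx.body += len advances the cursor token by token. A standalone sketch of that loop with made-up keys:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *body = "rbps=1000 wiops=max idle=200";
	char tok[27];
	int len;

	while (sscanf(body, "%26s%n", tok, &len) == 1) {
		char key[27];
		char val[27];

		body += len;                     /* kernel: ctx.body += len */
		if (sscanf(tok, "%26[^=]=%26s", key, val) != 2)
			return 1;                /* malformed token */
		printf("key=%s val=%s\n", key, val);
	}
	return 0;
}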
blk-flush.c
356 struct blk_mq_ctx *ctx = rq->mq_ctx; in mq_flush_data_end_io() local
358 struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx); in mq_flush_data_end_io()
blk-mq-tag.h
43 extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
blk-mq-sched.h
22 struct blk_mq_ctx *ctx,
blk.h
37 blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx) in blk_get_flush_queue() argument
39 return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq; in blk_get_flush_queue()
kyber-iosched.c
568 struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); in kyber_bio_merge() local
569 struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); in kyber_bio_merge()
571 struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]]; in kyber_bio_merge()