/block/
blk-mq-sysfs.c
    28   struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);  in blk_mq_ctx_sysfs_release() local
    31   kobject_put(&ctx->ctxs->kobj);  in blk_mq_ctx_sysfs_release()
    64   struct blk_mq_ctx *ctx;  in blk_mq_sysfs_show() local
    69   ctx = container_of(kobj, struct blk_mq_ctx, kobj);  in blk_mq_sysfs_show()
    70   q = ctx->queue;  in blk_mq_sysfs_show()
    76   res = entry->show(ctx, page);  in blk_mq_sysfs_show()
    85   struct blk_mq_ctx *ctx;  in blk_mq_sysfs_store() local
    90   ctx = container_of(kobj, struct blk_mq_ctx, kobj);  in blk_mq_sysfs_store()
    91   q = ctx->queue;  in blk_mq_sysfs_store()
    97   res = entry->store(ctx, page, length);  in blk_mq_sysfs_store()
    [all …]
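The show/store hits above all recover the software queue from its embedded kobject. A minimal sketch of that container_of() pattern, using a hypothetical struct and handler (my_ctx, my_show) rather than the blk-mq ones:

    #include <linux/kernel.h>
    #include <linux/kobject.h>

    struct my_ctx {
            int value;
            struct kobject kobj;    /* embedded, so handlers can walk back to us */
    };

    static ssize_t my_show(struct kobject *kobj, struct attribute *attr,
                           char *page)
    {
            /* Recover the containing struct from the kobject, as
             * blk_mq_sysfs_show() does for struct blk_mq_ctx. */
            struct my_ctx *ctx = container_of(kobj, struct my_ctx, kobj);

            return snprintf(page, PAGE_SIZE, "%d\n", ctx->value);
    }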
blk-mq-sched.c
    212   struct blk_mq_ctx *ctx)  in blk_mq_next_ctx() argument
    214   unsigned short idx = ctx->index_hw[hctx->type];  in blk_mq_next_ctx()
    234   struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);  in blk_mq_do_dispatch_ctx() local
    250   rq = blk_mq_dequeue_from_ctx(hctx, ctx);  in blk_mq_do_dispatch_ctx()
    272   ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);  in blk_mq_do_dispatch_ctx()
    276   WRITE_ONCE(hctx->dispatch_from, ctx);  in blk_mq_do_dispatch_ctx()
    357   struct blk_mq_ctx *ctx;  in __blk_mq_sched_bio_merge() local
    365   ctx = blk_mq_get_ctx(q);  in __blk_mq_sched_bio_merge()
    366   hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);  in __blk_mq_sched_bio_merge()
    369   list_empty_careful(&ctx->rq_lists[type]))  in __blk_mq_sched_bio_merge()
    [all …]
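blk_mq_do_dispatch_ctx() remembers where it stopped (hctx->dispatch_from) and spreads dispatch fairly across the software queues via blk_mq_next_ctx(). Reconstructed from the hits, the advance is a simple wrap-around walk (assuming hctx->nr_ctx and hctx->ctxs hold the count and array of mapped ctxs, as in blk-mq):

    /* Step to the next software queue mapped to this hctx, wrapping at
     * the end, so repeated dispatch passes round-robin over all ctxs. */
    static struct blk_mq_ctx *next_ctx(struct blk_mq_hw_ctx *hctx,
                                       struct blk_mq_ctx *ctx)
    {
            unsigned short idx = ctx->index_hw[hctx->type];

            if (++idx == hctx->nr_ctx)
                    idx = 0;
            return hctx->ctxs[idx];
    }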
blk-mq-debugfs.c
    633   __acquires(&ctx->lock) \
    635   struct blk_mq_ctx *ctx = m->private; \
    637   spin_lock(&ctx->lock); \
    638   return seq_list_start(&ctx->rq_lists[type], *pos); \
    644   struct blk_mq_ctx *ctx = m->private; \
    646   return seq_list_next(v, &ctx->rq_lists[type], pos); \
    650   __releases(&ctx->lock) \
    652   struct blk_mq_ctx *ctx = m->private; \
    654   spin_unlock(&ctx->lock); \
    670   struct blk_mq_ctx *ctx = data;  in ctx_dispatched_show() local
    [all …]
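The backslash-continued hits come from a macro that stamps out seq_file start/next/stop callbacks per rq_list, holding ctx->lock across the whole walk. De-macroized for a single list, the trio looks roughly like this (type fixed to HCTX_TYPE_DEFAULT for clarity):

    static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
            __acquires(&ctx->lock)
    {
            struct blk_mq_ctx *ctx = m->private;

            spin_lock(&ctx->lock);
            return seq_list_start(&ctx->rq_lists[HCTX_TYPE_DEFAULT], *pos);
    }

    static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
    {
            struct blk_mq_ctx *ctx = m->private;

            return seq_list_next(v, &ctx->rq_lists[HCTX_TYPE_DEFAULT], pos);
    }

    /* stop undoes start's locking even when the walk ends early */
    static void ctx_rq_list_stop(struct seq_file *m, void *v)
            __releases(&ctx->lock)
    {
            struct blk_mq_ctx *ctx = m->private;

            spin_unlock(&ctx->lock);
    }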
blk-mq.c
    83    struct blk_mq_ctx *ctx)  in blk_mq_hctx_mark_pending() argument
    85    const int bit = ctx->index_hw[hctx->type];  in blk_mq_hctx_mark_pending()
    92    struct blk_mq_ctx *ctx)  in blk_mq_hctx_clear_pending() argument
    94    const int bit = ctx->index_hw[hctx->type];  in blk_mq_hctx_clear_pending()
    294   rq->mq_ctx = data->ctx;  in blk_mq_rq_ctx_init()
    329   data->ctx->rq_dispatched[op_is_sync(data->cmd_flags)]++;  in blk_mq_rq_ctx_init()
    378   data->ctx = blk_mq_get_ctx(q);  in __blk_mq_alloc_request()
    379   data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);  in __blk_mq_alloc_request()
    479   data.ctx = __blk_mq_get_ctx(q, cpu);  in blk_mq_alloc_request_hctx()
    499   struct blk_mq_ctx *ctx = rq->mq_ctx;  in __blk_mq_free_request() local
    [all …]
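The mark/clear pair keeps one bit per software queue in the hardware queue's ctx_map, which is how dispatch later finds the ctxs that have pending work. The mark side, reconstructed from the hits:

    /* Flag "this ctx has queued requests" in the hctx's sbitmap. The
     * test-before-set avoids dirtying a shared cacheline when the bit
     * is already up. */
    static void hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_ctx *ctx)
    {
            const int bit = ctx->index_hw[hctx->type];

            if (!sbitmap_test_bit(&hctx->ctx_map, bit))
                    sbitmap_set_bit(&hctx->ctx_map, bit);
    }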
blk-mq.h
    75    void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
    109   struct blk_mq_ctx *ctx)  in blk_mq_map_queue() argument
    121   return ctx->hctxs[type];  in blk_mq_map_queue()
    161   struct blk_mq_ctx *ctx;  member
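Line 121 is the tail of blk_mq_map_queue(): each ctx caches a per-type array of hardware queues, and the type is picked from the op flags. A sketch of the selection, assuming the flag and enum names of the v5.x blk-mq code this listing appears to come from:

    /* Polled I/O and reads can be steered to dedicated queue maps;
     * everything else uses the default map. */
    static inline struct blk_mq_hw_ctx *map_queue(struct request_queue *q,
                                                  unsigned int flags,
                                                  struct blk_mq_ctx *ctx)
    {
            enum hctx_type type = HCTX_TYPE_DEFAULT;

            if (flags & REQ_HIPRI)
                    type = HCTX_TYPE_POLL;
            else if ((flags & REQ_OP_MASK) == REQ_OP_READ)
                    type = HCTX_TYPE_READ;

            return ctx->hctxs[type];
    }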
blk-mq-tag.c
    149   data->ctx = blk_mq_get_ctx(data->q);  in blk_mq_get_tag()
    151   data->ctx);  in blk_mq_get_tag()
    177   blk_mq_put_tag(tags, data->ctx, tag + tag_offset);  in blk_mq_get_tag()
    183   void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,  in blk_mq_put_tag() argument
    190   sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu);  in blk_mq_put_tag()
    193   sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu);  in blk_mq_put_tag()
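Lines 190 and 193 are the two halves of blk_mq_put_tag(): reserved and normal tags live in separate sbitmaps, with nr_reserved_tags offsetting between the two index spaces. Filled out from the hits:

    /* Return a tag to the matching sbitmap. ctx->cpu hints the wakeup
     * toward the CPU that freed the tag, for cache locality. */
    static void put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
                        unsigned int tag)
    {
            if (tag >= tags->nr_reserved_tags) {
                    const int real_tag = tag - tags->nr_reserved_tags;

                    sbitmap_queue_clear(tags->bitmap_tags, real_tag, ctx->cpu);
            } else {
                    sbitmap_queue_clear(tags->breserved_tags, tag, ctx->cpu);
            }
    }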
blk-iolatency.c
    835   struct blkg_conf_ctx ctx;  in iolatency_set_limit() local
    842   ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);  in iolatency_set_limit()
    846   iolat = blkg_to_lat(ctx.blkg);  in iolatency_set_limit()
    847   p = ctx.body;  in iolatency_set_limit()
    872   blkg = ctx.blkg;  in iolatency_set_limit()
    880   blkg_conf_finish(&ctx);  in iolatency_set_limit()
blk-iocost.c
    1404   struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;  in iocg_wake_fn() local
    1405   u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);  in iocg_wake_fn()
    1407   ctx->vbudget -= cost;  in iocg_wake_fn()
    1409   if (ctx->vbudget < 0)  in iocg_wake_fn()
    1412   iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);  in iocg_wake_fn()
    1436   struct iocg_wake_ctx ctx = { .iocg = iocg };  in iocg_kick_waitq() local
    1479   ctx.vbudget = vbudget;  in iocg_kick_waitq()
    1480   current_hweight(iocg, NULL, &ctx.hw_inuse);  in iocg_kick_waitq()
    1482   __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);  in iocg_kick_waitq()
    1495   if (WARN_ON_ONCE(ctx.vbudget >= 0))  in iocg_kick_waitq()
    [all …]
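Here the waitqueue key smuggles a shared budget into every wakeup: iocg_kick_waitq() loads ctx.vbudget and passes &ctx through __wake_up_locked_key(), and iocg_wake_fn() charges each waiter's cost against it. The shape of such a budget-gated wake function, with the per-waiter bookkeeping elided (a sketch, not iocost's exact body):

    /* A negative return from a wake function stops __wake_up_common()'s
     * sweep, so waiters past the budget stay asleep until the next kick. */
    static int budget_wake_fn(struct wait_queue_entry *wq_entry,
                              unsigned int mode, int flags, void *key)
    {
            struct iocg_wake_ctx *ctx = key;
            struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait,
                                                  wait);
            u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);

            ctx->vbudget -= cost;
            if (ctx->vbudget < 0)
                    return -1;      /* out of budget: end the sweep */

            iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
            return default_wake_function(wq_entry, mode, flags, key);
    }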
blk-cgroup.c
    610   char *input, struct blkg_conf_ctx *ctx)  in blkg_conf_prep() argument
    693   ctx->disk = disk;  in blkg_conf_prep()
    694   ctx->blkg = blkg;  in blkg_conf_prep()
    695   ctx->body = input;  in blkg_conf_prep()
    726   void blkg_conf_finish(struct blkg_conf_ctx *ctx)  in blkg_conf_finish() argument
    727   __releases(&ctx->disk->queue->queue_lock) __releases(rcu)  in blkg_conf_finish()
    729   spin_unlock_irq(&ctx->disk->queue->queue_lock);  in blkg_conf_finish()
    731   put_disk_and_module(ctx->disk);  in blkg_conf_finish()
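blkg_conf_prep() and blkg_conf_finish() bracket every per-device policy write in this listing (iolatency, bfq, throttle): prep resolves the "MAJ:MIN" prefix, pins the disk and blkg under queue_lock + RCU, and points ctx->body at the rest of the input; finish unwinds all of it. A minimal user of that pattern (my_policy_write is hypothetical; the value-application step is schematic):

    static ssize_t my_policy_write(struct kernfs_open_file *of, char *buf,
                                   size_t nbytes, loff_t off)
    {
            struct blkcg *blkcg = css_to_blkcg(of_css(of));
            struct blkg_conf_ctx ctx;
            u64 v;
            int ret;

            ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
            if (ret)
                    return ret;

            ret = -EINVAL;
            if (sscanf(ctx.body, "%llu", &v) == 1) {
                    /* apply v to the policy data hung off ctx.blkg ... */
                    ret = 0;
            }

            blkg_conf_finish(&ctx);        /* drops queue_lock and RCU */
            return ret ?: nbytes;
    }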
bfq-cgroup.c
    1083   struct blkg_conf_ctx ctx;  in bfq_io_set_device_weight() local
    1088   ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);  in bfq_io_set_device_weight()
    1092   if (sscanf(ctx.body, "%llu", &v) == 1) {  in bfq_io_set_device_weight()
    1097   } else if (!strcmp(strim(ctx.body), "default")) {  in bfq_io_set_device_weight()
    1104   bfqg = blkg_to_bfqg(ctx.blkg);  in bfq_io_set_device_weight()
    1112   blkg_conf_finish(&ctx);  in bfq_io_set_device_weight()
blk-throttle.c
    1463   struct blkg_conf_ctx ctx;  in tg_set_conf() local
    1468   ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);  in tg_set_conf()
    1473   if (sscanf(ctx.body, "%llu", &v) != 1)  in tg_set_conf()
    1478   tg = blkg_to_tg(ctx.blkg);  in tg_set_conf()
    1488   blkg_conf_finish(&ctx);  in tg_set_conf()
    1652   struct blkg_conf_ctx ctx;  in tg_set_limit() local
    1660   ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);  in tg_set_limit()
    1664   tg = blkg_to_tg(ctx.blkg);  in tg_set_limit()
    1679   if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)  in tg_set_limit()
    1683   ctx.body += len;  in tg_set_limit()
    [all …]
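tg_set_limit() (lines 1679/1683) parses its input incrementally: "%26s" pulls one bounded token and "%n" reports how many bytes sscanf consumed, so ctx.body can be stepped past each key=value pair in turn. The loop shape that implies:

    char tok[27];   /* one byte above the %26s bound, for the NUL */
    int len;

    while (true) {
            if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
                    break;                  /* input exhausted */
            ctx.body += len;                /* step past the token */
            /* split tok on '=' and apply the key/value pair ... */
    }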
blk-flush.c
    356   struct blk_mq_ctx *ctx = rq->mq_ctx;  in mq_flush_data_end_io() local
    358   struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);  in mq_flush_data_end_io()
blk-mq-tag.h
    43   extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
blk-mq-sched.h
    22   struct blk_mq_ctx *ctx,
blk.h
    37   blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)  in blk_get_flush_queue() argument
    39   return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;  in blk_get_flush_queue()
kyber-iosched.c
    568   struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);  in kyber_bio_merge() local
    569   struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);  in kyber_bio_merge()
    571   struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];  in kyber_bio_merge()