/block/ |
D | blk-rq-qos.c |
    35  if (rqos->ops->cleanup)  in __rq_qos_cleanup()
    36  rqos->ops->cleanup(rqos, bio);  in __rq_qos_cleanup()
    44  if (rqos->ops->done)  in __rq_qos_done()
    45  rqos->ops->done(rqos, rq);  in __rq_qos_done()
    53  if (rqos->ops->issue)  in __rq_qos_issue()
    54  rqos->ops->issue(rqos, rq);  in __rq_qos_issue()
    62  if (rqos->ops->requeue)  in __rq_qos_requeue()
    63  rqos->ops->requeue(rqos, rq);  in __rq_qos_requeue()
    71  if (rqos->ops->throttle)  in __rq_qos_throttle()
    72  rqos->ops->throttle(rqos, bio);  in __rq_qos_throttle()
    [all …]
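Every hit above has the same shape: a per-hook helper walks the queue's chain of rq_qos policies and only makes the indirect call when the policy actually filled in that op. Below is a minimal, userspace-compilable sketch of that guarded-callback chain; the struct layouts and the rq_qos_cleanup_chain()/demo_cleanup() names are illustrative stand-ins, not the kernel's definitions.

#include <stddef.h>
#include <stdio.h>

struct bio;                                   /* opaque for this sketch */
struct rq_qos;

struct rq_qos_ops {
	void (*cleanup)(struct rq_qos *rqos, struct bio *bio);
	void (*throttle)(struct rq_qos *rqos, struct bio *bio);
};

struct rq_qos {
	struct rq_qos_ops *ops;
	struct rq_qos *next;                  /* next QoS policy in the chain */
};

/* Same pattern as __rq_qos_cleanup(): guarded indirect call per element. */
static void rq_qos_cleanup_chain(struct rq_qos *rqos, struct bio *bio)
{
	for (; rqos; rqos = rqos->next)
		if (rqos->ops->cleanup)
			rqos->ops->cleanup(rqos, bio);
}

static void demo_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	(void)rqos; (void)bio;
	puts("cleanup hook ran");
}

int main(void)
{
	struct rq_qos_ops ops = { .cleanup = demo_cleanup };  /* .throttle left NULL */
	struct rq_qos tail = { .ops = &ops, .next = NULL };
	struct rq_qos head = { .ops = &ops, .next = &tail };

	rq_qos_cleanup_chain(&head, NULL);    /* prints twice; a NULL op is skipped */
	return 0;
}

The NULL check before every call is what lets a policy implement only the hooks it cares about; the rqos->ops assignments in blk-ioprio.c, blk-wbt.c, blk-iolatency.c and blk-iocost.c further down are the other half of this arrangement.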
|
D | blk-mq-sched.h |
    47  if (e && e->type->ops.allow_merge)  in blk_mq_sched_allow_merge()
    48  return e->type->ops.allow_merge(q, rq, bio);  in blk_mq_sched_allow_merge()
    57  if (e && e->type->ops.completed_request)  in blk_mq_sched_completed_request()
    58  e->type->ops.completed_request(rq, now);  in blk_mq_sched_completed_request()
    66  if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)  in blk_mq_sched_requeue_request()
    67  e->type->ops.requeue_request(rq);  in blk_mq_sched_requeue_request()
    74  if (e && e->type->ops.has_work)  in blk_mq_sched_has_work()
    75  return e->type->ops.has_work(hctx);  in blk_mq_sched_has_work()
|
D | ioctl.c |
    271  const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;  in blkdev_pr_register() local
    276  if (!ops || !ops->pr_register)  in blkdev_pr_register()
    283  return ops->pr_register(bdev, reg.old_key, reg.new_key, reg.flags);  in blkdev_pr_register()
    289  const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;  in blkdev_pr_reserve() local
    294  if (!ops || !ops->pr_reserve)  in blkdev_pr_reserve()
    301  return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags);  in blkdev_pr_reserve()
    307  const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;  in blkdev_pr_release() local
    312  if (!ops || !ops->pr_release)  in blkdev_pr_release()
    319  return ops->pr_release(bdev, rsv.key, rsv.type);  in blkdev_pr_release()
    325  const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;  in blkdev_pr_preempt() local
    [all …]
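The pr_ops hits show a two-level optional table: a driver's block_device_operations may not provide a pr_ops table at all, and a table that exists may still leave individual methods NULL, so both levels are checked before the call (the kernel rejects the ioctl with -EOPNOTSUPP in that case). A self-contained sketch of that shape; the types are simplified stand-ins and the pr_register() wrapper is a hypothetical name, not the kernel's function.

#include <errno.h>
#include <stdio.h>

struct block_device;                          /* opaque for this sketch */

struct pr_ops {
	int (*pr_register)(struct block_device *bdev, unsigned long long old_key,
			   unsigned long long new_key, unsigned int flags);
	/* pr_reserve, pr_release, pr_preempt, ... elided */
};

struct block_device_operations {
	const struct pr_ops *pr_ops;          /* NULL if the driver has no PR support */
};

static int pr_register(const struct block_device_operations *fops,
		       struct block_device *bdev,
		       unsigned long long old_key, unsigned long long new_key,
		       unsigned int flags)
{
	const struct pr_ops *ops = fops->pr_ops;

	if (!ops || !ops->pr_register)        /* either level may be missing */
		return -EOPNOTSUPP;
	return ops->pr_register(bdev, old_key, new_key, flags);
}

int main(void)
{
	struct block_device_operations no_pr = { .pr_ops = NULL };

	/* With no pr_ops table the request is rejected up front. */
	printf("%d\n", pr_register(&no_pr, NULL, 0, 1, 0));
	return 0;
}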
|
D | blk-mq-sched.c |
    134  if (e->type->ops.has_work && !e->type->ops.has_work(hctx))  in __blk_mq_do_dispatch_sched()
    145  rq = e->type->ops.dispatch_request(hctx);  in __blk_mq_do_dispatch_sched()
    284  const bool has_sched_dispatch = e && e->type->ops.dispatch_request;  in __blk_mq_sched_dispatch_requests()
    362  if (e && e->type->ops.bio_merge)  in __blk_mq_sched_bio_merge()
    363  return e->type->ops.bio_merge(q, bio, nr_segs);  in __blk_mq_sched_bio_merge()
    466  if (e && e->type->ops.insert_requests) {  in blk_mq_sched_insert_request()
    470  e->type->ops.insert_requests(hctx, &list, at_head);  in blk_mq_sched_insert_request()
    497  if (e && e->type->ops.insert_requests)  in blk_mq_sched_insert_requests()
    498  e->type->ops.insert_requests(hctx, list, false);  in blk_mq_sched_insert_requests()
    596  ret = e->ops.init_sched(q, e);  in blk_mq_init_sched()
    [all …]
|
D | elevator.c |
    65  if (e->type->ops.allow_merge)  in elv_iosched_allow_bio_merge()
    66  return e->type->ops.allow_merge(q, rq, bio);  in elv_iosched_allow_bio_merge()
    345  if (e->type->ops.request_merge)  in elv_merge()
    346  return e->type->ops.request_merge(q, req, bio);  in elv_merge()
    397  if (e->type->ops.request_merged)  in elv_merged_request()
    398  e->type->ops.request_merged(q, rq, type);  in elv_merged_request()
    411  if (e->type->ops.requests_merged)  in elv_merge_requests()
    412  e->type->ops.requests_merged(q, rq, next);  in elv_merge_requests()
    422  if (e->type->ops.next_request)  in elv_latter_request()
    423  return e->type->ops.next_request(q, rq);  in elv_latter_request()
    [all …]
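Together with the blk-mq-sched.h hits above, these show the elevator variant of the pattern: the ops table is embedded by value in struct elevator_type, the queue may have no scheduler at all (hence the "e &&" checks in blk-mq-sched.h), and a missing merge hook falls back to a permissive default. A sketch under those assumptions; the allow_bio_merge()/never_merge() names and the simplified types are mine, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

struct request_queue;
struct request;
struct bio;

struct elevator_mq_ops {
	bool (*allow_merge)(struct request_queue *q, struct request *rq,
			    struct bio *bio);
};

struct elevator_type {
	struct elevator_mq_ops ops;           /* embedded by value, not a pointer */
};

struct elevator_queue {
	struct elevator_type *type;
};

/* Mirrors the shape of elv_iosched_allow_bio_merge(): missing hook means "allow". */
static bool allow_bio_merge(struct elevator_queue *e, struct request_queue *q,
			    struct request *rq, struct bio *bio)
{
	if (e && e->type->ops.allow_merge)
		return e->type->ops.allow_merge(q, rq, bio);
	return true;
}

static bool never_merge(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	(void)q; (void)rq; (void)bio;
	return false;
}

int main(void)
{
	struct elevator_type noop_type = { .ops = { .allow_merge = NULL } };
	struct elevator_type picky_type = { .ops = { .allow_merge = never_merge } };
	struct elevator_queue noop = { .type = &noop_type };
	struct elevator_queue picky = { .type = &picky_type };

	printf("no hook   -> %d\n", allow_bio_merge(&noop, NULL, NULL, NULL));   /* 1 */
	printf("with hook -> %d\n", allow_bio_merge(&picky, NULL, NULL, NULL));  /* 0 */
	return 0;
}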
|
D | bsg.c |
    151  ret = q->bsg_dev.ops->check_proto(&hdr);  in bsg_sg_io()
    160  ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);  in bsg_sg_io()
    188  ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr);  in bsg_sg_io()
    192  rq->q->bsg_dev.ops->free_rq(rq);  in bsg_sg_io()
    421  const char *name, const struct bsg_ops *ops)  in bsg_register_queue() argument
    450  bcd->ops = ops;  in bsg_register_queue()
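bsg is the one table here whose methods are called without NULL checks: a transport hands its bsg_ops to bsg_register_queue() once (bcd->ops = ops), and bsg_sg_io() then drives the request through check_proto, fill_hdr, complete_rq and free_rq in order. A rough, self-contained sketch of that lifecycle, with simplified stand-in types, demo_* stubs, and error handling reduced to the bare minimum rather than the kernel's exact control flow.

#include <stdio.h>

struct request;                               /* opaque stand-in */
struct sg_io_v4 { int proto; };               /* real header has many more fields */

/* Unlike the optional elevator/rq_qos hooks, every bsg op is mandatory. */
struct bsg_ops {
	int  (*check_proto)(struct sg_io_v4 *hdr);
	int  (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr, int mode);
	int  (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr);
	void (*free_rq)(struct request *rq);
};

struct bsg_device {
	const struct bsg_ops *ops;            /* stored once at registration time */
};

static int demo_check_proto(struct sg_io_v4 *hdr) { return hdr->proto ? -1 : 0; }
static int demo_fill_hdr(struct request *rq, struct sg_io_v4 *hdr, int mode)
{ (void)rq; (void)hdr; (void)mode; return 0; }
static int demo_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
{ (void)rq; (void)hdr; return 0; }
static void demo_free_rq(struct request *rq) { (void)rq; }

static const struct bsg_ops demo_ops = {
	.check_proto = demo_check_proto,
	.fill_hdr    = demo_fill_hdr,
	.complete_rq = demo_complete_rq,
	.free_rq     = demo_free_rq,
};

/* Roughly follows the bsg_sg_io() call order shown above. */
static int sg_io(struct bsg_device *bd, struct request *rq, struct sg_io_v4 *hdr)
{
	int ret = bd->ops->check_proto(hdr);

	if (ret)
		return ret;
	ret = bd->ops->fill_hdr(rq, hdr, 0);
	if (!ret)
		ret = bd->ops->complete_rq(rq, hdr);
	bd->ops->free_rq(rq);
	return ret;
}

int main(void)
{
	struct bsg_device bd = { .ops = &demo_ops };   /* bcd->ops = ops */
	struct sg_io_v4 hdr = { .proto = 0 };

	printf("sg_io -> %d\n", sg_io(&bd, NULL, &hdr));
	return 0;
}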
|
D | blk-mq.c |
    336  if (e && e->type->ops.prepare_request) {  in blk_mq_rq_ctx_init()
    340  e->type->ops.prepare_request(rq);  in blk_mq_rq_ctx_init()
    372  e->type->ops.limit_depth &&  in __blk_mq_alloc_request()
    374  e->type->ops.limit_depth(data->cmd_flags, data);  in __blk_mq_alloc_request()
    527  if (e && e->type->ops.finish_request)  in blk_mq_free_request()
    528  e->type->ops.finish_request(rq);  in blk_mq_free_request()
    1708  if (e && e->type->ops.dispatch_request &&  in blk_mq_has_sqsched()
    2423  if (tags->rqs && set->ops->exit_request) {  in blk_mq_free_rqs()
    2431  set->ops->exit_request(set, rq, hctx_idx);  in blk_mq_free_rqs()
    2505  if (set->ops->init_request) {  in blk_mq_init_request()
    [all …]
|
D | blk-ioc.c |
    50  if (et->ops.exit_icq)  in ioc_exit_icq()
    51  et->ops.exit_icq(icq);  in ioc_exit_icq()
    401  if (et->ops.init_icq)  in ioc_create_icq()
    402  et->ops.init_icq(icq);  in ioc_create_icq()
|
D | blk-rq-qos.h |
    29  struct rq_qos_ops *ops;  member
    107  if (rqos->ops->debugfs_attrs)  in rq_qos_add()
|
D | blk-ioprio.c | 238 rqos->ops = &blkcg_ioprio_ops; in blk_ioprio_init()
|
D | bsg-lib.c | 381 set->ops = &bsg_mq_ops; in bsg_setup_queue()
|
D | blk-mq-debugfs.c |
    972  if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)  in blk_mq_debugfs_register_rqos()
    982  debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);  in blk_mq_debugfs_register_rqos()
|
D | blk-wbt.c | 836 rwb->rqos.ops = &wbt_rqos_ops; in wbt_init()
|
D | blk-iolatency.c | 772 rqos->ops = &blkcg_iolatency_ops; in blk_iolatency_init()
|
D | kyber-iosched.c | 1012 .ops = {
|
D | mq-deadline-main.c | 1126 .ops = {
|
D | blk-iocost.c | 2865 rqos->ops = &ioc_rqos_ops; in blk_iocost_init()
|
D | bfq-iosched.c | 6806 .ops = {
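The kyber-iosched.c, mq-deadline-main.c and bfq-iosched.c hits are the provider side of the elevator dispatch shown earlier: each scheduler defines a struct elevator_type whose embedded .ops is filled with designated initializers, and any hook it leaves NULL is simply skipped or defaulted by the guarded call sites in blk-mq-sched.c, elevator.c, blk-mq.c and blk-ioc.c. A minimal sketch of that registration shape, using stand-in types and a hypothetical "demo-sched"; it is not the kernel's elevator API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct request_queue;
struct request;
struct bio;
struct blk_mq_hw_ctx;

/* Stand-in for the kernel's elevator_mq_ops: every hook is optional. */
struct elevator_mq_ops {
	bool (*allow_merge)(struct request_queue *q, struct request *rq,
			    struct bio *bio);
	bool (*has_work)(struct blk_mq_hw_ctx *hctx);
	struct request *(*dispatch_request)(struct blk_mq_hw_ctx *hctx);
};

struct elevator_type {
	struct elevator_mq_ops ops;
	const char *elevator_name;
};

static bool demo_has_work(struct blk_mq_hw_ctx *hctx)
{
	(void)hctx;
	return false;
}

static struct request *demo_dispatch(struct blk_mq_hw_ctx *hctx)
{
	(void)hctx;
	return NULL;
}

/*
 * Provider side: designated initializers fill in only the hooks this
 * scheduler implements; allow_merge stays NULL, so the callers' guarded
 * dispatch falls back to their default behaviour.
 */
static struct elevator_type demo_sched = {
	.ops = {
		.has_work         = demo_has_work,
		.dispatch_request = demo_dispatch,
		/* .allow_merge intentionally left unset */
	},
	.elevator_name = "demo-sched",
};

int main(void)
{
	printf("%s: allow_merge %s\n", demo_sched.elevator_name,
	       demo_sched.ops.allow_merge ? "set" : "unset (caller default used)");
	return 0;
}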
|