
Searched refs:ops (Results 1 – 18 of 18) sorted by relevance

/block/
blk-rq-qos.c
    35  if (rqos->ops->cleanup) in __rq_qos_cleanup()
    36  rqos->ops->cleanup(rqos, bio); in __rq_qos_cleanup()
    44  if (rqos->ops->done) in __rq_qos_done()
    45  rqos->ops->done(rqos, rq); in __rq_qos_done()
    53  if (rqos->ops->issue) in __rq_qos_issue()
    54  rqos->ops->issue(rqos, rq); in __rq_qos_issue()
    62  if (rqos->ops->requeue) in __rq_qos_requeue()
    63  rqos->ops->requeue(rqos, rq); in __rq_qos_requeue()
    71  if (rqos->ops->throttle) in __rq_qos_throttle()
    72  rqos->ops->throttle(rqos, bio); in __rq_qos_throttle()
    [all …]
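Every one of these __rq_qos_* wrappers has the same shape: test whether the current policy filled in the hook, and only then call it (in the kernel the wrappers also walk the queue's chain of rq_qos policies). Below is a minimal userspace sketch of that guarded-callback chain; the types are pared-down stand-ins, not the kernel's real struct rq_qos / struct rq_qos_ops definitions.

    #include <stdio.h>
    #include <stddef.h>

    struct bio;                       /* opaque stand-in */

    struct rq_qos;
    struct rq_qos_ops {
        void (*cleanup)(struct rq_qos *rqos, struct bio *bio);
        void (*throttle)(struct rq_qos *rqos, struct bio *bio);
    };

    struct rq_qos {
        const struct rq_qos_ops *ops;
        struct rq_qos *next;          /* policies are chained per queue */
    };

    /* Call the hook on every policy in the chain that implements it. */
    static void rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
    {
        for (; rqos; rqos = rqos->next)
            if (rqos->ops->cleanup)
                rqos->ops->cleanup(rqos, bio);
    }

    static void demo_cleanup(struct rq_qos *rqos, struct bio *bio)
    {
        (void)rqos; (void)bio;
        puts("cleanup hook ran");
    }

    int main(void)
    {
        /* One policy implements cleanup, the other leaves it NULL. */
        static const struct rq_qos_ops with_hook = { .cleanup = demo_cleanup };
        static const struct rq_qos_ops without_hook = { 0 };
        struct rq_qos b = { .ops = &without_hook, .next = NULL };
        struct rq_qos a = { .ops = &with_hook, .next = &b };

        rq_qos_cleanup(&a, NULL);     /* prints once; the NULL hook is skipped */
        return 0;
    }

Leaving a hook NULL costs the policy nothing at call sites, which is why every dispatch wrapper above repeats the same if-check before the indirect call.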
blk-mq-sched.h
    49  if (e && e->type->ops.allow_merge) in blk_mq_sched_allow_merge()
    50  return e->type->ops.allow_merge(q, rq, bio); in blk_mq_sched_allow_merge()
    59  if (e && e->type->ops.completed_request) in blk_mq_sched_completed_request()
    60  e->type->ops.completed_request(rq, now); in blk_mq_sched_completed_request()
    68  if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request) in blk_mq_sched_requeue_request()
    69  e->type->ops.requeue_request(rq); in blk_mq_sched_requeue_request()
    76  if (e && e->type->ops.has_work) in blk_mq_sched_has_work()
    77  return e->type->ops.has_work(hctx); in blk_mq_sched_has_work()
ioctl.c
    261  const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; in blkdev_pr_register() local
    266  if (!ops || !ops->pr_register) in blkdev_pr_register()
    273  return ops->pr_register(bdev, reg.old_key, reg.new_key, reg.flags); in blkdev_pr_register()
    279  const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; in blkdev_pr_reserve() local
    284  if (!ops || !ops->pr_reserve) in blkdev_pr_reserve()
    291  return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags); in blkdev_pr_reserve()
    297  const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; in blkdev_pr_release() local
    302  if (!ops || !ops->pr_release) in blkdev_pr_release()
    309  return ops->pr_release(bdev, rsv.key, rsv.type); in blkdev_pr_release()
    315  const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops; in blkdev_pr_preempt() local
    [all …]
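Each blkdev_pr_* ioctl handler fetches the driver's optional pr_ops table through the disk's fops and bails out when either the table or the specific handler is missing. A hedged sketch of that lookup-then-dispatch shape follows; the trimmed types and the demo handler pr_register_demo are illustrative only, though returning -EOPNOTSUPP for an absent op mirrors the kernel handlers' convention.

    #include <errno.h>
    #include <stdio.h>
    #include <stdint.h>

    struct block_device;              /* opaque stand-in */

    struct pr_ops {
        int (*pr_register)(struct block_device *bdev, uint64_t old_key,
                           uint64_t new_key, uint32_t flags);
    };

    struct block_device_operations {
        const struct pr_ops *pr_ops;  /* NULL when the driver has no PR support */
    };

    /* Dispatch shape used by the blkdev_pr_* ioctl handlers. */
    static int blkdev_pr_register(const struct block_device_operations *fops,
                                  struct block_device *bdev, uint64_t old_key,
                                  uint64_t new_key, uint32_t flags)
    {
        const struct pr_ops *ops = fops->pr_ops;

        if (!ops || !ops->pr_register)
            return -EOPNOTSUPP;       /* driver opted out of this operation */
        return ops->pr_register(bdev, old_key, new_key, flags);
    }

    static int pr_register_demo(struct block_device *bdev, uint64_t old_key,
                                uint64_t new_key, uint32_t flags)
    {
        (void)bdev; (void)old_key; (void)new_key; (void)flags;
        puts("pr_register ran");
        return 0;
    }

    int main(void)
    {
        static const struct pr_ops ops = { .pr_register = pr_register_demo };
        struct block_device_operations with = { .pr_ops = &ops };
        struct block_device_operations without = { .pr_ops = NULL };

        printf("with:    %d\n", blkdev_pr_register(&with, NULL, 1, 2, 0));
        printf("without: %d\n", blkdev_pr_register(&without, NULL, 1, 2, 0));
        return 0;
    }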
blk-mq-sched.c
    136  if (e->type->ops.has_work && !e->type->ops.has_work(hctx)) in __blk_mq_do_dispatch_sched()
    148  rq = e->type->ops.dispatch_request(hctx); in __blk_mq_do_dispatch_sched()
    381  if (e && e->type->ops.bio_merge) in __blk_mq_sched_bio_merge()
    382  return e->type->ops.bio_merge(q, bio, nr_segs); in __blk_mq_sched_bio_merge()
    472  e->type->ops.insert_requests(hctx, &list, at_head); in blk_mq_sched_insert_request()
    500  e->type->ops.insert_requests(hctx, list, false); in blk_mq_sched_insert_requests()
    625  ret = e->ops.init_sched(q, e); in blk_mq_init_sched()
    632  if (e->ops.init_hctx) { in blk_mq_init_sched()
    633  ret = e->ops.init_hctx(hctx, i); in blk_mq_init_sched()
    680  if (e->type->ops.exit_hctx && hctx->sched_data) { in blk_mq_exit_sched()
    [all …]
elevator.c
    65  if (e->type->ops.allow_merge) in elv_iosched_allow_bio_merge()
    66  return e->type->ops.allow_merge(q, rq, bio); in elv_iosched_allow_bio_merge()
    345  if (e->type->ops.request_merge) in elv_merge()
    346  return e->type->ops.request_merge(q, req, bio); in elv_merge()
    402  if (e->type->ops.request_merged) in elv_merged_request()
    403  e->type->ops.request_merged(q, rq, type); in elv_merged_request()
    416  if (e->type->ops.requests_merged) in elv_merge_requests()
    417  e->type->ops.requests_merged(q, rq, next); in elv_merge_requests()
    427  if (e->type->ops.next_request) in elv_latter_request()
    428  return e->type->ops.next_request(q, rq); in elv_latter_request()
    [all …]
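The merge predicates in elevator.c and blk-mq-sched.h differ from the void hooks in one respect: an absent hook has a meaningful default. With no allow_merge callback, merging is simply permitted. A small stand-alone sketch of that hook-or-default shape; the struct names loosely mirror the kernel's, but the definitions here are trimmed stand-ins.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stddef.h>

    struct request_queue;             /* opaque stand-ins */
    struct request;
    struct bio;

    struct elevator_mq_ops {
        bool (*allow_merge)(struct request_queue *q, struct request *rq,
                            struct bio *bio);
    };

    struct elevator_type {
        struct elevator_mq_ops ops;   /* embedded by value, not a pointer */
    };

    struct elevator_queue {
        struct elevator_type *type;
    };

    /* Ask the scheduler, defaulting to "merge allowed" if it has no opinion. */
    static bool elv_allow_bio_merge(struct request_queue *q,
                                    struct elevator_queue *e,
                                    struct request *rq, struct bio *bio)
    {
        if (e && e->type->ops.allow_merge)
            return e->type->ops.allow_merge(q, rq, bio);
        return true;
    }

    static bool never_merge(struct request_queue *q, struct request *rq,
                            struct bio *bio)
    {
        (void)q; (void)rq; (void)bio;
        return false;                 /* a scheduler that forbids all merges */
    }

    int main(void)
    {
        static struct elevator_type strict = {
            .ops = { .allow_merge = never_merge },
        };
        struct elevator_queue e = { .type = &strict };

        printf("strict scheduler: %d\n", elv_allow_bio_merge(NULL, &e, NULL, NULL));
        printf("no scheduler:     %d\n", elv_allow_bio_merge(NULL, NULL, NULL, NULL));
        return 0;
    }

Note that ops here is embedded in struct elevator_type by value, matching the e->type->ops.X spelling in the hits above, whereas rq_qos and pr_ops reach their tables through a pointer.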
blk-mq.c
    343  if (e && e->type->ops.prepare_request) { in blk_mq_rq_ctx_init()
    347  e->type->ops.prepare_request(rq); in blk_mq_rq_ctx_init()
    378  e->type->ops.limit_depth && in __blk_mq_alloc_request()
    380  e->type->ops.limit_depth(data->cmd_flags, data); in __blk_mq_alloc_request()
    526  if (e && e->type->ops.finish_request) in blk_mq_free_request()
    527  e->type->ops.finish_request(rq); in blk_mq_free_request()
    1653  if (e && e->type->ops.dispatch_request && in blk_mq_has_sqsched()
    2358  if (tags->rqs && set->ops->exit_request) { in blk_mq_free_rqs()
    2366  set->ops->exit_request(set, rq, hctx_idx); in blk_mq_free_rqs()
    2437  if (set->ops->init_request) { in blk_mq_init_request()
    [all …]
blk-ioc.c
    50  if (et->ops.exit_icq) in ioc_exit_icq()
    51  et->ops.exit_icq(icq); in ioc_exit_icq()
    402  if (et->ops.init_icq) in ioc_create_icq()
    403  et->ops.init_icq(icq); in ioc_create_icq()
bdev.c
    324  const struct block_device_operations *ops = bdev->bd_disk->fops; in bdev_read_page() local
    327  if (!ops->rw_page || bdev_get_integrity(bdev)) in bdev_read_page()
    333  result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, in bdev_read_page()
    362  const struct block_device_operations *ops = bdev->bd_disk->fops; in bdev_write_page() local
    364  if (!ops->rw_page || bdev_get_integrity(bdev)) in bdev_write_page()
    371  result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, in bdev_write_page()
    447  ctx->ops = &bdev_sops; in bd_init_fs_context()
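bdev_read_page() and bdev_write_page() treat rw_page as an optional fast path: when the driver leaves it NULL (or integrity metadata is attached), they return -EOPNOTSUPP and the caller falls back to an ordinary bio submission. A rough userspace sketch of that fallback contract; because the stand-in struct block_device is opaque here, the ops table is passed explicitly rather than reached through bdev->bd_disk->fops, and submit_bio_fallback is invented for illustration.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stddef.h>

    struct block_device;              /* opaque stand-ins */
    struct page;

    struct block_device_operations {
        /* Optional synchronous page-I/O shortcut; NULL for most drivers. */
        int (*rw_page)(struct block_device *bdev, long sector,
                       struct page *page, bool is_write);
    };

    static bool bdev_has_integrity(struct block_device *bdev)
    {
        (void)bdev;
        return false;                 /* stand-in for bdev_get_integrity() */
    }

    static int bdev_read_page(struct block_device *bdev,
                              const struct block_device_operations *ops,
                              long sector, struct page *page)
    {
        if (!ops->rw_page || bdev_has_integrity(bdev))
            return -EOPNOTSUPP;       /* caller falls back to the bio path */
        return ops->rw_page(bdev, sector, page, false);
    }

    static int submit_bio_fallback(long sector)
    {
        printf("bio fallback for sector %ld\n", sector);
        return 0;
    }

    int main(void)
    {
        struct block_device_operations no_fast_path = { .rw_page = NULL };
        int ret = bdev_read_page(NULL, &no_fast_path, 42, NULL);

        if (ret == -EOPNOTSUPP)       /* take the slow, always-available route */
            ret = submit_bio_fallback(42);
        return ret;
    }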
blk-rq-qos.h
    29  struct rq_qos_ops *ops; member
    109  if (rqos->ops->debugfs_attrs) in rq_qos_add()
blk-ioprio.c
    238  rqos->ops = &blkcg_ioprio_ops; in blk_ioprio_init()
bsg-lib.c
    382  set->ops = &bsg_mq_ops; in bsg_setup_queue()
blk-mq-debugfs.c
    970  if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs) in blk_mq_debugfs_register_rqos()
    980  debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs); in blk_mq_debugfs_register_rqos()
blk-wbt.c
    839  rwb->rqos.ops = &wbt_rqos_ops; in wbt_init()
blk-iolatency.c
    772  rqos->ops = &blkcg_iolatency_ops; in blk_iolatency_init()
kyber-iosched.c
    1013  .ops = {
mq-deadline.c
    1254  .ops = {
blk-iocost.c
    2887  rqos->ops = &ioc_rqos_ops; in blk_iocost_init()
bfq-iosched.c
    7299  .ops = {
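These remaining hits show the provider side of the contract: each policy or scheduler defines a static ops table with C99 designated initializers (hooks it skips stay NULL) and points its instance at that table, as wbt_init(), blk_iocost_init(), and the three elevators' .ops = { ... } definitions do. A compact sketch of the registration idiom, again with illustrative stand-in types rather than the kernel's.

    #include <stdio.h>
    #include <stddef.h>

    struct rq_qos;
    struct bio;

    struct rq_qos_ops {
        void (*throttle)(struct rq_qos *rqos, struct bio *bio);
        void (*cleanup)(struct rq_qos *rqos, struct bio *bio);
    };

    struct rq_qos {
        const struct rq_qos_ops *ops;
    };

    static void demo_throttle(struct rq_qos *rqos, struct bio *bio)
    {
        (void)rqos; (void)bio;
        puts("throttle");
    }

    /* Designated initializers: hooks the policy skips are implicitly NULL. */
    static const struct rq_qos_ops demo_rqos_ops = {
        .throttle = demo_throttle,
        /* .cleanup left NULL: callers must (and do) check before invoking */
    };

    static void demo_init(struct rq_qos *rqos)
    {
        rqos->ops = &demo_rqos_ops;   /* the step wbt_init() and friends perform */
    }

    int main(void)
    {
        struct rq_qos rqos;

        demo_init(&rqos);
        if (rqos.ops->throttle)
            rqos.ops->throttle(&rqos, NULL);
        return 0;
    }

Keeping the table static and const means one shared, read-only ops structure serves every instance of the policy, which is the same economy the consumer-side NULL checks earlier in these results are built around.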