Searched refs:ops (Results 1 – 14 of 14) sorted by relevance
/block/

blk-mq-sched.h
   53  if (e && e->type->ops.mq.allow_merge)  in blk_mq_sched_allow_merge()
   54  return e->type->ops.mq.allow_merge(q, rq, bio);  in blk_mq_sched_allow_merge()
   63  if (e && e->type->ops.mq.completed_request)  in blk_mq_sched_completed_request()
   64  e->type->ops.mq.completed_request(rq);  in blk_mq_sched_completed_request()
   72  if (e && e->type->ops.mq.started_request)  in blk_mq_sched_started_request()
   73  e->type->ops.mq.started_request(rq);  in blk_mq_sched_started_request()
   81  if (e && e->type->ops.mq.requeue_request)  in blk_mq_sched_requeue_request()
   82  e->type->ops.mq.requeue_request(rq);  in blk_mq_sched_requeue_request()
   89  if (e && e->type->ops.mq.has_work)  in blk_mq_sched_has_work()
   90  return e->type->ops.mq.has_work(hctx);  in blk_mq_sched_has_work()

elevator.c
   63  if (e->uses_mq && e->type->ops.mq.allow_merge)  in elv_iosched_allow_bio_merge()
   64  return e->type->ops.mq.allow_merge(q, rq, bio);  in elv_iosched_allow_bio_merge()
   65  else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)  in elv_iosched_allow_bio_merge()
   66  return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);  in elv_iosched_allow_bio_merge()
  256  err = e->ops.sq.elevator_init_fn(q, e);  in elevator_init()
  266  if (e->uses_mq && e->type->ops.mq.exit_sched)  in elevator_exit()
  268  else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)  in elevator_exit()
  269  e->type->ops.sq.elevator_exit_fn(e);  in elevator_exit()
  480  if (e->uses_mq && e->type->ops.mq.request_merge)  in elv_merge()
  481  return e->type->ops.mq.request_merge(q, req, bio);  in elv_merge()
[all …]

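Taken together, the blk-mq-sched.h and elevator.c hits show the dispatch idiom behind most of these references: every scheduler hook is optional, so each call site tests the function pointer for NULL (and, where both paths exist, checks uses_mq to pick the mq or sq arm of the ops union) before calling through it. Below is a minimal userspace sketch of that idiom, reduced to a single hook; the struct layouts and the (rq, bio) signature are simplified stand-ins for the kernel's real definitions, not copies of them.

/* Minimal sketch of the uses_mq + NULL-check dispatch seen above. */
#include <stdbool.h>
#include <stdio.h>

struct request;		/* opaque stand-ins for this sketch */
struct bio;

struct elevator_type {
	union {			/* one elevator_type, two possible ops tables */
		struct {
			bool (*allow_merge)(struct request *rq, struct bio *bio);
		} mq;
		struct {
			bool (*elevator_allow_bio_merge_fn)(struct request *rq,
							    struct bio *bio);
		} sq;
	} ops;
};

struct elevator_queue {
	struct elevator_type *type;
	bool uses_mq;		/* selects which arm of the union is live */
};

/* Shaped like elv_iosched_allow_bio_merge(): a missing hook means "allow". */
static bool allow_bio_merge(struct elevator_queue *e,
			    struct request *rq, struct bio *bio)
{
	if (e->uses_mq && e->type->ops.mq.allow_merge)
		return e->type->ops.mq.allow_merge(rq, bio);
	else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
		return e->type->ops.sq.elevator_allow_bio_merge_fn(rq, bio);
	return true;
}

static bool never_merge(struct request *rq, struct bio *bio)
{
	(void)rq; (void)bio;
	return false;
}

int main(void)
{
	struct elevator_type et = { .ops.mq = { .allow_merge = never_merge } };
	struct elevator_queue e = { .type = &et, .uses_mq = true };

	printf("merge allowed: %d\n", allow_bio_merge(&e, NULL, NULL));	/* 0 */
	return 0;
}
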
blk-mq-sched.c
   96  const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;  in blk_mq_sched_dispatch_requests()
  143  rq = e->type->ops.mq.dispatch_request(hctx);  in blk_mq_sched_dispatch_requests()
  234  if (e && e->type->ops.mq.bio_merge) {  in __blk_mq_sched_bio_merge()
  236  return e->type->ops.mq.bio_merge(hctx, bio);  in __blk_mq_sched_bio_merge()
  374  if (e && e->type->ops.mq.insert_requests) {  in blk_mq_sched_insert_request()
  378  e->type->ops.mq.insert_requests(hctx, &list, at_head);  in blk_mq_sched_insert_request()
  414  if (e && e->type->ops.mq.insert_requests)  in blk_mq_sched_insert_requests()
  415  e->type->ops.mq.insert_requests(hctx, list, false);  in blk_mq_sched_insert_requests()
  475  if (e->type->ops.mq.init_hctx) {  in blk_mq_sched_init_hctx()
  476  ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);  in blk_mq_sched_init_hctx()
[all …]

ioctl.c
  311  const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;  in blkdev_pr_register() local
  316  if (!ops || !ops->pr_register)  in blkdev_pr_register()
  323  return ops->pr_register(bdev, reg.old_key, reg.new_key, reg.flags);  in blkdev_pr_register()
  329  const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;  in blkdev_pr_reserve() local
  334  if (!ops || !ops->pr_reserve)  in blkdev_pr_reserve()
  341  return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags);  in blkdev_pr_reserve()
  347  const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;  in blkdev_pr_release() local
  352  if (!ops || !ops->pr_release)  in blkdev_pr_release()
  359  return ops->pr_release(bdev, rsv.key, rsv.type);  in blkdev_pr_release()
  365  const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;  in blkdev_pr_preempt() local
[all …]

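The ioctl.c hits apply the same guard to the persistent-reservation table: fetch the optional pr_ops through the disk's fops and fail before calling a hook the driver never provided. A compilable sketch of that shape follows; the field chain comes from the matches above, while the -EOPNOTSUPP fallback and the integer argument types are assumptions based on common kernel convention, and the real path's copy_from_user() of the user-supplied struct is elided.

#include <errno.h>
#include <stdio.h>

struct block_device;

struct pr_ops {			/* trimmed to the one hook exercised below */
	int (*pr_register)(struct block_device *bdev,
			   unsigned long long old_key,
			   unsigned long long new_key, unsigned int flags);
};

struct block_device_operations {
	const struct pr_ops *pr_ops;	/* optional; may be NULL */
};

struct gendisk {
	const struct block_device_operations *fops;
};

struct block_device {
	struct gendisk *bd_disk;
};

/* Shaped like blkdev_pr_register(): both the table and the hook are optional. */
static int pr_register(struct block_device *bdev, unsigned long long old_key,
		       unsigned long long new_key, unsigned int flags)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;

	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;	/* assumed fallback for a missing hook */
	return ops->pr_register(bdev, old_key, new_key, flags);
}

int main(void)
{
	struct block_device_operations fops = { .pr_ops = NULL };
	struct gendisk disk = { .fops = &fops };
	struct block_device bdev = { .bd_disk = &disk };

	printf("no pr_ops registered -> %d\n", pr_register(&bdev, 0, 1, 0));
	return 0;
}
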
blk-ioc.c
   51  if (et->uses_mq && et->ops.mq.exit_icq)  in ioc_exit_icq()
   52  et->ops.mq.exit_icq(icq);  in ioc_exit_icq()
   53  else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)  in ioc_exit_icq()
   54  et->ops.sq.elevator_exit_icq_fn(icq);  in ioc_exit_icq()
  418  if (et->uses_mq && et->ops.mq.init_icq)  in ioc_create_icq()
  419  et->ops.mq.init_icq(icq);  in ioc_create_icq()
  420  else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)  in ioc_create_icq()
  421  et->ops.sq.elevator_init_icq_fn(icq);  in ioc_create_icq()

blk-mq.c
  378  if (!op_is_flush(op) && e->type->ops.mq.limit_depth)  in blk_mq_get_request()
  379  e->type->ops.mq.limit_depth(op, data);  in blk_mq_get_request()
  395  if (e && e->type->ops.mq.prepare_request) {  in blk_mq_get_request()
  399  e->type->ops.mq.prepare_request(rq, bio);  in blk_mq_get_request()
  488  if (e && e->type->ops.mq.finish_request)  in blk_mq_free_request()
  489  e->type->ops.mq.finish_request(rq);  in blk_mq_free_request()
  766  const struct blk_mq_ops *ops = req->q->mq_ops;  in blk_mq_rq_timed_out() local
  781  if (ops->timeout)  in blk_mq_rq_timed_out()
  782  ret = ops->timeout(req, reserved);  in blk_mq_rq_timed_out()
 1766  if (tags->rqs && set->ops->exit_request) {  in blk_mq_free_rqs()
[all …]

blk.h
  186  !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))  in __elv_next_request()
  195  if (e->type->ops.sq.elevator_activate_req_fn)  in elv_activate_rq()
  196  e->type->ops.sq.elevator_activate_req_fn(q, rq);  in elv_activate_rq()
  203  if (e->type->ops.sq.elevator_deactivate_req_fn)  in elv_deactivate_rq()
  204  e->type->ops.sq.elevator_deactivate_req_fn(q, rq);  in elv_deactivate_rq()

noop-iosched.c
   95  .ops.sq = {

blk-merge.c
  808  if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)  in blk_attempt_req_merge()
  809  if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))  in blk_attempt_req_merge()

deadline-iosched.c
  437  .ops.sq = {

mq-deadline.c
  640  .ops.mq = {

kyber-iosched.c
  809  .ops.mq = {

bfq-iosched.c
 5042  .ops.mq = {

cfq-iosched.c
 4884  .ops.sq = {

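The last eight hits are one line each: the spot where every scheduler declares its elevator_type and fills in exactly one arm of the ops union with designated initializers (.ops.sq for the legacy single-queue noop, deadline, and cfq schedulers; .ops.mq for the blk-mq mq-deadline, kyber, and bfq schedulers). A trimmed, compilable sketch of that registration shape; the hook names and bodies are illustrative stand-ins, since only the .ops.sq = { / .ops.mq = { lines appear in these results.

#include <stdio.h>

struct elevator_type {		/* cut down to what the sketch needs */
	union {
		struct { void (*insert_requests)(void); } mq;
		struct { void (*elevator_add_req_fn)(void); } sq;
	} ops;
	const char *elevator_name;
};

static void noop_add_request(void) { puts("legacy single-queue path"); }
static void kyber_insert(void) { puts("blk-mq path"); }

/* Single-queue scheduler: only the sq arm is set (cf. noop, deadline, cfq). */
static struct elevator_type elevator_noop = {
	.ops.sq = { .elevator_add_req_fn = noop_add_request },
	.elevator_name = "noop",
};

/* blk-mq scheduler: only the mq arm is set (cf. mq-deadline, kyber, bfq). */
static struct elevator_type kyber_sched = {
	.ops.mq = { .insert_requests = kyber_insert },
	.elevator_name = "kyber",
};

int main(void)
{
	elevator_noop.ops.sq.elevator_add_req_fn();
	kyber_sched.ops.mq.insert_requests();
	return 0;
}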