
Searched refs:op (Results 1 – 15 of 15) sorted by relevance

/block/
kyber-iosched.c
108 unsigned int op = rq->cmd_flags; in rq_sched_domain() local
110 if ((op & REQ_OP_MASK) == REQ_OP_READ) in rq_sched_domain()
112 else if ((op & REQ_OP_MASK) == REQ_OP_WRITE && op_is_sync(op)) in rq_sched_domain()
429 static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data) in kyber_limit_depth() argument
435 if (!op_is_sync(op)) { in kyber_limit_depth()
647 #define KYBER_LAT_SHOW_STORE(op) \ argument
648 static ssize_t kyber_##op##_lat_show(struct elevator_queue *e, \
653 return sprintf(page, "%llu\n", kqd->op##_lat_nsec); \
656 static ssize_t kyber_##op##_lat_store(struct elevator_queue *e, \
667 kqd->op##_lat_nsec = nsec; \
[all …]
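In the kyber lines above, the low REQ_OP_MASK bits of rq->cmd_flags name the operation and the remaining bits are modifier flags that op_is_sync() inspects. (The op in KYBER_LAT_SHOW_STORE() is different: a macro parameter pasted into identifiers such as kyber_read_lat_show() and kqd->read_lat_nsec, not a flags word.) A minimal sketch of the domain classification, assuming ~4.14 kernel headers; the enum is kyber's file-local one, restated here:

#include <linux/blk_types.h>    /* REQ_OP_*, REQ_OP_MASK, op_is_sync() */

/* Kyber's scheduling domains (a file-local enum in kyber-iosched.c). */
enum { KYBER_READ, KYBER_SYNC_WRITE, KYBER_OTHER };

/* Sketch of rq_sched_domain(): classify a request by its op bits. */
static unsigned int rq_domain(unsigned int op)
{
    if ((op & REQ_OP_MASK) == REQ_OP_READ)
        return KYBER_READ;
    else if ((op & REQ_OP_MASK) == REQ_OP_WRITE && op_is_sync(op))
        return KYBER_SYNC_WRITE;       /* only sync writes get this domain */
    else
        return KYBER_OTHER;            /* async writes, discards, ... */
}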
bfq-cgroup.c
233 unsigned int op) in bfqg_stats_update_io_add() argument
235 blkg_rwstat_add(&bfqg->stats.queued, op, 1); in bfqg_stats_update_io_add()
241 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) in bfqg_stats_update_io_remove() argument
243 blkg_rwstat_add(&bfqg->stats.queued, op, -1); in bfqg_stats_update_io_remove()
246 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) in bfqg_stats_update_io_merged() argument
248 blkg_rwstat_add(&bfqg->stats.merged, op, 1); in bfqg_stats_update_io_merged()
252 uint64_t io_start_time, unsigned int op) in bfqg_stats_update_completion() argument
258 blkg_rwstat_add(&stats->service_time, op, in bfqg_stats_update_completion()
261 blkg_rwstat_add(&stats->wait_time, op, in bfqg_stats_update_completion()
1156 unsigned int op) { } in bfqg_stats_update_io_add() argument
[all …]
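The bfq cgroup stats hand the raw op word straight to blkg_rwstat_add(), which uses it to pick the read/write and sync/async buckets: +1 when an I/O is queued, -1 when it leaves, and time deltas for the service and wait statistics. A minimal sketch of the queued-counter pattern, assuming CONFIG_BLK_CGROUP; blkg_rwstat_add() is the real blk-cgroup helper, the wrapper names are illustrative:

#include <linux/blk-cgroup.h>   /* struct blkg_rwstat, blkg_rwstat_add() */

static void stats_io_queued(struct blkg_rwstat *queued, unsigned int op)
{
    blkg_rwstat_add(queued, op, 1);    /* op selects the rw/sync bucket */
}

static void stats_io_removed(struct blkg_rwstat *queued, unsigned int op)
{
    blkg_rwstat_add(queued, op, -1);   /* negative delta when it leaves */
}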
blk-mq-debugfs.c
95 char opbuf[16] = { }, *op; in queue_state_write() local
112 op = strstrip(opbuf); in queue_state_write()
113 if (strcmp(op, "run") == 0) { in queue_state_write()
115 } else if (strcmp(op, "start") == 0) { in queue_state_write()
117 } else if (strcmp(op, "kick") == 0) { in queue_state_write()
120 pr_err("%s: unsupported operation '%s'\n", __func__, op); in queue_state_write()
307 const unsigned int op = rq->cmd_flags & REQ_OP_MASK; in __blk_mq_debugfs_rq_show() local
310 if (op < ARRAY_SIZE(op_name) && op_name[op]) in __blk_mq_debugfs_rq_show()
311 seq_printf(m, "%s", op_name[op]); in __blk_mq_debugfs_rq_show()
313 seq_printf(m, "%d", op); in __blk_mq_debugfs_rq_show()
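blk-mq-debugfs.c shows op in two roles: queue_state_write() parses a textual command ("run", "start", "kick") from a debugfs write, while __blk_mq_debugfs_rq_show() prints a request's operation by indexing an op_name[] table, falling back to the raw number for ops it does not know. A sketch of the lookup, assuming kernel headers; the two-entry table stands in for the file's full macro-generated one:

#include <linux/blkdev.h>       /* struct request, REQ_OP_* */
#include <linux/kernel.h>       /* ARRAY_SIZE() */
#include <linux/seq_file.h>     /* seq_printf() */

static const char *const op_name[] = {
    [REQ_OP_READ]  = "READ",
    [REQ_OP_WRITE] = "WRITE",
};

static void show_op(struct seq_file *m, struct request *rq)
{
    const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

    if (op < ARRAY_SIZE(op_name) && op_name[op])
        seq_printf(m, "%s", op_name[op]);
    else
        seq_printf(m, "%d", op);    /* unknown op: print the number */
}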
blk-wbt.c
563 const int op = bio_op(bio); in wbt_should_throttle() local
568 if (op != REQ_OP_WRITE) in wbt_should_throttle()
701 const int op = req_op(rq); in wbt_data_dir() local
703 if (op == REQ_OP_READ) in wbt_data_dir()
705 else if (op == REQ_OP_WRITE || op == REQ_OP_FLUSH) in wbt_data_dir()
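blk-wbt reads the op through the accessor helpers: bio_op() masks bio->bi_opf and req_op() masks rq->cmd_flags, both with REQ_OP_MASK. Writeback throttling only throttles writes, and its stats count flushes together with writes. A sketch of the direction mapping, with the 0/1 return convention assumed for illustration:

#include <linux/blkdev.h>       /* req_op(), REQ_OP_* */

static int data_dir(struct request *rq)
{
    const int op = req_op(rq);  /* rq->cmd_flags & REQ_OP_MASK */

    if (op == REQ_OP_READ)
        return 0;               /* read bucket */
    else if (op == REQ_OP_WRITE || op == REQ_OP_FLUSH)
        return 1;               /* flushes are accounted as writes */
    return -1;                  /* other ops are not tracked */
}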
blk-lib.c
33 unsigned int op; in __blkdev_issue_discard() local
43 op = REQ_OP_SECURE_ERASE; in __blkdev_issue_discard()
47 op = REQ_OP_DISCARD; in __blkdev_issue_discard()
90 bio_set_op_attrs(bio, op, 0); in __blkdev_issue_discard()
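__blkdev_issue_discard() chooses the op once, based on the caller's flags, and then stamps it on every bio it builds via bio_set_op_attrs() (a pre-5.x helper that sets bio->bi_opf = op | op_flags). A minimal sketch of that selection, assuming ~4.14 headers:

#include <linux/bio.h>          /* bio_set_op_attrs() */
#include <linux/blkdev.h>       /* BLKDEV_DISCARD_SECURE, REQ_OP_* */

static unsigned int pick_discard_op(unsigned long flags)
{
    if (flags & BLKDEV_DISCARD_SECURE)
        return REQ_OP_SECURE_ERASE; /* device must support secure erase */
    return REQ_OP_DISCARD;
}

static void stamp_bio(struct bio *bio, unsigned int op)
{
    bio_set_op_attrs(bio, op, 0);   /* bio->bi_opf = op | 0 */
}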
bfq-iosched.h
837 unsigned int op);
838 void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
839 void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op);
841 uint64_t io_start_time, unsigned int op);
blk-core.c
1183 static struct request *__get_request(struct request_list *rl, unsigned int op, in __get_request() argument
1191 const bool is_sync = op_is_sync(op); in __get_request()
1200 may_queue = elv_may_queue(q, op); in __get_request()
1255 if (!op_is_flush(op) && !blk_queue_bypass(q)) { in __get_request()
1273 rq->cmd_flags = op; in __get_request()
1303 trace_block_getrq(q, bio, op); in __get_request()
1362 static struct request *get_request(struct request_queue *q, unsigned int op, in get_request() argument
1365 const bool is_sync = op_is_sync(op); in get_request()
1375 rq = __get_request(rl, op, bio, gfp_mask); in get_request()
1379 if (op & REQ_NOWAIT) { in get_request()
[all …]
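In the legacy request path above, one op word drives several decisions: op_is_sync(op) selects the sync or async request list, op_is_flush(op) skips the usual bypass accounting, REQ_NOWAIT turns a would-be sleep into a fast failure, and the word finally lands in rq->cmd_flags. An illustrative caller, assuming ~4.14 headers (blk_get_request() is the real entry point into get_request()):

#include <linux/blkdev.h>
#include <linux/err.h>

static struct request *alloc_read_rq(struct request_queue *q)
{
    /* The op travels via get_request()/__get_request() into
     * rq->cmd_flags; allocation may sleep because of GFP_KERNEL. */
    struct request *rq = blk_get_request(q, REQ_OP_READ, GFP_KERNEL);

    return IS_ERR(rq) ? NULL : rq;  /* e.g. ERR_PTR(-EAGAIN) under REQ_NOWAIT */
}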
bsg.c
180 bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *op) in bsg_validate_sgv4_hdr() argument
201 *op = hdr->dout_xfer_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN; in bsg_validate_sgv4_hdr()
214 unsigned int op, dxfer_len; in bsg_map_hdr() local
229 ret = bsg_validate_sgv4_hdr(hdr, &op); in bsg_map_hdr()
236 rq = blk_get_request(q, op, GFP_KERNEL); in bsg_map_hdr()
244 if (op == REQ_OP_SCSI_OUT && hdr->din_xfer_len) { in bsg_map_hdr()
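bsg infers the op from the SG v4 header's transfer lengths: a non-zero dout_xfer_len means data flows to the device. A sketch of that check, assuming the pre-5.x REQ_OP_SCSI_* names (later renamed to REQ_OP_DRV_*); as line 244 above shows, a header with both lengths set makes bsg_map_hdr() map a second request for the bidirectional case:

#include <linux/bsg.h>          /* struct sg_io_v4 */
#include <linux/blk_types.h>    /* REQ_OP_SCSI_IN, REQ_OP_SCSI_OUT */

static int sgv4_op(const struct sg_io_v4 *hdr)
{
    return hdr->dout_xfer_len ? REQ_OP_SCSI_OUT  /* data to the device */
                              : REQ_OP_SCSI_IN;  /* data from the device */
}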
compat_ioctl.c
178 compat_int_t op; member
192 err = get_user(n, &ua32->op); in compat_blkpg_ioctl()
193 err |= put_user(n, &a->op); in compat_blkpg_ioctl()
Kconfig.iosched
10 The no-op I/O scheduler is a minimal scheduler that does basic merging
58 bool "No-op"
blk-mq.c
295 unsigned int tag, unsigned int op) in blk_mq_rq_ctx_init() argument
319 rq->cmd_flags = op; in blk_mq_rq_ctx_init()
349 data->ctx->rq_dispatched[op_is_sync(op)]++; in blk_mq_rq_ctx_init()
354 struct bio *bio, unsigned int op, in blk_mq_get_request() argument
368 if (op & REQ_NOWAIT) in blk_mq_get_request()
378 if (!op_is_flush(op) && e->type->ops.mq.limit_depth) in blk_mq_get_request()
379 e->type->ops.mq.limit_depth(op, data); in blk_mq_get_request()
392 rq = blk_mq_rq_ctx_init(data, tag, op); in blk_mq_get_request()
393 if (!op_is_flush(op)) { in blk_mq_get_request()
407 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, in blk_mq_alloc_request() argument
[all …]
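The blk-mq path mirrors the legacy one: blk_mq_rq_ctx_init() stamps the op into rq->cmd_flags, op_is_flush() decides whether the request goes through the elevator at all, and a REQ_NOWAIT bit carried in the op word (taken from the bio) makes tag allocation fail fast rather than block. An illustrative caller, assuming ~4.14 headers:

#include <linux/blk-mq.h>
#include <linux/err.h>

static struct request *alloc_mq_read(struct request_queue *q)
{
    /* The third argument takes BLK_MQ_REQ_* allocation flags; the op
     * itself ends up in rq->cmd_flags via blk_mq_rq_ctx_init(). */
    struct request *rq = blk_mq_alloc_request(q, REQ_OP_READ, 0);

    return IS_ERR(rq) ? NULL : rq;
}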
cfq-iosched.c
677 unsigned int op) in cfqg_stats_update_io_add() argument
679 blkg_rwstat_add(&cfqg->stats.queued, op, 1); in cfqg_stats_update_io_add()
694 unsigned int op) in cfqg_stats_update_io_remove() argument
696 blkg_rwstat_add(&cfqg->stats.queued, op, -1); in cfqg_stats_update_io_remove()
700 unsigned int op) in cfqg_stats_update_io_merged() argument
702 blkg_rwstat_add(&cfqg->stats.merged, op, 1); in cfqg_stats_update_io_merged()
707 unsigned int op) in cfqg_stats_update_completion() argument
713 blkg_rwstat_add(&stats->service_time, op, now - io_start_time); in cfqg_stats_update_completion()
715 blkg_rwstat_add(&stats->wait_time, op, in cfqg_stats_update_completion()
794 struct cfq_group *curr_cfqg, unsigned int op) { } in cfqg_stats_update_io_add() argument
[all …]
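cfq's stat updaters follow the same rwstat pattern as bfq's above; the last line shows the other half of the idiom: with CONFIG_CFQ_GROUP_IOSCHED disabled, each hook collapses to an empty inline stub so call sites need no #ifdefs of their own. A sketch of that arrangement, with illustrative names and CONFIG_BLK_CGROUP assumed for the enabled branch:

#include <linux/blk-cgroup.h>

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void stats_update_io_add(struct blkg_rwstat *queued, unsigned int op)
{
    blkg_rwstat_add(queued, op, 1);
}
#else
/* Group scheduling compiled out: the hook does nothing. */
static inline void stats_update_io_add(struct blkg_rwstat *queued,
                                       unsigned int op) { }
#endif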
elevator.c
779 int elv_may_queue(struct request_queue *q, unsigned int op) in elv_may_queue() argument
787 return e->type->ops.sq.elevator_may_queue_fn(q, op); in elv_may_queue()
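elv_may_queue() is a thin dispatcher: a legacy (single-queue) scheduler may supply an elevator_may_queue_fn hook, and without one the core simply allows the allocation. A sketch of that shape, assuming ~4.14 headers:

#include <linux/blkdev.h>
#include <linux/elevator.h>     /* ELV_MQUEUE_MAY */

static int may_queue(struct request_queue *q, unsigned int op)
{
    struct elevator_queue *e = q->elevator;

    if (e->type->ops.sq.elevator_may_queue_fn)
        return e->type->ops.sq.elevator_may_queue_fn(q, op);
    return ELV_MQUEUE_MAY;      /* no hook: always allow */
}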
ioctl.c
36 switch (a.op) { in blkpg_ioctl()
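Here op means something different again: blkpg_ioctl() switches on the partition-operation code that userspace placed in struct blkpg_ioctl_arg. A sketch of the dispatch, using the real BLKPG_* constants with the handlers elided:

#include <linux/blkpg.h>        /* struct blkpg_ioctl_arg, BLKPG_* */
#include <linux/errno.h>

static int dispatch_blkpg(struct blkpg_ioctl_arg *a)
{
    switch (a->op) {
    case BLKPG_ADD_PARTITION:
        return 0;               /* add_partition(...) in the real code */
    case BLKPG_DEL_PARTITION:
        return 0;               /* delete_partition(...) */
    default:
        return -EINVAL;         /* unknown op code */
    }
}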
blk-throttle.c
2237 int op, unsigned long time) in throtl_track_latency() argument
2242 if (!td || td->limit_index != LIMIT_LOW || op != REQ_OP_READ || in throtl_track_latency()
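blk-throttle only samples latency for reads, and only while the cgroup low limit is the active one. A sketch of that early-out; struct throtl_data and LIMIT_LOW are blk-throttle internals, so minimal stand-ins are declared here for shape only:

#include <linux/blkdev.h>

/* Stand-ins for blk-throttle's file-local definitions. */
enum { LIMIT_LOW, LIMIT_MAX };
struct throtl_data { int limit_index; };

static void track_latency(struct throtl_data *td, int op, unsigned long time)
{
    if (!td || td->limit_index != LIMIT_LOW || op != REQ_OP_READ)
        return;                 /* not a read under LIMIT_LOW: skip */
    /* ... pick a request-size bucket and record 'time' ... */
}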