/block/
blk-zoned.c
    156  int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,    in blkdev_zone_mgmt()  [argument]
    173  if (!op_is_zone_mgmt(op))    in blkdev_zone_mgmt()
    195  if (op == REQ_OP_ZONE_RESET &&    in blkdev_zone_mgmt()
    201  bio->bi_opf = op;    in blkdev_zone_mgmt()
    284  enum req_opf op;    in blkdev_zone_mgmt_ioctl()  [local]
    307  op = REQ_OP_ZONE_RESET;    in blkdev_zone_mgmt_ioctl()
    310  op = REQ_OP_ZONE_OPEN;    in blkdev_zone_mgmt_ioctl()
    313  op = REQ_OP_ZONE_CLOSE;    in blkdev_zone_mgmt_ioctl()
    316  op = REQ_OP_ZONE_FINISH;    in blkdev_zone_mgmt_ioctl()
    322  return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,    in blkdev_zone_mgmt_ioctl()
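The hits above show blkdev_zone_mgmt() refusing anything that is not a zone-management op and the ioctl path translating the zone commands into REQ_OP_ZONE_* values before calling it. A minimal kernel-side sketch of a caller, assuming CONFIG_BLK_DEV_ZONED and the blkdev_zone_mgmt() signature shown at line 156; the helper name and its zone parameters are hypothetical:

#include <linux/blkdev.h>

/*
 * Sketch only: reset the write pointer of one zone. "zone_start" and
 * "zone_len" (both in 512-byte sectors) are assumed to be supplied by
 * the caller and zone-aligned.
 */
static int example_reset_zone(struct block_device *bdev,
                              sector_t zone_start, sector_t zone_len)
{
        /* blkdev_zone_mgmt() rejects ops for which op_is_zone_mgmt() is false */
        return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zone_start,
                                zone_len, GFP_KERNEL);
}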
bfq-cgroup.c
    223  unsigned int op)    in bfqg_stats_update_io_add()  [argument]
    225  blkg_rwstat_add(&bfqg->stats.queued, op, 1);    in bfqg_stats_update_io_add()
    231  void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)    in bfqg_stats_update_io_remove()  [argument]
    233  blkg_rwstat_add(&bfqg->stats.queued, op, -1);    in bfqg_stats_update_io_remove()
    236  void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)    in bfqg_stats_update_io_merged()  [argument]
    238  blkg_rwstat_add(&bfqg->stats.merged, op, 1);    in bfqg_stats_update_io_merged()
    242  u64 io_start_time_ns, unsigned int op)    in bfqg_stats_update_completion()  [argument]
    248  blkg_rwstat_add(&stats->service_time, op,    in bfqg_stats_update_completion()
    251  blkg_rwstat_add(&stats->wait_time, op,    in bfqg_stats_update_completion()
    258  unsigned int op) { }    in bfqg_stats_update_io_add()  [argument]
    [all …]
blk-lib.c
    31   unsigned int op;    in __blkdev_issue_discard()  [local]
    43   op = REQ_OP_SECURE_ERASE;    in __blkdev_issue_discard()
    47   op = REQ_OP_DISCARD;    in __blkdev_issue_discard()
    66   bio_set_op_attrs(bio, op, 0);    in __blkdev_issue_discard()
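__blkdev_issue_discard() chooses between REQ_OP_SECURE_ERASE and REQ_OP_DISCARD and stamps the bio with bio_set_op_attrs(). A hedged sketch of how a caller might reach that path through the exported blkdev_issue_discard() wrapper, assuming the (bdev, sector, nr_sects, gfp, flags) signature of kernels from this period:

#include <linux/blkdev.h>

/*
 * Sketch only: discard a byte range on a block device. "start" and
 * "len" are hypothetical, given in bytes, and assumed 512-byte aligned.
 * Passing BLKDEV_DISCARD_SECURE requests REQ_OP_SECURE_ERASE instead
 * of REQ_OP_DISCARD.
 */
static int example_discard_range(struct block_device *bdev,
                                 u64 start, u64 len, bool secure)
{
        unsigned long flags = secure ? BLKDEV_DISCARD_SECURE : 0;

        return blkdev_issue_discard(bdev, start >> 9, len >> 9,
                                    GFP_KERNEL, flags);
}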
blk-mq-debugfs.c
    145  char opbuf[16] = { }, *op;    in queue_state_write()  [local]
    162  op = strstrip(opbuf);    in queue_state_write()
    163  if (strcmp(op, "run") == 0) {    in queue_state_write()
    165  } else if (strcmp(op, "start") == 0) {    in queue_state_write()
    167  } else if (strcmp(op, "kick") == 0) {    in queue_state_write()
    170  pr_err("%s: unsupported operation '%s'\n", __func__, op);    in queue_state_write()
    327  const unsigned int op = req_op(rq);    in __blk_mq_debugfs_rq_show()  [local]
    328  const char *op_str = blk_op_str(op);    in __blk_mq_debugfs_rq_show()
    332  seq_printf(m, "%u", op);    in __blk_mq_debugfs_rq_show()
kyber-iosched.c
    195  static unsigned int kyber_sched_domain(unsigned int op)    in kyber_sched_domain()  [argument]
    197  switch (op & REQ_OP_MASK) {    in kyber_sched_domain()
    552  static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)    in kyber_limit_depth()  [argument]
    558  if (!op_is_sync(op)) {    in kyber_limit_depth()
    887  #define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_stor…    [argument]
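kyber_sched_domain() illustrates the encoding every entry in this listing relies on: the low bits of cmd_flags hold the opcode, so masking with REQ_OP_MASK recovers it and the remaining bits are request flags. A small sketch of the same dispatch pattern; the domain names below are illustrative and not Kyber's own enum:

#include <linux/blk_types.h>

/* Illustrative domains, not Kyber's internal kyber_domain enum. */
enum { EX_DOM_READ, EX_DOM_WRITE, EX_DOM_DISCARD, EX_DOM_OTHER };

static unsigned int example_sched_domain(unsigned int op)
{
        /* Mask off the flag bits; only the opcode selects the domain. */
        switch (op & REQ_OP_MASK) {
        case REQ_OP_READ:
                return EX_DOM_READ;
        case REQ_OP_WRITE:
        case REQ_OP_WRITE_SAME:
                return EX_DOM_WRITE;
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                return EX_DOM_DISCARD;
        default:
                return EX_DOM_OTHER;
        }
}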
bfq-iosched.h
    960  unsigned int op);
    961  void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op);
    962  void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op);
    964  u64 io_start_time_ns, unsigned int op);
blk-core.c
    156  inline const char *blk_op_str(unsigned int op)    in blk_op_str()  [argument]
    160  if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])    in blk_op_str()
    161  op_str = blk_op_name[op];    in blk_op_str()
    581  struct request *blk_get_request(struct request_queue *q, unsigned int op,    in blk_get_request()  [argument]
    586  WARN_ON_ONCE(op & REQ_NOWAIT);    in blk_get_request()
    589  req = blk_mq_alloc_request(q, op, flags);    in blk_get_request()
    793  const int op = bio_op(bio);    in bio_check_ro()  [local]
    795  if (part->policy && op_is_write(op)) {    in bio_check_ro()
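blk_get_request() passes op straight through to blk_mq_alloc_request(), and blk_op_str() maps the opcode back to a printable name. A hedged sketch of a typical driver-side allocation built only on the signatures shown above; the helper and the choice of REQ_OP_DRV_OUT are illustrative:

#include <linux/blkdev.h>
#include <linux/err.h>

/*
 * Sketch only: allocate a passthrough request, log its opcode name,
 * then release it. A real caller would set up and execute the command
 * between the two steps.
 */
static int example_alloc_request(struct request_queue *q)
{
        struct request *rq;

        rq = blk_get_request(q, REQ_OP_DRV_OUT, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        pr_debug("allocated %s request\n", blk_op_str(req_op(rq)));

        blk_put_request(rq);
        return 0;
}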
compat_ioctl.c
    178  compat_int_t op;    [member]
    192  err = get_user(n, &ua32->op);    in compat_blkpg_ioctl()
    193  err |= put_user(n, &a->op);    in compat_blkpg_ioctl()
blk-wbt.c
    674  const int op = req_op(rq);    in wbt_data_dir()  [local]
    676  if (op == REQ_OP_READ)    in wbt_data_dir()
    678  else if (op_is_write(op))    in wbt_data_dir()
blk-mq.c
    295  unsigned int tag, unsigned int op, u64 alloc_time_ns)    in blk_mq_rq_ctx_init()  [argument]
    319  rq->cmd_flags = op;    in blk_mq_rq_ctx_init()
    351  data->ctx->rq_dispatched[op_is_sync(op)]++;    in blk_mq_rq_ctx_init()
    422  struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,    in blk_mq_alloc_request()  [argument]
    425  struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };    in blk_mq_alloc_request()
    447  unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)    in blk_mq_alloc_request_hctx()  [argument]
    449  struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };    in blk_mq_alloc_request_hctx()
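blk_mq_rq_ctx_init() stores the caller's op verbatim in rq->cmd_flags, which is why req_op() and the op_is_*() helpers work on requests as well as bios. A sketch of allocating a request directly from blk-mq, assuming the blk_mq_alloc_request() signature at line 422:

#include <linux/blk-mq.h>

/*
 * Sketch only: allocate a sync read request without sleeping. The op
 * argument is really a cmd_flags value, so the opcode can be OR'ed
 * with request flags. Returns an ERR_PTR() on failure.
 */
static struct request *example_nowait_read_rq(struct request_queue *q)
{
        return blk_mq_alloc_request(q, REQ_OP_READ | REQ_SYNC,
                                    BLK_MQ_REQ_NOWAIT);
}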
blk-throttle.c
    2228  int op, unsigned long time)    in throtl_track_latency()  [argument]
    2234  !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||    in throtl_track_latency()
    2240  latency = get_cpu_ptr(td->latency_buckets[op]);    in throtl_track_latency()
    2243  put_cpu_ptr(td->latency_buckets[op]);    in throtl_track_latency()
bio.c
    1772  void generic_start_io_acct(struct request_queue *q, int op,    in generic_start_io_acct()  [argument]
    1775  const int sgrp = op_stat_group(op);    in generic_start_io_acct()
    1782  part_inc_in_flight(q, part, op_is_write(op));    in generic_start_io_acct()
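generic_start_io_acct() is the hook bio-based drivers use to feed per-partition I/O statistics, bucketed by op_stat_group(op). A hedged sketch of the usual pairing with generic_end_io_acct(), assuming that counterpart keeps the (q, op, part, start_jiffies) argument order of this kernel generation:

#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/jiffies.h>

/*
 * Sketch only: account one bio against the whole-device partition.
 * "disk" and "bio" are assumed valid; a real driver starts accounting
 * before queuing the bio and ends it in its completion path.
 */
static void example_account_bio(struct gendisk *disk, struct bio *bio)
{
        unsigned long start = jiffies;

        generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio),
                              &disk->part0);
        /* ... process the bio ... */
        generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0, start);
}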
ioctl.c
    37   switch (a.op) {    in blkpg_ioctl()
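blkpg_ioctl() switches on a.op, the op member of struct blkpg_ioctl_arg copied in from userspace (compat_blkpg_ioctl() above relays the same field). A hedged userspace sketch of driving it via the BLKPG ioctl; the partition number and byte offsets are hypothetical:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/blkpg.h>

/*
 * Sketch only: ask the kernel to add partition "pno" covering
 * [start, start + length), both in bytes, on the opened block device fd.
 */
static int example_add_partition(int fd, int pno, long long start,
                                 long long length)
{
        struct blkpg_partition part;
        struct blkpg_ioctl_arg arg;

        memset(&part, 0, sizeof(part));
        part.pno = pno;
        part.start = start;
        part.length = length;

        memset(&arg, 0, sizeof(arg));
        arg.op = BLKPG_ADD_PARTITION;   /* the value blkpg_ioctl() switches on */
        arg.datalen = sizeof(part);
        arg.data = &part;

        return ioctl(fd, BLKPG, &arg);
}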
bfq-iosched.c
    535  static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)    in bfq_limit_depth()  [argument]
    539  if (op_is_sync(op) && !op_is_write(op))    in bfq_limit_depth()
    543  bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];    in bfq_limit_depth()
    546  __func__, bfqd->wr_busy_queues, op_is_sync(op),    in bfq_limit_depth()
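bfq_limit_depth() indexes its depth table by op_is_sync(op) and singles out a sync non-write at line 539. A tiny sketch of that classification, using only the op_is_*() helpers the listing relies on:

#include <linux/blk_types.h>

/*
 * Sketch only: classify an op the way line 539 above does. op_is_sync()
 * is true for every read and for writes flagged REQ_SYNC, REQ_FUA or
 * REQ_PREFLUSH.
 */
static const char *example_classify(unsigned int op)
{
        if (op_is_sync(op) && !op_is_write(op))
                return "sync non-write (essentially a read)";
        if (op_is_write(op))
                return op_is_sync(op) ? "sync write" : "async write";
        return "other non-write op";
}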