/kernel/linux/linux-5.10/block/
D | blk-merge.c |
    550  if (req_op(rq) == REQ_OP_DISCARD)  in blk_rq_get_max_segments()
    565  if (req_op(req) == REQ_OP_DISCARD)  in ll_new_hw_segment()
    705  part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);  in blk_account_io_merge_request()
    733  if (req_op(req) != req_op(next))  in attempt_merge()
    740  if (req_op(req) == REQ_OP_WRITE_SAME &&  in attempt_merge()
    857  if (req_op(rq) != bio_op(bio))  in blk_rq_merge_ok()
    881  if (req_op(rq) == REQ_OP_WRITE_SAME &&  in blk_rq_merge_ok()
    915  part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);  in blk_account_io_merge_bio()

D | blk-core.c |
    233  blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),  in print_req_error()
    250  if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {  in req_bio_endio()
   1142  unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));  in blk_cloned_rq_check_limits()
   1270  const int sgrp = op_stat_group(req_op(req));  in blk_account_io_completion()
   1289  const int sgrp = op_stat_group(req_op(req));  in blk_account_io_done()
   1435  if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&  in blk_update_request()

D | blk-map.c |
    161  bio->bi_opf |= req_op(rq);  in bio_copy_user_iov()
    257  bio->bi_opf |= req_op(rq);  in bio_map_user_iov()
    706  bio->bi_opf |= req_op(rq);  in blk_rq_map_kern()

D | blk-zoned.c |
     74  switch (req_op(rq)) {  in blk_req_needs_zone_write_lock()

/kernel/linux/linux-5.10/include/linux/ |
D | blkdev.h |
    267  return blk_op_is_scsi(req_op(rq));  in blk_rq_is_scsi()
    272  return blk_op_is_private(req_op(rq));  in blk_rq_is_private()
    692  #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
    695  (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
    822  if (req_op(rq) == REQ_OP_FLUSH)  in rq_mergeable()
    825  if (req_op(rq) == REQ_OP_WRITE_ZEROES)  in rq_mergeable()
    828  if (req_op(rq) == REQ_OP_ZONE_APPEND)  in rq_mergeable()
   1122  req_op(rq) == REQ_OP_DISCARD ||  in blk_rq_get_max_sectors()
   1123  req_op(rq) == REQ_OP_SECURE_ERASE)  in blk_rq_get_max_sectors()
   1124  return blk_queue_get_max_sectors(q, req_op(rq));  in blk_rq_get_max_sectors()
    [all …]

D | blk_types.h |
    449  #define req_op(req) \  macro

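For context, this is the definition the hits above resolve to: a sketch of the relevant 5.10 include/linux/blk_types.h lines, reproduced from memory, so the exact constant values are worth checking against the tree.

    /* The operation is stored in the low REQ_OP_BITS of rq->cmd_flags. */
    #define REQ_OP_BITS     8
    #define REQ_OP_MASK     ((1 << REQ_OP_BITS) - 1)

    /* Extract the enum req_opf opcode (REQ_OP_READ, REQ_OP_WRITE, ...). */
    #define req_op(req) \
            ((req)->cmd_flags & REQ_OP_MASK)

    /* Ops that transfer data toward the device have odd opcode values. */
    static inline bool op_is_write(unsigned int op)
    {
            return (op & 1);
    }
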
/kernel/linux/linux-5.10/drivers/block/rnbd/ |
D | rnbd-proto.h |
    267  switch (req_op(rq)) {  in rq_to_rnbd_flags()
    288  req_op(rq), (unsigned long long)rq->cmd_flags);  in rq_to_rnbd_flags()

/kernel/linux/linux-5.10/drivers/crypto/hisilicon/sec2/ |
D | sec_crypto.c |
    189  ctx->req_op->buf_unmap(ctx, req);  in sec_req_cb()
    191  ctx->req_op->callback(ctx, req, err);  in sec_req_cb()
    953  ret = ctx->req_op->buf_map(ctx, req);  in sec_request_transfer()
    957  ctx->req_op->do_transfer(ctx, req);  in sec_request_transfer()
    959  ret = ctx->req_op->bd_fill(ctx, req);  in sec_request_transfer()
    966  ctx->req_op->buf_unmap(ctx, req);  in sec_request_transfer()
    973  ctx->req_op->buf_unmap(ctx, req);  in sec_request_untransfer()
   1257  ret = ctx->req_op->bd_send(ctx, req);  in sec_process()
   1308  ctx->req_op = &sec_skcipher_req_ops;  in sec_skcipher_ctx_init()
   1331  ctx->req_op = &sec_aead_req_ops;  in sec_aead_init()
    [all …]

D | sec.h |
    122  const struct sec_req_op *req_op;  member

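Note that these HiSilicon SEC2 hits match on the identifier only: in this driver, req_op is a struct member (sec.h line 122), a per-algorithm callback table, not the block-layer req_op() macro. A rough sketch of that table, reconstructed from the call sites listed above; the signatures are inferred, not copied from the tree.

    /* Callback table the sec2 driver dispatches through as ctx->req_op->...;
     * member list and signatures are inferred from the hits above. */
    struct sec_req_op {
            int  (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
            void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
            void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
            int  (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
            int  (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
            void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
    };
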
/kernel/linux/linux-5.10/drivers/scsi/ |
D | sd_zbc.c |
    417  switch (req_op(rq)) {  in sd_zbc_need_zone_wp_update()
    447  enum req_opf op = req_op(rq);  in sd_zbc_zone_wp_update()
    519  if (op_is_zone_mgmt(req_op(rq)) &&  in sd_zbc_complete()
    532  if (req_op(rq) == REQ_OP_ZONE_APPEND)  in sd_zbc_complete()

/kernel/linux/linux-5.10/drivers/block/ |
D | null_blk_trace.h |
     44  __entry->op = req_op(cmd->rq);

D | xen-blkfront.c |
    578  if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)  in blkif_queue_discard_req()
    783  BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);  in blkif_queue_rw_req()
    795  if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {  in blkif_queue_rw_req()
    884  if (unlikely(req_op(req) == REQ_OP_DISCARD ||  in blkif_queue_request()
    885  req_op(req) == REQ_OP_SECURE_ERASE))  in blkif_queue_request()
    905  ((req_op(req) == REQ_OP_FLUSH) &&  in blkif_request_flush_invalid()
   2182  if (req_op(shadow[j].request) == REQ_OP_FLUSH ||  in blkfront_resume()
   2183  req_op(shadow[j].request) == REQ_OP_DISCARD ||  in blkfront_resume()
   2184  req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||  in blkfront_resume()

D | ps3disk.c |
    183  switch (req_op(req)) {  in ps3disk_do_request()
    246  if (req_op(req) == REQ_OP_FLUSH) {  in ps3disk_interrupt()

D | null_blk_main.c |
   1158  if (req_op(rq) == REQ_OP_DISCARD) {  in null_handle_rq()
   1167  op_is_write(req_op(rq)), sector,  in null_handle_rq()
   1289  } else if (req_op(cmd->rq) == REQ_OP_READ) {  in nullb_zero_read_cmd_buffer()
   1509  return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));  in null_queue_rq()

D | loop.c |
    463  req_op(rq) != REQ_OP_READ) {  in lo_complete_rq()
    600  switch (req_op(rq)) {  in do_req_filebacked()
   2009  switch (req_op(rq)) {  in loop_queue_rq()
   2036  const bool write = op_is_write(req_op(rq));  in loop_handle_cmd()

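Several of the drivers above derive only the data direction from the opcode rather than testing each value (loop.c line 2036, null_blk_main.c line 1167; rq_data_dir() and rq_dma_dir() in blkdev.h are built the same way). A minimal sketch of that idiom in a blk-mq queue_rq handler; my_dev_transfer() and my_dev_queue_rq() are assumed names for illustration, not kernel APIs.

    #include <linux/blk-mq.h>
    #include <linux/blkdev.h>

    /* Assumed driver-specific data path, declared only for this sketch. */
    static blk_status_t my_dev_transfer(struct request *rq, bool is_write,
                                        sector_t pos, unsigned int nr_sectors);

    static blk_status_t my_dev_queue_rq(struct blk_mq_hw_ctx *hctx,
                                        const struct blk_mq_queue_data *bd)
    {
            struct request *rq = bd->rq;
            /* Same test rq_data_dir() uses: write-style ops have odd opcodes. */
            const bool is_write = op_is_write(req_op(rq));

            blk_mq_start_request(rq);
            return my_dev_transfer(rq, is_write, blk_rq_pos(rq),
                                   blk_rq_sectors(rq));
    }
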
/kernel/linux/linux-5.10/drivers/mmc/core/ |
D | queue.c |
     48  switch (req_op(req)) {  in mmc_cqe_issue_type()
     68  if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)  in mmc_issue_type()

/kernel/linux/linux-5.10/drivers/md/ |
D | dm-rq.c |
    218  if (req_op(clone) == REQ_OP_DISCARD &&  in dm_done()
    221  else if (req_op(clone) == REQ_OP_WRITE_SAME &&  in dm_done()
    224  else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&  in dm_done()

/kernel/linux/linux-5.10/arch/um/drivers/ |
D | ubd_kern.c |
    530  if ((io_req->error == BLK_STS_NOTSUPP) && (req_op(io_req->req) == REQ_OP_DISCARD)) {  in ubd_handler()
   1305  if (req_op(req->req) == REQ_OP_READ) {  in cowify_req()
   1325  int op = req_op(req);  in ubd_map_req()
   1389  int op = req_op(req);  in ubd_submit_request()
   1426  switch (req_op(req)) {  in ubd_queue_rq()
   1544  if (req_op(req->req) == REQ_OP_FLUSH) {  in do_io()
   1565  switch (req_op(req->req)) {  in do_io()

/kernel/linux/linux-5.10/drivers/mtd/ |
D | mtd_blkdevs.c |
     75  if (req_op(req) == REQ_OP_FLUSH) {  in do_blktrans_request()
     85  switch (req_op(req)) {  in do_blktrans_request()

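do_blktrans_request() above, like ps3disk.c, loop.c, ubd_kern.c and ide-cd.c elsewhere in this list, dispatches with a plain switch on req_op(). A minimal sketch of that pattern; my_flush/my_discard/my_rw/my_do_request are assumed driver helpers, not kernel APIs.

    #include <linux/blkdev.h>

    static blk_status_t my_flush(struct request *rq);     /* assumed helpers */
    static blk_status_t my_discard(struct request *rq);
    static blk_status_t my_rw(struct request *rq, bool is_write);

    /* Dispatch on the request operation, as the drivers listed here do. */
    static blk_status_t my_do_request(struct request *rq)
    {
            switch (req_op(rq)) {
            case REQ_OP_FLUSH:
                    return my_flush(rq);
            case REQ_OP_DISCARD:
                    return my_discard(rq);
            case REQ_OP_READ:
                    return my_rw(rq, false);
            case REQ_OP_WRITE:
                    return my_rw(rq, true);
            default:
                    /* Unknown or unsupported op: reject rather than ignore. */
                    return BLK_STS_NOTSUPP;
            }
    }
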
/kernel/linux/linux-5.10/drivers/ide/ |
D | ide-cd.c |
    317  if (req_op(rq) == REQ_OP_WRITE) {  in cdrom_decode_status()
    667  switch (req_op(rq)) {  in cdrom_newpc_intr()
    745  switch (req_op(rq)) {  in cdrom_newpc_intr()
    889  switch (req_op(rq)) {  in ide_cd_do_request()

D | ide-floppy.c |
    259  switch (req_op(rq)) {  in ide_floppy_do_request()

/kernel/linux/linux-5.10/drivers/nvme/host/ |
D | zns.c |
    253  if (req_op(req) == REQ_OP_ZONE_RESET_ALL)  in nvme_setup_zone_mgmt_send()

/kernel/linux/linux-5.10/drivers/s390/block/ |
D | dasd_fba.c |
    569  if (req_op(req) == REQ_OP_DISCARD || req_op(req) == REQ_OP_WRITE_ZEROES)  in dasd_fba_build_cp()

/kernel/linux/linux-5.10/drivers/block/paride/ |
D | pd.c |
    487  switch (req_op(pd_req)) {  in do_pd_io_start()
    500  if (req_op(pd_req) == REQ_OP_READ)  in do_pd_io_start()

/kernel/linux/linux-5.10/drivers/nvme/target/ |
D | passthru.c |
    205  bio->bi_opf = req_op(rq) | op_flags;  in nvmet_passthru_map_sg()