Lines Matching refs:opcode (every reference to the opcode identifier; the function names and line numbers match the single-file fs/io_uring.c of pre-6.0 Linux kernels)

865 	u8				opcode;  member
1437 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_prep_async_work()
2070 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) { in io_kill_linked_timeout()
2116 if (link && link->opcode == IORING_OP_LINK_TIMEOUT) { in io_disarm_next()
3020 if (req->opcode == IORING_OP_READ_FIXED || in io_prep_rw()
3021 req->opcode == IORING_OP_WRITE_FIXED) { in io_prep_rw()
3326 u8 opcode = req->opcode; in io_import_iovec() local
3329 if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { in io_import_iovec()
3338 if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) { in io_import_iovec()
3460 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size); in io_alloc_async_data()
3461 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL); in io_alloc_async_data()
3469 if (!force && !io_op_defs[req->opcode].needs_async_setup) in io_setup_async_rw()
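
The io_op_defs[req->opcode] lookups above all index a static per-opcode definition table that records how each IORING_OP_* behaves. A minimal sketch of that pattern, built only from the fields this listing touches (needs_file, plug, buffer_select, needs_async_setup, async_size); the kernel's real struct io_op_def has more fields and a different layout:

    #include <linux/io_uring.h>             /* uapi: IORING_OP_* constants */

    /* Sketch of the opcode-indexed definition table; illustrative only. */
    struct io_op_def {
            unsigned needs_file : 1;        /* request must resolve an fd */
            unsigned plug : 1;              /* worth plugging block I/O */
            unsigned buffer_select : 1;     /* may use IOSQE_BUFFER_SELECT */
            unsigned needs_async_setup : 1; /* pre-allocates async state */
            unsigned short async_size;      /* per-request async data, bytes */
    };

    static const struct io_op_def io_op_defs[] = {
            [IORING_OP_NOP]   = {},
            [IORING_OP_READV] = {
                    .needs_file        = 1,
                    .plug              = 1,
                    .buffer_select     = 1,
                    .needs_async_setup = 1,
                    .async_size        = 64, /* stands in for sizeof(struct io_async_rw) */
            },
            /* ... one initializer per IORING_OP_*, up to IORING_OP_LAST ... */
    };

io_alloc_async_data() then kmallocs exactly async_size bytes for the request, which is why the WARN_ON_ONCE at 3460 trips if an opcode with no async state ever reaches it.
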
5546 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_double()
5553 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_single()
5744 if (req->opcode == IORING_OP_POLL_ADD) in __io_poll_execute()
5749 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask); in __io_poll_execute()
5952 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_arm_poll_handler()
5968 if ((req->opcode == IORING_OP_RECVMSG) && in io_arm_poll_handler()
5997 trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data, in io_arm_poll_handler()
6041 if (poll_only && req->opcode != IORING_OP_POLL_ADD) in io_poll_find()
6433 if (link->last->opcode == IORING_OP_LINK_TIMEOUT) in io_timeout_prep()
6639 switch (req->opcode) { in io_req_prep()
6717 req->opcode); in io_req_prep()
6723 if (!io_op_defs[req->opcode].needs_async_setup) in io_req_prep_async()
6730 switch (req->opcode) { in io_req_prep_async()
6743 req->opcode); in io_req_prep_async()
6835 switch (req->opcode) { in io_clean_op()
6849 switch (req->opcode) { in io_clean_op()
6918 switch (req->opcode) { in io_issue_sqe()
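
The switch (req->opcode) statements above, in io_req_prep(), io_req_prep_async(), io_clean_op() and io_issue_sqe(), give every opcode a two-phase life cycle: prep validates the SQE, issue performs the work, and clean_op undoes any per-opcode state on teardown. An abbreviated sketch of the prep fan-out; io_prep_rw() is real and appears at 3020 above, but the case list and signatures here are trimmed:

    /* Abbreviated fan-out in the style of io_req_prep(). */
    static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
    {
            switch (req->opcode) {
            case IORING_OP_NOP:
                    return 0;
            case IORING_OP_READ_FIXED:
            case IORING_OP_WRITE_FIXED:
                    return io_prep_rw(req, sqe);
            /* ... a case per supported IORING_OP_* ... */
            }
            /* io_init_req() already range-checked the opcode, so reaching
             * here means an opcode was defined but never wired up */
            printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
                        req->opcode);
            return -EINVAL;
    }
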
7323 if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) in io_check_restriction()
7346 req->opcode = READ_ONCE(sqe->opcode); in io_init_req()
7357 if (unlikely(req->opcode >= IORING_OP_LAST)) in io_init_req()
7363 !io_op_defs[req->opcode].buffer_select) in io_init_req()
7383 io_op_defs[req->opcode].plug) { in io_init_req()
7388 if (io_op_defs[req->opcode].needs_file) { in io_init_req()
7439 trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data, in io_submit_sqe()
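
io_init_req() is where the opcode crosses the user/kernel boundary. The SQE sits in memory shared with userspace, so the value is captured exactly once with READ_ONCE(), range-checked against IORING_OP_LAST before it may index io_op_defs[], and, on restricted rings, tested against the sqe_op allow-bitmap. Condensed from the lines above, with the surrounding setup elided:

    static int io_init_req_sketch(struct io_ring_ctx *ctx, struct io_kiocb *req,
                                  const struct io_uring_sqe *sqe)
    {
            /* one read: later uses cannot see a second, different value
             * if userspace rewrites the SQE concurrently */
            req->opcode = READ_ONCE(sqe->opcode);

            /* reject out-of-range opcodes before any io_op_defs[] access */
            if (unlikely(req->opcode >= IORING_OP_LAST))
                    return -EINVAL;

            /* rings set up with IORING_REGISTER_RESTRICTIONS only accept
             * opcodes whose bit is set in the sqe_op bitmap */
            if (ctx->restricted &&
                !test_bit(req->opcode, ctx->restrictions.sqe_op))
                    return -EACCES;

            return 0;
    }

Everything downstream, including the io_op_defs[] lookups and the switch dispatch above, can then use req->opcode without re-validating it.
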
10416 seq_printf(m, " op=%d, task_works=%d\n", req->opcode, in __io_uring_show_fdinfo()
10782 switch (res[i].opcode) { in io_register_restrictions()
11094 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, in __io_uring_register() argument
11110 opcode = array_index_nospec(opcode, IORING_REGISTER_LAST); in __io_uring_register()
11111 if (!test_bit(opcode, ctx->restrictions.register_op)) in __io_uring_register()
11115 if (io_register_op_must_quiesce(opcode)) { in __io_uring_register()
11121 switch (opcode) { in __io_uring_register()
11151 if (opcode == IORING_REGISTER_EVENTFD_ASYNC) in __io_uring_register()
11226 if (io_register_op_must_quiesce(opcode)) { in __io_uring_register()
11234 SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode, in SYSCALL_DEFINE4() argument
11241 if (opcode >= IORING_REGISTER_LAST) in SYSCALL_DEFINE4()
11257 ret = __io_uring_register(ctx, opcode, arg, nr_args); in SYSCALL_DEFINE4()
11259 trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, in SYSCALL_DEFINE4()
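
The register path applies the same discipline to its own opcode namespace. SYSCALL_DEFINE4(io_uring_register, ...) rejects anything at or above IORING_REGISTER_LAST, and on restricted rings __io_uring_register() additionally clamps the value with array_index_nospec() before using it as a bit index, so a mispredicted bounds check cannot speculatively read past the restrictions bitmap (Spectre v1 hardening). A condensed sketch of the check-then-clamp idiom; check_register_opcode is a hypothetical helper name, in the kernel these checks sit inline in the two functions shown:

    #include <linux/nospec.h>       /* array_index_nospec() */

    static int check_register_opcode(struct io_ring_ctx *ctx, unsigned int opcode)
    {
            /* architectural bounds check, as at the syscall entry */
            if (opcode >= IORING_REGISTER_LAST)
                    return -EINVAL;

            if (ctx->restricted) {
                    /* clamp so even speculative execution stays in range */
                    opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
                    if (!test_bit(opcode, ctx->restrictions.register_op))
                            return -EACCES;
            }
            return 0;
    }
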
11276 BUILD_BUG_SQE_ELEM(0, __u8, opcode); in io_uring_init()
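
BUILD_BUG_SQE_ELEM(0, __u8, opcode) pins the ABI at compile time: the build fails if the opcode member of struct io_uring_sqe ever moves from byte offset 0 or stops being one byte wide. The same assertion can be written in standard C11 against the uapi header, e.g.:

    #include <stddef.h>             /* offsetof */
    #include <linux/io_uring.h>     /* struct io_uring_sqe, __u8 */

    /* opcode must stay an 8-bit field at byte offset 0 of the SQE:
     * userspace writes it there and io_init_req() reads it back */
    _Static_assert(offsetof(struct io_uring_sqe, opcode) == 0,
                   "sqe->opcode moved: io_uring ABI broken");
    _Static_assert(sizeof(((struct io_uring_sqe *)0)->opcode) == sizeof(__u8),
                   "sqe->opcode resized: io_uring ABI broken");

That fixed offset is also why the req->opcode member at 865 is a u8: it mirrors the SQE field it is copied from at 7346.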