Lines matching refs:flags (two short usage sketches follow the listing)
336 unsigned int flags; member
523 u32 flags; member
530 int flags; member
539 int flags; member
565 u32 flags; member
641 unsigned int flags; member
657 unsigned int flags; member
673 int flags; member
679 int flags; member
703 int flags; member
883 unsigned int flags; member
1159 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); in req_ref_inc_not_zero()
1165 if (likely(!(req->flags & REQ_F_REFCOUNT))) in req_ref_put_and_test()
1174 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); in req_ref_get()
1181 if (!(req->flags & REQ_F_REFCOUNT)) { in __io_req_set_refcount()
1182 req->flags |= REQ_F_REFCOUNT; in __io_req_set_refcount()
1226 if (req->flags & REQ_F_INFLIGHT) in io_match_task()
1237 if (req->flags & REQ_F_INFLIGHT) in io_match_linked()
1257 if (head->flags & REQ_F_LINK_TIMEOUT) { in io_match_task_safe()
1272 req->flags |= REQ_F_FAIL; in req_set_fail()
1348 ctx->flags = p->flags; in io_ring_ctx_alloc()
1390 if (unlikely(req->flags & REQ_F_IO_DRAIN)) { in req_need_defer()
1410 return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE); in io_req_ffs_set()
1415 if (!(req->flags & REQ_F_INFLIGHT)) { in io_req_track_inflight()
1416 req->flags |= REQ_F_INFLIGHT; in io_req_track_inflight()
1426 req->flags &= ~REQ_F_ARM_LTIMEOUT; in __io_prep_linked_timeout()
1427 req->flags |= REQ_F_LINK_TIMEOUT; in __io_prep_linked_timeout()
1437 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT))) in io_prep_linked_timeout()
1447 if (!(req->flags & REQ_F_CREDS)) { in io_prep_async_work()
1448 req->flags |= REQ_F_CREDS; in io_prep_async_work()
1453 req->work.flags = 0; in io_prep_async_work()
1454 if (req->flags & REQ_F_FORCE_ASYNC) in io_prep_async_work()
1455 req->work.flags |= IO_WQ_WORK_CONCURRENT; in io_prep_async_work()
1457 if (req->flags & REQ_F_ISREG) { in io_prep_async_work()
1458 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL)) in io_prep_async_work()
1462 req->work.flags |= IO_WQ_WORK_UNBOUND; in io_prep_async_work()
1470 if (req->flags & REQ_F_LINK_TIMEOUT) { in io_prep_async_link()
1506 req->work.flags |= IO_WQ_WORK_CANCEL; in io_queue_async_work()
1509 &req->work, req->flags); in io_queue_async_work()
1674 if (ctx->flags & IORING_SETUP_SQPOLL) { in io_cqring_ev_posted_iopoll()
1735 if (ctx->flags & IORING_SETUP_IOPOLL) in io_cqring_overflow_flush()
1738 if (ctx->flags & IORING_SETUP_IOPOLL) in io_cqring_overflow_flush()
1813 ocqe->cqe.flags = cflags; in io_cqring_event_overflow()
1834 WRITE_ONCE(cqe->flags, cflags); in __io_fill_cqe()
1864 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { in io_req_complete_post()
1865 if (req->flags & IO_DISARM_MASK) in io_req_complete_post()
1891 return req->flags & IO_REQ_CLEAN_FLAGS; in io_req_needs_clean()
1901 req->flags |= REQ_F_COMPLETE_INLINE; in io_req_complete_state()
1930 req->flags &= ~REQ_F_HARDLINK; in io_req_complete_fail_submit()
1931 req->flags |= REQ_F_LINK; in io_req_complete_fail_submit()
2034 unsigned int flags = req->flags; in io_dismantle_req() local
2038 if (!(flags & REQ_F_FIXED_FILE)) in io_dismantle_req()
2101 if (link->flags & REQ_F_FAIL) in io_fail_links()
2119 if (req->flags & REQ_F_ARM_LTIMEOUT) { in io_disarm_next()
2122 req->flags &= ~REQ_F_ARM_LTIMEOUT; in io_disarm_next()
2129 } else if (req->flags & REQ_F_LINK_TIMEOUT) { in io_disarm_next()
2136 if (unlikely((req->flags & REQ_F_FAIL) && in io_disarm_next()
2137 !(req->flags & REQ_F_HARDLINK))) { in io_disarm_next()
2154 if (req->flags & IO_DISARM_MASK) { in __io_req_find_next()
2173 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK)))) in io_req_find_next()
2248 unsigned long flags; in io_req_task_work_add() local
2253 spin_lock_irqsave(&tctx->task_lock, flags); in io_req_task_work_add()
2258 spin_unlock_irqrestore(&tctx->task_lock, flags); in io_req_task_work_add()
2270 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL; in io_req_task_work_add()
2276 spin_lock_irqsave(&tctx->task_lock, flags); in io_req_task_work_add()
2280 spin_unlock_irqrestore(&tctx->task_lock, flags); in io_req_task_work_add()
2306 if (likely(!(req->task->flags & PF_EXITING))) in io_req_task_submit()
2473 req->flags &= ~REQ_F_BUFFER_SELECTED; in io_put_kbuf()
2482 if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) in io_put_rw_kbuf()
2494 if (current->flags & PF_IO_WORKER && in io_run_task_work()
2534 WRITE_ONCE(cqe->flags, cflags); in io_iopoll_complete()
2608 if (!(ctx->flags & IORING_SETUP_IOPOLL)) in io_iopoll_try_reap_events()
2695 if (req->flags & REQ_F_ISREG) { in kiocb_end_write()
2721 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && in io_rw_should_reissue()
2722 !(ctx->flags & IORING_SETUP_IOPOLL))) in io_rw_should_reissue()
2776 req->flags |= REQ_F_REISSUE; in __io_complete_rw_common()
2842 req->flags |= REQ_F_REISSUE; in io_complete_rw_iopoll()
2908 if ((ctx->flags & IORING_SETUP_SQPOLL) && in io_iopoll_req_issued()
2961 if (rw == READ && (req->flags & REQ_F_NOWAIT_READ)) in io_file_supports_nowait()
2963 else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE)) in io_file_supports_nowait()
2979 req->flags |= REQ_F_ISREG; in io_prep_rw()
2995 req->flags |= REQ_F_NOWAIT; in io_prep_rw()
3007 if (ctx->flags & IORING_SETUP_IOPOLL) { in io_prep_rw()
3071 req->flags |= REQ_F_CUR_POS; in io_kiocb_update_pos()
3085 if (req->flags & REQ_F_CUR_POS) in kiocb_done()
3102 if (req->flags & REQ_F_REISSUE) { in kiocb_done()
3103 req->flags &= ~REQ_F_REISSUE; in kiocb_done()
3212 if (req->flags & REQ_F_BUFFER_SELECTED) in io_buffer_select()
3252 req->flags |= REQ_F_BUFFER_SELECTED; in io_rw_buffer_select()
3307 if (req->flags & REQ_F_BUFFER_SELECTED) { in io_iov_buffer_select()
3340 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT)) in io_import_iovec()
3344 if (req->flags & REQ_F_BUFFER_SELECT) { in io_import_iovec()
3356 if (req->flags & REQ_F_BUFFER_SELECT) { in io_import_iovec()
3459 req->flags |= REQ_F_NEED_CLEANUP; in io_req_map_rw()
3507 req->flags |= REQ_F_NEED_CLEANUP; in io_rw_prep_async()
3567 if (req->flags & REQ_F_NOWAIT) in io_rw_should_retry()
3583 wait->wait.flags = 0; in io_rw_should_retry()
3603 return req->flags & REQ_F_ISREG || in need_read_all()
3659 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { in io_read()
3660 req->flags &= ~REQ_F_REISSUE; in io_read()
3662 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_read()
3665 if (req->flags & REQ_F_NOWAIT) in io_read()
3671 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) { in io_read()
3783 (req->flags & REQ_F_ISREG)) in io_write()
3799 if (req->flags & REQ_F_ISREG) { in io_write()
3813 if (req->flags & REQ_F_REISSUE) { in io_write()
3814 req->flags &= ~REQ_F_REISSUE; in io_write()
3825 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) in io_write()
3829 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) in io_write()
3857 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_renameat_prep()
3861 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_renameat_prep()
3868 ren->flags = READ_ONCE(sqe->rename_flags); in io_renameat_prep()
3880 req->flags |= REQ_F_NEED_CLEANUP; in io_renameat_prep()
3893 ren->newpath, ren->flags); in io_renameat()
3895 req->flags &= ~REQ_F_NEED_CLEANUP; in io_renameat()
3908 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_unlinkat_prep()
3913 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_unlinkat_prep()
3918 un->flags = READ_ONCE(sqe->unlink_flags); in io_unlinkat_prep()
3919 if (un->flags & ~AT_REMOVEDIR) in io_unlinkat_prep()
3927 req->flags |= REQ_F_NEED_CLEANUP; in io_unlinkat_prep()
3939 if (un->flags & AT_REMOVEDIR) in io_unlinkat()
3944 req->flags &= ~REQ_F_NEED_CLEANUP; in io_unlinkat()
3955 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_shutdown_prep()
3997 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_splice_prep()
4001 sp->flags = READ_ONCE(sqe->splice_flags); in __io_splice_prep()
4002 if (unlikely(sp->flags & ~valid_flags)) in __io_splice_prep()
4020 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED; in io_tee() local
4028 (sp->flags & SPLICE_F_FD_IN_FIXED), issue_flags); in io_tee()
4035 ret = do_tee(in, out, sp->len, flags); in io_tee()
4037 if (!(sp->flags & SPLICE_F_FD_IN_FIXED)) in io_tee()
4059 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED; in io_splice() local
4068 (sp->flags & SPLICE_F_FD_IN_FIXED), issue_flags); in io_splice()
4078 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags); in io_splice()
4080 if (!(sp->flags & SPLICE_F_FD_IN_FIXED)) in io_splice()
4096 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) in io_nop()
4107 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) in io_fsync_prep()
4113 req->sync.flags = READ_ONCE(sqe->fsync_flags); in io_fsync_prep()
4114 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC)) in io_fsync_prep()
4133 req->sync.flags & IORING_FSYNC_DATASYNC); in io_fsync()
4146 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fallocate_prep()
4177 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_openat_prep()
4181 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in __io_openat_prep()
4185 if (!(req->open.how.flags & O_PATH) && force_o_largefile()) in __io_openat_prep()
4186 req->open.how.flags |= O_LARGEFILE; in __io_openat_prep()
4198 if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC)) in __io_openat_prep()
4202 req->flags |= REQ_F_NEED_CLEANUP; in __io_openat_prep()
4209 u64 flags = READ_ONCE(sqe->open_flags); in io_openat_prep() local
4211 req->open.how = build_open_how(flags, mode); in io_openat_prep()
4254 if (req->open.how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE)) in io_openat2()
4261 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile); in io_openat2()
4295 req->flags &= ~REQ_F_NEED_CLEANUP; in io_openat2()
4477 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_epoll_ctl_prep()
4523 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_madvise_prep()
4558 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fadvise_prep()
4592 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_statx_prep()
4596 if (req->flags & REQ_F_FIXED_FILE) in io_statx_prep()
4603 req->statx.flags = READ_ONCE(sqe->statx_flags); in io_statx_prep()
4616 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask, in io_statx()
4627 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_close_prep()
4632 if (req->flags & REQ_F_FIXED_FILE) in io_close_prep()
4698 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) in io_sfr_prep()
4706 req->sync.flags = READ_ONCE(sqe->sync_range_flags); in io_sfr_prep()
4719 req->sync.flags); in io_sync_file_range()
4727 static bool io_net_retry(struct socket *sock, int flags) in io_net_retry() argument
4729 if (!(flags & MSG_WAITALL)) in io_net_retry()
4746 req->flags |= REQ_F_NEED_CLEANUP; in io_setup_async_msg()
4780 req->flags |= REQ_F_NEED_CLEANUP; in io_sendmsg_prep_async()
4788 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_sendmsg_prep()
4799 req->flags |= REQ_F_NOWAIT; in io_sendmsg_prep()
4814 unsigned flags; in io_sendmsg() local
4832 flags = req->sr_msg.msg_flags; in io_sendmsg()
4834 flags |= MSG_DONTWAIT; in io_sendmsg()
4835 if (flags & MSG_WAITALL) in io_sendmsg()
4838 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); in io_sendmsg()
4845 if (ret > 0 && io_net_retry(sock, flags)) { in io_sendmsg()
4847 req->flags |= REQ_F_PARTIAL_IO; in io_sendmsg()
4855 req->flags &= ~REQ_F_NEED_CLEANUP; in io_sendmsg()
4870 unsigned flags; in io_send() local
4887 flags = req->sr_msg.msg_flags; in io_send()
4889 flags |= MSG_DONTWAIT; in io_send()
4890 if (flags & MSG_WAITALL) in io_send()
4893 msg.msg_flags = flags; in io_send()
4900 if (ret > 0 && io_net_retry(sock, flags)) { in io_send()
4904 req->flags |= REQ_F_PARTIAL_IO; in io_send()
4930 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_recvmsg_copy_hdr()
4965 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_compat_recvmsg_copy_hdr()
5015 req->flags |= REQ_F_BUFFER_SELECTED; in io_recv_buffer_select()
5030 req->flags |= REQ_F_NEED_CLEANUP; in io_recvmsg_prep_async()
5038 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_recvmsg_prep()
5050 req->flags |= REQ_F_NOWAIT; in io_recvmsg_prep()
5066 unsigned flags; in io_recvmsg() local
5083 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recvmsg()
5093 flags = req->sr_msg.msg_flags; in io_recvmsg()
5095 flags |= MSG_DONTWAIT; in io_recvmsg()
5096 if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen) in io_recvmsg()
5100 kmsg->uaddr, flags); in io_recvmsg()
5106 if (ret > 0 && io_net_retry(sock, flags)) { in io_recvmsg()
5110 req->flags |= REQ_F_PARTIAL_IO; in io_recvmsg()
5114 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { in io_recvmsg()
5118 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recvmsg()
5123 req->flags &= ~REQ_F_NEED_CLEANUP; in io_recvmsg()
5140 unsigned flags; in io_recv() local
5149 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recv()
5167 flags = req->sr_msg.msg_flags; in io_recv()
5169 flags |= MSG_DONTWAIT; in io_recv()
5170 if (flags & MSG_WAITALL) in io_recv()
5173 ret = sock_recvmsg(sock, &msg, flags); in io_recv()
5179 if (ret > 0 && io_net_retry(sock, flags)) { in io_recv()
5183 req->flags |= REQ_F_PARTIAL_IO; in io_recv()
5187 } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { in io_recv()
5191 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recv()
5205 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_accept_prep()
5212 accept->flags = READ_ONCE(sqe->accept_flags); in io_accept_prep()
5216 if (accept->file_slot && (accept->flags & SOCK_CLOEXEC)) in io_accept_prep()
5218 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) in io_accept_prep()
5220 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK)) in io_accept_prep()
5221 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK; in io_accept_prep()
5235 fd = __get_unused_fd_flags(accept->flags, accept->nofile); in io_accept()
5240 accept->flags); in io_accept()
5247 req->flags |= REQ_F_PARTIAL_IO; in io_accept()
5276 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_connect_prep()
5495 if (unlikely(req->task->flags & PF_EXITING)) in io_poll_check_events()
5838 if (req->flags & REQ_F_POLLED) { in io_arm_poll_handler()
5853 req->flags |= REQ_F_POLLED; in io_arm_poll_handler()
5934 unsigned int flags) in io_poll_parse_events() argument
5942 if (!(flags & IORING_POLL_ADD_MULTI)) in io_poll_parse_events()
5951 u32 flags; in io_poll_update_prep() local
5953 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_update_prep()
5957 flags = READ_ONCE(sqe->len); in io_poll_update_prep()
5958 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA | in io_poll_update_prep()
5962 if (flags == IORING_POLL_ADD_MULTI) in io_poll_update_prep()
5966 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS; in io_poll_update_prep()
5967 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA; in io_poll_update_prep()
5973 upd->events = io_poll_parse_events(sqe, flags); in io_poll_update_prep()
5983 u32 flags; in io_poll_add_prep() local
5985 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_add_prep()
5989 flags = READ_ONCE(sqe->len); in io_poll_add_prep()
5990 if (flags & ~IORING_POLL_ADD_MULTI) in io_poll_add_prep()
5994 poll->events = io_poll_parse_events(sqe, flags); in io_poll_add_prep()
6070 unsigned long flags; in io_timeout_fn() local
6072 spin_lock_irqsave(&ctx->timeout_lock, flags); in io_timeout_fn()
6076 spin_unlock_irqrestore(&ctx->timeout_lock, flags); in io_timeout_fn()
6123 switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) { in io_timeout_get_clock()
6186 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_remove_prep()
6188 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_timeout_remove_prep()
6195 tr->flags = READ_ONCE(sqe->timeout_flags); in io_timeout_remove_prep()
6196 if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) { in io_timeout_remove_prep()
6197 if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1) in io_timeout_remove_prep()
6199 if (tr->flags & IORING_LINK_TIMEOUT_UPDATE) in io_timeout_remove_prep()
6201 if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS)) in io_timeout_remove_prep()
6205 } else if (tr->flags) { in io_timeout_remove_prep()
6213 static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags) in io_translate_timeout_mode() argument
6215 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS in io_translate_timeout_mode()
6228 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) { in io_timeout_remove()
6235 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags); in io_timeout_remove()
6255 unsigned flags; in io_timeout_prep() local
6258 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_prep()
6265 flags = READ_ONCE(sqe->timeout_flags); in io_timeout_prep()
6266 if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK)) in io_timeout_prep()
6269 if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1) in io_timeout_prep()
6282 data->flags = flags; in io_timeout_prep()
6288 data->mode = io_translate_timeout_mode(flags); in io_timeout_prep()
6299 link->last->flags |= REQ_F_ARM_LTIMEOUT; in io_timeout_prep()
6419 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_async_cancel_prep()
6421 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_async_cancel_prep()
6463 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_rsrc_update_prep()
6622 if (req->flags & REQ_F_FAIL) { in io_drain_req()
6634 req->flags |= REQ_F_IO_DRAIN; in io_drain_req()
6639 if (pos->flags & REQ_F_IO_DRAIN) { in io_drain_req()
6641 req->flags |= REQ_F_IO_DRAIN; in io_drain_req()
6649 !(req->flags & REQ_F_IO_DRAIN))) { in io_drain_req()
6691 if (req->flags & REQ_F_BUFFER_SELECTED) { in io_clean_op()
6705 if (req->flags & REQ_F_NEED_CLEANUP) { in io_clean_op()
6739 if ((req->flags & REQ_F_POLLED) && req->apoll) { in io_clean_op()
6744 if (req->flags & REQ_F_INFLIGHT) { in io_clean_op()
6749 if (req->flags & REQ_F_CREDS) in io_clean_op()
6752 req->flags &= ~IO_REQ_CLEAN_FLAGS; in io_clean_op()
6761 if ((req->flags & REQ_F_CREDS) && req->creds != current_cred()) in io_issue_sqe()
6875 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) in io_issue_sqe()
6896 if (!(req->flags & REQ_F_REFCOUNT)) in io_wq_submit_work()
6906 if (work->flags & IO_WQ_WORK_CANCEL) in io_wq_submit_work()
6917 if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_wq_submit_work()
6925 if (req->flags & REQ_F_NOWAIT) in io_wq_submit_work()
6980 req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT); in io_file_get_fixed()
7016 if (!(req->task->flags & PF_EXITING)) in io_req_task_link_timeout()
7031 unsigned long flags; in io_link_timeout_fn() local
7033 spin_lock_irqsave(&ctx->timeout_lock, flags); in io_link_timeout_fn()
7048 spin_unlock_irqrestore(&ctx->timeout_lock, flags); in io_link_timeout_fn()
7091 if (req->flags & REQ_F_COMPLETE_INLINE) { in __io_queue_sqe()
7104 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { in __io_queue_sqe()
7134 if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) { in io_queue_sqe()
7136 } else if (req->flags & REQ_F_FAIL) { in io_queue_sqe()
7185 req->flags = sqe_flags = READ_ONCE(sqe->flags); in io_init_req()
7211 req->flags |= REQ_F_CREDS; in io_init_req()
7258 if (!(link->head->flags & REQ_F_FAIL)) in io_submit_sqe()
7260 } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { in io_submit_sqe()
7277 req->flags, true, in io_submit_sqe()
7278 ctx->flags & IORING_SETUP_SQPOLL); in io_submit_sqe()
7290 if (!(req->flags & REQ_F_FAIL)) { in io_submit_sqe()
7294 if (!(head->flags & REQ_F_FAIL)) in io_submit_sqe()
7303 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { in io_submit_sqe()
7308 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { in io_submit_sqe()
7485 !(ctx->flags & IORING_SETUP_R_DISABLED)) in __io_sq_thread()
7539 current->flags |= PF_NO_SETAFFINITY; in io_sq_thread()
7575 if ((ctx->flags & IORING_SETUP_IOPOLL) && in io_sq_thread()
7805 unsigned long flags; in io_rsrc_node_ref_zero() local
7809 spin_lock_irqsave(&ctx->rsrc_ref_lock, flags); in io_rsrc_node_ref_zero()
7825 spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags); in io_rsrc_node_ref_zero()
8148 if (p->flags & IORING_SETUP_ATTACH_WQ) { in io_get_sq_data()
8348 bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL; in __io_rsrc_put_work()
8732 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) == in io_sq_offload_create()
8745 if (ctx->flags & IORING_SETUP_SQPOLL) { in io_sq_offload_create()
8774 if (p->flags & IORING_SETUP_SQ_AFF) { in io_sq_offload_create()
8798 } else if (p->flags & IORING_SETUP_SQ_AFF) { in io_sq_offload_create()
9747 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) || in io_uring_try_cancel_requests()
10006 unsigned long pgoff, unsigned long flags) in io_uring_nommu_get_unmapped_area() argument
10037 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz, in io_get_ext_arg() argument
10047 if (!(flags & IORING_ENTER_EXT_ARG)) { in io_get_ext_arg()
10070 u32, min_complete, u32, flags, const void __user *, argp, in SYSCALL_DEFINE6() argument
10080 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | in SYSCALL_DEFINE6()
10098 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED)) in SYSCALL_DEFINE6()
10107 if (ctx->flags & IORING_SETUP_SQPOLL) { in SYSCALL_DEFINE6()
10114 if (flags & IORING_ENTER_SQ_WAKEUP) in SYSCALL_DEFINE6()
10116 if (flags & IORING_ENTER_SQ_WAIT) { in SYSCALL_DEFINE6()
10133 if (flags & IORING_ENTER_GETEVENTS) { in SYSCALL_DEFINE6()
10137 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig); in SYSCALL_DEFINE6()
10149 if (ctx->flags & IORING_SETUP_IOPOLL && in SYSCALL_DEFINE6()
10150 !(ctx->flags & IORING_SETUP_SQPOLL)) { in SYSCALL_DEFINE6()
10211 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) { in __io_uring_show_fdinfo()
10388 if (!(p->flags & IORING_SETUP_CLAMP)) in io_uring_create()
10402 if (p->flags & IORING_SETUP_CQSIZE) { in io_uring_create()
10411 if (!(p->flags & IORING_SETUP_CLAMP)) in io_uring_create()
10456 p->sq_off.flags = offsetof(struct io_rings, sq_flags); in io_uring_create()
10467 p->cq_off.flags = offsetof(struct io_rings, cq_flags); in io_uring_create()
10498 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags); in io_uring_create()
10522 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL | in io_uring_setup()
10564 p->ops[i].flags = IO_URING_OP_SUPPORTED; in io_probe()
10601 if (!(ctx->flags & IORING_SETUP_R_DISABLED)) in io_register_restrictions()
10665 if (!(ctx->flags & IORING_SETUP_R_DISABLED)) in io_register_enable_rings()
10671 ctx->flags &= ~IORING_SETUP_R_DISABLED; in io_register_enable_rings()
10819 if (ctx->flags & IORING_SETUP_SQPOLL) { in io_register_iowq_max_workers()
11121 BUILD_BUG_SQE_ELEM(1, __u8, flags); in io_uring_init()
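
The bulk of the references above are one idiom: a flag is a single bit in an
unsigned integer member, tested with &, set with |=, and cleared with &= ~
(compare io_req_track_inflight(), io_match_task() and io_clean_op() in the
listing). A minimal userspace sketch of that idiom follows; the REQ_F_*-style
names and bit positions here are illustrative stand-ins, not the kernel's
actual definitions.

#include <stdio.h>

/* Hypothetical flag bits, mirroring the REQ_F_* pattern in the listing. */
#define REQ_F_FAIL		(1U << 0)
#define REQ_F_INFLIGHT		(1U << 1)
#define REQ_F_NEED_CLEANUP	(1U << 2)

struct req {
	unsigned int flags;	/* cf. the "unsigned int flags; member" hits */
};

int main(void)
{
	struct req req = { .flags = 0 };

	req.flags |= REQ_F_INFLIGHT;		/* set a bit */
	if (req.flags & REQ_F_INFLIGHT)		/* test a bit */
		printf("request is inflight\n");
	req.flags &= ~REQ_F_INFLIGHT;		/* clear a bit */

	if (!(req.flags & REQ_F_FAIL))		/* negated test */
		printf("request has not failed\n");
	return 0;
}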
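
Two other recurring uses of "flags" are visible in the listing: the *_prep()
handlers read user-supplied flag words with READ_ONCE() and reject unknown
bits (e.g. io_unlinkat_prep() testing "un->flags & ~AT_REMOVEDIR"), and the
"unsigned long flags; ... local" hits hold saved interrupt state for
spin_lock_irqsave()/spin_unlock_irqrestore() pairs rather than feature bits.
The first pattern is easy to show standalone; the sketch below uses
hypothetical bit values, not any real io_uring flag.

#include <errno.h>
#include <stdio.h>

#define ALLOWED_FLAGS	(0x1u | 0x2u)	/* hypothetical permitted bits */

/* Reject any bit outside the permitted mask, as the *_prep() helpers
 * in the listing do before acting on user-supplied flags. */
static int validate_flags(unsigned int flags)
{
	if (flags & ~ALLOWED_FLAGS)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("0x3 -> %d\n", validate_flags(0x3));	/* 0: all bits allowed */
	printf("0x4 -> %d\n", validate_flags(0x4));	/* -EINVAL: unknown bit */
	return 0;
}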