Lines Matching refs:flags

336 unsigned int flags; member
513 u32 flags; member
520 int flags; member
529 int flags; member
555 u32 flags; member
631 unsigned int flags; member
647 unsigned int flags; member
663 int flags; member
669 int flags; member
693 int flags; member
873 unsigned int flags; member
1152 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); in req_ref_inc_not_zero()
1158 if (likely(!(req->flags & REQ_F_REFCOUNT))) in req_ref_put_and_test()
1167 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); in req_ref_get()
1174 if (!(req->flags & REQ_F_REFCOUNT)) { in __io_req_set_refcount()
1175 req->flags |= REQ_F_REFCOUNT; in __io_req_set_refcount()
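The req_ref_*() hits above show io_uring's lazy refcounting: a request only
maintains an atomic reference count once REQ_F_REFCOUNT is set, so a request
with a single owner never touches an atomic. A minimal userspace sketch of
that pattern, assuming a simplified request struct (the bit value and helper
names are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define REQ_F_REFCOUNT (1u << 0)  /* illustrative bit, not the kernel value */

    struct req {
            unsigned int flags;
            atomic_int refs;
    };

    /* Opt a request into refcounting on first demand; single-owner until then. */
    static void req_set_refcount(struct req *req, int nr)
    {
            if (!(req->flags & REQ_F_REFCOUNT)) {
                    req->flags |= REQ_F_REFCOUNT;
                    atomic_store(&req->refs, nr);
            }
    }

    /* Drop a reference; an unrefcounted request is owned solely by the
     * caller, so "put" always means "last reference gone". */
    static bool req_put_and_test(struct req *req)
    {
            if (!(req->flags & REQ_F_REFCOUNT))
                    return true;
            return atomic_fetch_sub(&req->refs, 1) == 1;
    }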
1219 if (req->flags & REQ_F_INFLIGHT) in io_match_task()
1230 if (req->flags & REQ_F_INFLIGHT) in io_match_linked()
1250 if (head->flags & REQ_F_LINK_TIMEOUT) { in io_match_task_safe()
1265 req->flags |= REQ_F_FAIL; in req_set_fail()
1341 ctx->flags = p->flags; in io_ring_ctx_alloc()
1383 if (unlikely(req->flags & REQ_F_IO_DRAIN)) { in req_need_defer()
1403 return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE); in io_req_ffs_set()
1408 if (!(req->flags & REQ_F_INFLIGHT)) { in io_req_track_inflight()
1409 req->flags |= REQ_F_INFLIGHT; in io_req_track_inflight()
1419 req->flags &= ~REQ_F_ARM_LTIMEOUT; in __io_prep_linked_timeout()
1420 req->flags |= REQ_F_LINK_TIMEOUT; in __io_prep_linked_timeout()
1430 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT))) in io_prep_linked_timeout()
1440 if (!(req->flags & REQ_F_CREDS)) { in io_prep_async_work()
1441 req->flags |= REQ_F_CREDS; in io_prep_async_work()
1446 req->work.flags = 0; in io_prep_async_work()
1447 if (req->flags & REQ_F_FORCE_ASYNC) in io_prep_async_work()
1448 req->work.flags |= IO_WQ_WORK_CONCURRENT; in io_prep_async_work()
1450 if (req->flags & REQ_F_ISREG) { in io_prep_async_work()
1451 if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL)) in io_prep_async_work()
1455 req->work.flags |= IO_WQ_WORK_UNBOUND; in io_prep_async_work()
1463 if (req->flags & REQ_F_LINK_TIMEOUT) { in io_prep_async_link()
1499 req->work.flags |= IO_WQ_WORK_CANCEL; in io_queue_async_work()
1502 &req->work, req->flags); in io_queue_async_work()
1667 if (ctx->flags & IORING_SETUP_SQPOLL) { in io_cqring_ev_posted_iopoll()
1728 if (ctx->flags & IORING_SETUP_IOPOLL) in io_cqring_overflow_flush()
1731 if (ctx->flags & IORING_SETUP_IOPOLL) in io_cqring_overflow_flush()
1806 ocqe->cqe.flags = cflags; in io_cqring_event_overflow()
1827 WRITE_ONCE(cqe->flags, cflags); in __io_fill_cqe()
1857 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { in io_req_complete_post()
1858 if (req->flags & IO_DISARM_MASK) in io_req_complete_post()
1884 return req->flags & IO_REQ_CLEAN_FLAGS; in io_req_needs_clean()
1894 req->flags |= REQ_F_COMPLETE_INLINE; in io_req_complete_state()
1923 req->flags &= ~REQ_F_HARDLINK; in io_req_complete_fail_submit()
1924 req->flags |= REQ_F_LINK; in io_req_complete_fail_submit()
2027 unsigned int flags = req->flags; in io_dismantle_req() local
2031 if (!(flags & REQ_F_FIXED_FILE)) in io_dismantle_req()
2094 if (link->flags & REQ_F_FAIL) in io_fail_links()
2112 if (req->flags & REQ_F_ARM_LTIMEOUT) { in io_disarm_next()
2115 req->flags &= ~REQ_F_ARM_LTIMEOUT; in io_disarm_next()
2122 } else if (req->flags & REQ_F_LINK_TIMEOUT) { in io_disarm_next()
2129 if (unlikely((req->flags & REQ_F_FAIL) && in io_disarm_next()
2130 !(req->flags & REQ_F_HARDLINK))) { in io_disarm_next()
2147 if (req->flags & IO_DISARM_MASK) { in __io_req_find_next()
2166 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK)))) in io_req_find_next()
2241 unsigned long flags; in io_req_task_work_add() local
2246 spin_lock_irqsave(&tctx->task_lock, flags); in io_req_task_work_add()
2251 spin_unlock_irqrestore(&tctx->task_lock, flags); in io_req_task_work_add()
2263 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL; in io_req_task_work_add()
2269 spin_lock_irqsave(&tctx->task_lock, flags); in io_req_task_work_add()
2273 spin_unlock_irqrestore(&tctx->task_lock, flags); in io_req_task_work_add()
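Not every `flags` hit is a request flag: the io_req_task_work_add() lines
above use the conventional `unsigned long flags` local that
spin_lock_irqsave() fills with the saved interrupt state. A kernel-style
sketch of the idiom (builds only against kernel headers; the lock and list
names stand in for tctx->task_lock and its work list):

    #include <linux/spinlock.h>
    #include <linux/list.h>

    static DEFINE_SPINLOCK(task_lock);
    static LIST_HEAD(task_list);

    static void queue_work_item(struct list_head *node)
    {
            unsigned long flags;    /* saved IRQ state, not request flags */

            spin_lock_irqsave(&task_lock, flags);
            list_add_tail(node, &task_list);
            spin_unlock_irqrestore(&task_lock, flags);
    }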
2299 if (likely(!(req->task->flags & PF_EXITING))) in io_req_task_submit()
2466 req->flags &= ~REQ_F_BUFFER_SELECTED; in io_put_kbuf()
2475 if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) in io_put_rw_kbuf()
2487 if (current->flags & PF_IO_WORKER && in io_run_task_work()
2527 WRITE_ONCE(cqe->flags, cflags); in io_iopoll_complete()
2601 if (!(ctx->flags & IORING_SETUP_IOPOLL)) in io_iopoll_try_reap_events()
2690 if (req->flags & REQ_F_ISREG) { in kiocb_end_write()
2716 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && in io_rw_should_reissue()
2717 !(ctx->flags & IORING_SETUP_IOPOLL))) in io_rw_should_reissue()
2771 req->flags |= REQ_F_REISSUE; in __io_complete_rw_common()
2837 req->flags |= REQ_F_REISSUE; in io_complete_rw_iopoll()
2903 if ((ctx->flags & IORING_SETUP_SQPOLL) && in io_iopoll_req_issued()
2956 if (rw == READ && (req->flags & REQ_F_NOWAIT_READ)) in io_file_supports_nowait()
2958 else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE)) in io_file_supports_nowait()
2974 req->flags |= REQ_F_ISREG; in io_prep_rw()
2990 req->flags |= REQ_F_NOWAIT; in io_prep_rw()
3002 if (ctx->flags & IORING_SETUP_IOPOLL) { in io_prep_rw()
3066 req->flags |= REQ_F_CUR_POS; in io_kiocb_update_pos()
3080 if (req->flags & REQ_F_CUR_POS) in kiocb_done()
3097 if (req->flags & REQ_F_REISSUE) { in kiocb_done()
3098 req->flags &= ~REQ_F_REISSUE; in kiocb_done()
3207 if (req->flags & REQ_F_BUFFER_SELECTED) in io_buffer_select()
3247 req->flags |= REQ_F_BUFFER_SELECTED; in io_rw_buffer_select()
3302 if (req->flags & REQ_F_BUFFER_SELECTED) { in io_iov_buffer_select()
3335 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT)) in io_import_iovec()
3339 if (req->flags & REQ_F_BUFFER_SELECT) { in io_import_iovec()
3351 if (req->flags & REQ_F_BUFFER_SELECT) { in io_import_iovec()
3454 req->flags |= REQ_F_NEED_CLEANUP; in io_req_map_rw()
3502 req->flags |= REQ_F_NEED_CLEANUP; in io_rw_prep_async()
3562 if (req->flags & REQ_F_NOWAIT) in io_rw_should_retry()
3578 wait->wait.flags = 0; in io_rw_should_retry()
3598 return req->flags & REQ_F_ISREG || in need_read_all()
3654 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { in io_read()
3655 req->flags &= ~REQ_F_REISSUE; in io_read()
3657 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_read()
3660 if (req->flags & REQ_F_NOWAIT) in io_read()
3666 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) { in io_read()
3778 (req->flags & REQ_F_ISREG)) in io_write()
3794 if (req->flags & REQ_F_ISREG) { in io_write()
3808 if (req->flags & REQ_F_REISSUE) { in io_write()
3809 req->flags &= ~REQ_F_REISSUE; in io_write()
3820 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) in io_write()
3824 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) in io_write()
3852 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_renameat_prep()
3856 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_renameat_prep()
3863 ren->flags = READ_ONCE(sqe->rename_flags); in io_renameat_prep()
3875 req->flags |= REQ_F_NEED_CLEANUP; in io_renameat_prep()
3888 ren->newpath, ren->flags); in io_renameat()
3890 req->flags &= ~REQ_F_NEED_CLEANUP; in io_renameat()
3903 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_unlinkat_prep()
3908 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_unlinkat_prep()
3913 un->flags = READ_ONCE(sqe->unlink_flags); in io_unlinkat_prep()
3914 if (un->flags & ~AT_REMOVEDIR) in io_unlinkat_prep()
3922 req->flags |= REQ_F_NEED_CLEANUP; in io_unlinkat_prep()
3934 if (un->flags & AT_REMOVEDIR) in io_unlinkat()
3939 req->flags &= ~REQ_F_NEED_CLEANUP; in io_unlinkat()
3952 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_mkdirat_prep()
3957 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_mkdirat_prep()
3968 req->flags |= REQ_F_NEED_CLEANUP; in io_mkdirat_prep()
3982 req->flags &= ~REQ_F_NEED_CLEANUP; in io_mkdirat()
3995 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_symlinkat_prep()
4000 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_symlinkat_prep()
4017 req->flags |= REQ_F_NEED_CLEANUP; in io_symlinkat_prep()
4031 req->flags &= ~REQ_F_NEED_CLEANUP; in io_symlinkat()
4044 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_linkat_prep()
4048 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_linkat_prep()
4055 lnk->flags = READ_ONCE(sqe->hardlink_flags); in io_linkat_prep()
4057 lnk->oldpath = getname_uflags(oldf, lnk->flags); in io_linkat_prep()
4067 req->flags |= REQ_F_NEED_CLEANUP; in io_linkat_prep()
4080 lnk->newpath, lnk->flags); in io_linkat()
4082 req->flags &= ~REQ_F_NEED_CLEANUP; in io_linkat()
4093 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_shutdown_prep()
4135 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_splice_prep()
4139 sp->flags = READ_ONCE(sqe->splice_flags); in __io_splice_prep()
4140 if (unlikely(sp->flags & ~valid_flags)) in __io_splice_prep()
4158 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED; in io_tee() local
4166 (sp->flags & SPLICE_F_FD_IN_FIXED), issue_flags); in io_tee()
4173 ret = do_tee(in, out, sp->len, flags); in io_tee()
4175 if (!(sp->flags & SPLICE_F_FD_IN_FIXED)) in io_tee()
4197 unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED; in io_splice() local
4206 (sp->flags & SPLICE_F_FD_IN_FIXED), issue_flags); in io_splice()
4216 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags); in io_splice()
4218 if (!(sp->flags & SPLICE_F_FD_IN_FIXED)) in io_splice()
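In io_tee() and io_splice() above, the local `flags` is the user's
splice_flags with SPLICE_F_FD_IN_FIXED masked off: io_uring's UAPI claims
that bit to mean "fd_in indexes the fixed-file table", and it must not leak
into do_tee()/do_splice(). A small sketch of splitting the flag word that
way (the struct and helper are illustrative):

    #include <stdbool.h>

    #define SPLICE_F_FD_IN_FIXED (1u << 31) /* io_uring-private bit */

    struct splice_req {
            unsigned int user_flags;        /* as read from the SQE */
    };

    /* The private bit steers file lookup inside io_uring; only the
     * remainder is a real splice flag word. */
    static unsigned int splice_kernel_flags(const struct splice_req *sp,
                                            bool *fixed_in)
    {
            *fixed_in = sp->user_flags & SPLICE_F_FD_IN_FIXED;
            return sp->user_flags & ~SPLICE_F_FD_IN_FIXED;
    }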
4234 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) in io_nop()
4245 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) in io_fsync_prep()
4251 req->sync.flags = READ_ONCE(sqe->fsync_flags); in io_fsync_prep()
4252 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC)) in io_fsync_prep()
4271 req->sync.flags & IORING_FSYNC_DATASYNC); in io_fsync()
4284 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fallocate_prep()
4315 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_openat_prep()
4319 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in __io_openat_prep()
4323 if (!(req->open.how.flags & O_PATH) && force_o_largefile()) in __io_openat_prep()
4324 req->open.how.flags |= O_LARGEFILE; in __io_openat_prep()
4336 if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC)) in __io_openat_prep()
4340 req->flags |= REQ_F_NEED_CLEANUP; in __io_openat_prep()
4347 u64 flags = READ_ONCE(sqe->open_flags); in io_openat_prep() local
4349 req->open.how = build_open_how(flags, mode); in io_openat_prep()
4392 if (req->open.how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE)) in io_openat2()
4399 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile); in io_openat2()
4433 req->flags &= ~REQ_F_NEED_CLEANUP; in io_openat2()
4615 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_epoll_ctl_prep()
4661 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_madvise_prep()
4696 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fadvise_prep()
4730 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_statx_prep()
4734 if (req->flags & REQ_F_FIXED_FILE) in io_statx_prep()
4741 req->statx.flags = READ_ONCE(sqe->statx_flags); in io_statx_prep()
4754 ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask, in io_statx()
4765 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_close_prep()
4770 if (req->flags & REQ_F_FIXED_FILE) in io_close_prep()
4836 if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) in io_sfr_prep()
4844 req->sync.flags = READ_ONCE(sqe->sync_range_flags); in io_sfr_prep()
4857 req->sync.flags); in io_sync_file_range()
4865 static bool io_net_retry(struct socket *sock, int flags) in io_net_retry() argument
4867 if (!(flags & MSG_WAITALL)) in io_net_retry()
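io_net_retry() above gates retries of short sends and receives on
MSG_WAITALL: without it, a partial transfer is a valid result and is
returned to userspace as-is. A hedged userspace rendering of that policy,
assuming the helper also requires a stream-like socket so resuming a
partial transfer makes sense:

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <stdbool.h>

    /* Retry only when the caller asked for the full length and the socket
     * type can meaningfully continue where the last transfer stopped. */
    static bool net_should_retry(int sock_type, int msg_flags, ssize_t done)
    {
            if (done <= 0)
                    return false;
            if (!(msg_flags & MSG_WAITALL))
                    return false;
            return sock_type == SOCK_STREAM || sock_type == SOCK_SEQPACKET;
    }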
4884 req->flags |= REQ_F_NEED_CLEANUP; in io_setup_async_msg()
4918 req->flags |= REQ_F_NEED_CLEANUP; in io_sendmsg_prep_async()
4926 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_sendmsg_prep()
4937 req->flags |= REQ_F_NOWAIT; in io_sendmsg_prep()
4952 unsigned flags; in io_sendmsg() local
4970 flags = req->sr_msg.msg_flags; in io_sendmsg()
4972 flags |= MSG_DONTWAIT; in io_sendmsg()
4973 if (flags & MSG_WAITALL) in io_sendmsg()
4976 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); in io_sendmsg()
4983 if (ret > 0 && io_net_retry(sock, flags)) { in io_sendmsg()
4987 req->flags |= REQ_F_PARTIAL_IO; in io_sendmsg()
4995 req->flags &= ~REQ_F_NEED_CLEANUP; in io_sendmsg()
5010 unsigned flags; in io_send() local
5027 flags = req->sr_msg.msg_flags; in io_send()
5029 flags |= MSG_DONTWAIT; in io_send()
5030 if (flags & MSG_WAITALL) in io_send()
5033 msg.msg_flags = flags; in io_send()
5040 if (ret > 0 && io_net_retry(sock, flags)) { in io_send()
5044 req->flags |= REQ_F_PARTIAL_IO; in io_send()
5070 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_recvmsg_copy_hdr()
5105 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_compat_recvmsg_copy_hdr()
5155 req->flags |= REQ_F_BUFFER_SELECTED; in io_recv_buffer_select()
5170 req->flags |= REQ_F_NEED_CLEANUP; in io_recvmsg_prep_async()
5178 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_recvmsg_prep()
5190 req->flags |= REQ_F_NOWAIT; in io_recvmsg_prep()
5206 unsigned flags; in io_recvmsg() local
5223 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recvmsg()
5233 flags = req->sr_msg.msg_flags; in io_recvmsg()
5235 flags |= MSG_DONTWAIT; in io_recvmsg()
5236 if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen) in io_recvmsg()
5240 kmsg->uaddr, flags); in io_recvmsg()
5246 if (ret > 0 && io_net_retry(sock, flags)) { in io_recvmsg()
5248 req->flags |= REQ_F_PARTIAL_IO; in io_recvmsg()
5252 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { in io_recvmsg()
5256 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recvmsg()
5261 req->flags &= ~REQ_F_NEED_CLEANUP; in io_recvmsg()
5278 unsigned flags; in io_recv() local
5287 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recv()
5305 flags = req->sr_msg.msg_flags; in io_recv()
5307 flags |= MSG_DONTWAIT; in io_recv()
5308 if (flags & MSG_WAITALL) in io_recv()
5311 ret = sock_recvmsg(sock, &msg, flags); in io_recv()
5317 if (ret > 0 && io_net_retry(sock, flags)) { in io_recv()
5321 req->flags |= REQ_F_PARTIAL_IO; in io_recv()
5325 } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { in io_recv()
5329 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recv()
5343 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_accept_prep()
5350 accept->flags = READ_ONCE(sqe->accept_flags); in io_accept_prep()
5354 if (accept->file_slot && (accept->flags & SOCK_CLOEXEC)) in io_accept_prep()
5356 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) in io_accept_prep()
5358 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK)) in io_accept_prep()
5359 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK; in io_accept_prep()
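The last two io_accept_prep() lines mirror what accept4(2) does internally:
SOCK_NONBLOCK and O_NONBLOCK are the same bit on most architectures but not
on all of them, so the socket flag is rewritten into the file flag before it
reaches __get_unused_fd_flags(). A standalone illustration of that
translation:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <fcntl.h>
    #include <sys/socket.h>

    /* Translate accept4()-style socket flags into open()-style file flags,
     * mirroring the SOCK_NONBLOCK fixup quoted above. */
    static unsigned int accept_to_file_flags(unsigned int flags)
    {
            if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
                    flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
            return flags;
    }

    int main(void)
    {
            unsigned int f = accept_to_file_flags(SOCK_NONBLOCK);

            printf("SOCK_NONBLOCK=%#x O_NONBLOCK=%#x translated=%#x\n",
                   (unsigned)SOCK_NONBLOCK, (unsigned)O_NONBLOCK, f);
            return 0;
    }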
5373 fd = __get_unused_fd_flags(accept->flags, accept->nofile); in io_accept()
5378 accept->flags); in io_accept()
5384 req->flags |= REQ_F_PARTIAL_IO; in io_accept()
5413 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_connect_prep()
5632 if (unlikely(req->task->flags & PF_EXITING)) in io_poll_check_events()
5975 if (req->flags & REQ_F_POLLED) { in io_arm_poll_handler()
5990 req->flags |= REQ_F_POLLED; in io_arm_poll_handler()
6071 unsigned int flags) in io_poll_parse_events() argument
6079 if (!(flags & IORING_POLL_ADD_MULTI)) in io_poll_parse_events()
6088 u32 flags; in io_poll_update_prep() local
6090 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_update_prep()
6094 flags = READ_ONCE(sqe->len); in io_poll_update_prep()
6095 if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA | in io_poll_update_prep()
6099 if (flags == IORING_POLL_ADD_MULTI) in io_poll_update_prep()
6103 upd->update_events = flags & IORING_POLL_UPDATE_EVENTS; in io_poll_update_prep()
6104 upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA; in io_poll_update_prep()
6110 upd->events = io_poll_parse_events(sqe, flags); in io_poll_update_prep()
6120 u32 flags; in io_poll_add_prep() local
6122 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_add_prep()
6126 flags = READ_ONCE(sqe->len); in io_poll_add_prep()
6127 if (flags & ~IORING_POLL_ADD_MULTI) in io_poll_add_prep()
6131 poll->events = io_poll_parse_events(sqe, flags); in io_poll_add_prep()
6207 unsigned long flags; in io_timeout_fn() local
6209 spin_lock_irqsave(&ctx->timeout_lock, flags); in io_timeout_fn()
6213 spin_unlock_irqrestore(&ctx->timeout_lock, flags); in io_timeout_fn()
6260 switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) { in io_timeout_get_clock()
6323 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_remove_prep()
6325 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_timeout_remove_prep()
6332 tr->flags = READ_ONCE(sqe->timeout_flags); in io_timeout_remove_prep()
6333 if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) { in io_timeout_remove_prep()
6334 if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1) in io_timeout_remove_prep()
6336 if (tr->flags & IORING_LINK_TIMEOUT_UPDATE) in io_timeout_remove_prep()
6338 if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS)) in io_timeout_remove_prep()
6342 } else if (tr->flags) { in io_timeout_remove_prep()
6350 static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags) in io_translate_timeout_mode() argument
6352 return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS in io_translate_timeout_mode()
6365 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) { in io_timeout_remove()
6372 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags); in io_timeout_remove()
6392 unsigned flags; in io_timeout_prep() local
6395 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_prep()
6402 flags = READ_ONCE(sqe->timeout_flags); in io_timeout_prep()
6403 if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK)) in io_timeout_prep()
6406 if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1) in io_timeout_prep()
6419 data->flags = flags; in io_timeout_prep()
6425 data->mode = io_translate_timeout_mode(flags); in io_timeout_prep()
6436 link->last->flags |= REQ_F_ARM_LTIMEOUT; in io_timeout_prep()
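io_timeout_prep() above shows the common shape of flag validation in the
prep helpers: reject unknown bits, use hweight32() to ensure the mutually
exclusive IORING_TIMEOUT_CLOCK_MASK bits aren't combined, then translate
IORING_TIMEOUT_ABS into an hrtimer mode. A userspace sketch of the same
checks (bit values are illustrative, not the UAPI ones):

    #include <errno.h>
    #include <stdint.h>

    #define TIMEOUT_ABS        (1u << 0)    /* illustrative, not UAPI values */
    #define TIMEOUT_BOOTTIME   (1u << 1)
    #define TIMEOUT_REALTIME   (1u << 2)
    #define TIMEOUT_CLOCK_MASK (TIMEOUT_BOOTTIME | TIMEOUT_REALTIME)

    /* Reject unknown bits and conflicting clock selections up front. */
    static int validate_timeout_flags(uint32_t flags)
    {
            if (flags & ~(TIMEOUT_ABS | TIMEOUT_CLOCK_MASK))
                    return -EINVAL;
            if (__builtin_popcount(flags & TIMEOUT_CLOCK_MASK) > 1)
                    return -EINVAL; /* hweight32() in the kernel */
            return 0;
    }

    /* io_translate_timeout_mode() equivalent: absolute vs. relative timer. */
    enum timer_mode { MODE_REL, MODE_ABS };

    static enum timer_mode translate_timeout_mode(uint32_t flags)
    {
            return (flags & TIMEOUT_ABS) ? MODE_ABS : MODE_REL;
    }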
6556 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_async_cancel_prep()
6558 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_async_cancel_prep()
6600 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_rsrc_update_prep()
6765 if (req->flags & REQ_F_FAIL) { in io_drain_req()
6777 req->flags |= REQ_F_IO_DRAIN; in io_drain_req()
6782 if (pos->flags & REQ_F_IO_DRAIN) { in io_drain_req()
6784 req->flags |= REQ_F_IO_DRAIN; in io_drain_req()
6792 !(req->flags & REQ_F_IO_DRAIN))) { in io_drain_req()
6834 if (req->flags & REQ_F_BUFFER_SELECTED) { in io_clean_op()
6848 if (req->flags & REQ_F_NEED_CLEANUP) { in io_clean_op()
6893 if ((req->flags & REQ_F_POLLED) && req->apoll) { in io_clean_op()
6898 if (req->flags & REQ_F_INFLIGHT) { in io_clean_op()
6903 if (req->flags & REQ_F_CREDS) in io_clean_op()
6906 req->flags &= ~IO_REQ_CLEAN_FLAGS; in io_clean_op()
6915 if ((req->flags & REQ_F_CREDS) && req->creds != current_cred()) in io_issue_sqe()
7038 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) in io_issue_sqe()
7059 if (!(req->flags & REQ_F_REFCOUNT)) in io_wq_submit_work()
7069 if (work->flags & IO_WQ_WORK_CANCEL) in io_wq_submit_work()
7080 if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_wq_submit_work()
7088 if (req->flags & REQ_F_NOWAIT) in io_wq_submit_work()
7143 req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT); in io_file_get_fixed()
7179 if (!(req->task->flags & PF_EXITING)) in io_req_task_link_timeout()
7194 unsigned long flags; in io_link_timeout_fn() local
7196 spin_lock_irqsave(&ctx->timeout_lock, flags); in io_link_timeout_fn()
7211 spin_unlock_irqrestore(&ctx->timeout_lock, flags); in io_link_timeout_fn()
7254 if (req->flags & REQ_F_COMPLETE_INLINE) { in __io_queue_sqe()
7267 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { in __io_queue_sqe()
7297 if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) { in io_queue_sqe()
7299 } else if (req->flags & REQ_F_FAIL) { in io_queue_sqe()
7348 req->flags = sqe_flags = READ_ONCE(sqe->flags); in io_init_req()
7374 req->flags |= REQ_F_CREDS; in io_init_req()
7421 if (!(link->head->flags & REQ_F_FAIL)) in io_submit_sqe()
7423 } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { in io_submit_sqe()
7440 req->flags, true, in io_submit_sqe()
7441 ctx->flags & IORING_SETUP_SQPOLL); in io_submit_sqe()
7453 if (!(req->flags & REQ_F_FAIL)) { in io_submit_sqe()
7457 if (!(head->flags & REQ_F_FAIL)) in io_submit_sqe()
7466 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { in io_submit_sqe()
7471 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { in io_submit_sqe()
7650 !(ctx->flags & IORING_SETUP_R_DISABLED)) in __io_sq_thread()
7704 current->flags |= PF_NO_SETAFFINITY; in io_sq_thread()
7740 if ((ctx->flags & IORING_SETUP_IOPOLL) && in io_sq_thread()
7970 unsigned long flags; in io_rsrc_node_ref_zero() local
7974 spin_lock_irqsave(&ctx->rsrc_ref_lock, flags); in io_rsrc_node_ref_zero()
7990 spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags); in io_rsrc_node_ref_zero()
8313 if (p->flags & IORING_SETUP_ATTACH_WQ) { in io_get_sq_data()
8513 bool lock_ring = ctx->flags & IORING_SETUP_IOPOLL; in __io_rsrc_put_work()
8897 if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) == in io_sq_offload_create()
8910 if (ctx->flags & IORING_SETUP_SQPOLL) { in io_sq_offload_create()
8939 if (p->flags & IORING_SETUP_SQ_AFF) { in io_sq_offload_create()
8963 } else if (p->flags & IORING_SETUP_SQ_AFF) { in io_sq_offload_create()
9907 if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) || in io_uring_try_cancel_requests()
10166 unsigned long pgoff, unsigned long flags) in io_uring_nommu_get_unmapped_area() argument
10197 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz, in io_get_ext_arg() argument
10207 if (!(flags & IORING_ENTER_EXT_ARG)) { in io_get_ext_arg()
10230 u32, min_complete, u32, flags, const void __user *, argp, in SYSCALL_DEFINE6() argument
10240 if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | in SYSCALL_DEFINE6()
10258 if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED)) in SYSCALL_DEFINE6()
10267 if (ctx->flags & IORING_SETUP_SQPOLL) { in SYSCALL_DEFINE6()
10274 if (flags & IORING_ENTER_SQ_WAKEUP) in SYSCALL_DEFINE6()
10276 if (flags & IORING_ENTER_SQ_WAIT) { in SYSCALL_DEFINE6()
10293 if (flags & IORING_ENTER_GETEVENTS) { in SYSCALL_DEFINE6()
10297 ret = io_get_ext_arg(flags, argp, &argsz, &ts, &sig); in SYSCALL_DEFINE6()
10309 if (ctx->flags & IORING_SETUP_IOPOLL && in SYSCALL_DEFINE6()
10310 !(ctx->flags & IORING_SETUP_SQPOLL)) { in SYSCALL_DEFINE6()
10371 if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) { in __io_uring_show_fdinfo()
10548 if (!(p->flags & IORING_SETUP_CLAMP)) in io_uring_create()
10562 if (p->flags & IORING_SETUP_CQSIZE) { in io_uring_create()
10571 if (!(p->flags & IORING_SETUP_CLAMP)) in io_uring_create()
10616 p->sq_off.flags = offsetof(struct io_rings, sq_flags); in io_uring_create()
10627 p->cq_off.flags = offsetof(struct io_rings, cq_flags); in io_uring_create()
10658 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags); in io_uring_create()
10682 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL | in io_uring_setup()
10724 p->ops[i].flags = IO_URING_OP_SUPPORTED; in io_probe()
10761 if (!(ctx->flags & IORING_SETUP_R_DISABLED)) in io_register_restrictions()
10825 if (!(ctx->flags & IORING_SETUP_R_DISABLED)) in io_register_enable_rings()
10831 ctx->flags &= ~IORING_SETUP_R_DISABLED; in io_register_enable_rings()
10975 if (ctx->flags & IORING_SETUP_SQPOLL) { in io_register_iowq_max_workers()
11277 BUILD_BUG_SQE_ELEM(1, __u8, flags); in io_uring_init()