Home
last modified time | relevance | path

Searched refs:work (Results 1 – 10 of 10) sorted by relevance

/io_uring/
io-wq.c:64 struct work_struct work; member
168 struct io_wq_work *work) in io_work_get_acct() argument
170 return io_get_acct(wqe, !(work->flags & IO_WQ_WORK_UNBOUND)); in io_work_get_acct()
453 static inline unsigned int io_get_work_hash(struct io_wq_work *work) in io_get_work_hash() argument
455 return work->flags >> IO_WQ_HASH_SHIFT; in io_get_work_hash()
481 struct io_wq_work *work, *tail; in io_get_next_work() local
488 work = container_of(node, struct io_wq_work, list); in io_get_next_work()
491 if (!io_wq_is_hashed(work)) { in io_get_next_work()
493 return work; in io_get_next_work()
496 hash = io_get_work_hash(work); in io_get_next_work()
[all …]
io-wq.h:50 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
51 void io_wq_hash_work(struct io_wq_work *work, void *val);
57 static inline bool io_wq_is_hashed(struct io_wq_work *work) in io_wq_is_hashed() argument
59 return work->flags & IO_WQ_WORK_HASHED; in io_wq_is_hashed()
slist.h:130 static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) in wq_next_work() argument
132 if (!work->list.next) in wq_next_work()
135 return container_of(work->list.next, struct io_wq_work, list); in wq_next_work()
cancel.c:30 static bool io_cancel_cb(struct io_wq_work *work, void *data) in io_cancel_cb() argument
32 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_cb()
47 if (cd->seq == req->work.cancel_seq) in io_cancel_cb()
49 req->work.cancel_seq = cd->seq; in io_cancel_cb()
io_uring.c:241 static __cold void io_fallback_req_func(struct work_struct *work) in io_fallback_req_func() argument
243 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, in io_fallback_req_func()
244 fallback_work.work); in io_fallback_req_func()
416 req->work.list.next = NULL; in io_prep_async_work()
417 req->work.flags = 0; in io_prep_async_work()
418 req->work.cancel_seq = atomic_read(&ctx->cancel_seq); in io_prep_async_work()
420 req->work.flags |= IO_WQ_WORK_CONCURRENT; in io_prep_async_work()
427 io_wq_hash_work(&req->work, file_inode(req->file)); in io_prep_async_work()
430 req->work.flags |= IO_WQ_WORK_UNBOUND; in io_prep_async_work()
470 req->work.flags |= IO_WQ_WORK_CANCEL; in io_queue_iowq()
[all …]
poll.c:590 req->work.cancel_seq = atomic_read(&ctx->cancel_seq); in __io_arm_poll_handler()
817 if (cd->seq == req->work.cancel_seq) in io_poll_find()
819 req->work.cancel_seq = cd->seq; in io_poll_find()
847 if (cd->seq == req->work.cancel_seq) in io_poll_file_find()
849 req->work.cancel_seq = cd->seq; in io_poll_file_find()
io_uring.h:73 struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
74 void io_wq_submit_work(struct io_wq_work *work);
rsrc.h:56 void io_rsrc_put_work(struct work_struct *work);
timeout.c:237 if (cd->seq == tmp->work.cancel_seq) in io_timeout_extract()
239 tmp->work.cancel_seq = cd->seq; in io_timeout_extract()
rsrc.c:189 void io_rsrc_put_work(struct work_struct *work) in io_rsrc_put_work() argument
194 ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work); in io_rsrc_put_work()