Lines Matching refs:work

241 static __cold void io_fallback_req_func(struct work_struct *work)
243 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
244 fallback_work.work);
416 req->work.list.next = NULL;
417 req->work.flags = 0;
418 req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
420 req->work.flags |= IO_WQ_WORK_CONCURRENT;
427 io_wq_hash_work(&req->work, file_inode(req->file));
430 req->work.flags |= IO_WQ_WORK_UNBOUND;
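
The matches above show the per-request work item being reset (list pointer, flags, cancel_seq) and then picking up flag bits depending on properties of the request. Below is a minimal userspace sketch of that reset-then-accumulate-flags pattern, assuming invented names (fake_work, prep_async_work) and locally re-defined constants standing in for the kernel's io-wq flag bits; it is illustrative, not the kernel code.

/* Userspace sketch only: models how the lines above reset a work item
 * and then OR in flag bits depending on request properties.  The
 * struct, helper, and constants are local stand-ins, not the real
 * kernel definitions. */
#include <stdbool.h>
#include <stdio.h>

#define IO_WQ_WORK_UNBOUND     4    /* illustrative values */
#define IO_WQ_WORK_CONCURRENT  16

struct fake_work {
        struct fake_work *next;
        unsigned int flags;
        int cancel_seq;
};

/* Hypothetical helper: reset the work item, then accumulate flags. */
static void prep_async_work(struct fake_work *work, bool force_async,
                            bool unbound_file, int cancel_seq)
{
        work->next = NULL;
        work->flags = 0;
        work->cancel_seq = cancel_seq;

        if (force_async)
                work->flags |= IO_WQ_WORK_CONCURRENT;
        if (unbound_file)
                work->flags |= IO_WQ_WORK_UNBOUND;
}

int main(void)
{
        struct fake_work w;

        prep_async_work(&w, true, true, 42);
        printf("flags=0x%x cancel_seq=%d\n", w.flags, w.cancel_seq);
        return 0;
}
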
459 /* init ->work of the whole link before punting */
465 * canceled. That will make io-wq go through the usual work cancel
470 req->work.flags |= IO_WQ_WORK_CANCEL;
472 trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
473 io_wq_enqueue(tctx->io_wq, &req->work);
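
The queueing matches end with the work item being handed off via io_wq_enqueue(tctx->io_wq, &req->work). The sketch below shows the tail-insert shape such an intrusive, singly linked work list typically has; the names wq_work, wq_list and wq_enqueue are invented for the example and are not the kernel API.

/* Sketch, not the kernel implementation: an intrusive singly linked
 * work list with tail insertion. */
#include <stddef.h>
#include <stdio.h>

struct wq_work {
        struct wq_work *next;
        int id;
};

struct wq_list {
        struct wq_work *first;
        struct wq_work *last;
};

static void wq_enqueue(struct wq_list *list, struct wq_work *work)
{
        work->next = NULL;              /* mirrors req->work.list.next = NULL */
        if (list->last)
                list->last->next = work;
        else
                list->first = work;
        list->last = work;
}

int main(void)
{
        struct wq_list list = { NULL, NULL };
        struct wq_work a = { .id = 1 }, b = { .id = 2 };

        wq_enqueue(&list, &a);
        wq_enqueue(&list, &b);
        for (struct wq_work *w = list.first; w; w = w->next)
                printf("work %d\n", w->id);
        return 0;
}
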
1763 struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
1765 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1768 return req ? &req->work : NULL;
1771 void io_wq_submit_work(struct io_wq_work *work)
1773 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1788 if (work->flags & IO_WQ_WORK_CANCEL) {
1795 work->flags |= IO_WQ_WORK_CANCEL;
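
io_wq_free_work() and io_wq_submit_work() above both recover the request from the bare io_wq_work pointer with container_of(), which works because the work member is embedded inside the request structure. A self-contained illustration of that pattern follows, with a toy fake_req type standing in for struct io_kiocb; only the technique, not the types, comes from the kernel.

/* Userspace demo of the container_of() pattern used by the callbacks
 * above: given a pointer to an embedded member, compute the address of
 * the enclosing structure. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct io_wq_work {
        unsigned int flags;
};

struct fake_req {
        int opcode;
        struct io_wq_work work;   /* embedded member, as in struct io_kiocb */
};

/* A callback that only sees the io_wq_work pointer, like io_wq_submit_work(). */
static void submit_work(struct io_wq_work *work)
{
        struct fake_req *req = container_of(work, struct fake_req, work);

        printf("submitting request with opcode %d\n", req->opcode);
}

int main(void)
{
        struct fake_req req = { .opcode = 7 };

        submit_work(&req.work);
        return 0;
}
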
2425 /* always run at least 1 task work to process local work */
2728 struct io_tctx_exit *work;
2730 work = container_of(cb, struct io_tctx_exit, task_work);
2735 * work cancelation off the exec path.
2738 io_uring_del_tctx_node((unsigned long)work->ctx);
2739 complete(&work->completion);
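
The io_tctx_exit matches show a task_work callback that removes the tctx node and then signals complete(&work->completion) so a waiter can proceed. Below is a rough userspace analogue of that complete()/wait_for_completion() handshake, written with pthreads; the completion type and helpers are re-implemented locally for illustration and are not the kernel primitives.

/* Userspace sketch of a completion: one thread signals done, another
 * waits for it.  Build with -pthread. */
#include <pthread.h>
#include <stdio.h>

struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             done;
};

static void complete(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
        pthread_mutex_lock(&c->lock);
        while (!c->done)
                pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
}

static void *exit_cb(void *arg)
{
        struct completion *c = arg;

        /* ... tear down per-task state here ... */
        complete(c);            /* mirrors complete(&work->completion) */
        return NULL;
}

int main(void)
{
        struct completion c = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
        };
        pthread_t t;

        pthread_create(&t, NULL, exit_cb, &c);
        wait_for_completion(&c);
        pthread_join(&t, NULL);
        printf("exit work completed\n");
        return 0;
}
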
2742 static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
2744 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2749 static __cold void io_ring_exit_work(struct work_struct *work)
2751 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
2880 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
2882 struct io_kiocb *req = container_of(work, struct io_kiocb, work);
3534 * work, which can reduce cpu usage and uring_lock contention.