Searched refs:tctx (Results 1 – 9 of 9) sorted by relevance
/io_uring/
tctx.c
   49  struct io_uring_task *tctx = tsk->io_uring;    in __io_uring_free() local
   51  WARN_ON_ONCE(!xa_empty(&tctx->xa));    in __io_uring_free()
   52  WARN_ON_ONCE(tctx->io_wq);    in __io_uring_free()
   53  WARN_ON_ONCE(tctx->cached_refs);    in __io_uring_free()
   55  percpu_counter_destroy(&tctx->inflight);    in __io_uring_free()
   56  kfree(tctx);    in __io_uring_free()
   63  struct io_uring_task *tctx;    in io_uring_alloc_task_context() local
   66  tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);    in io_uring_alloc_task_context()
   67  if (unlikely(!tctx))    in io_uring_alloc_task_context()
   70  ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);    in io_uring_alloc_task_context()
  [all …]
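These tctx.c hits bracket the task-context lifetime: io_uring_alloc_task_context() zero-allocates the struct and sets up the inflight percpu counter, while __io_uring_free() insists, via WARN_ON_ONCE, that the xarray is empty, the io-wq pointer is gone, and no cached refs remain before destroying the counter and freeing. A minimal userspace sketch of that alloc/assert/free discipline; struct task_ctx, its fields, and the helper names are illustrative stand-ins for the kernel's xarray, io-wq, percpu counter, and WARN_ON_ONCE:

    #include <assert.h>
    #include <stdlib.h>

    /* Stand-in for struct io_uring_task: just the fields the hits touch. */
    struct task_ctx {
            long inflight;      /* kernel: a percpu_counter */
            long cached_refs;
            void *io_wq;        /* worker pool; must already be torn down */
            size_t nr_nodes;    /* kernel: entries in the tctx->xa xarray */
    };

    static struct task_ctx *task_ctx_alloc(void)
    {
            /* kzalloc(..., GFP_KERNEL): the allocation arrives zeroed. */
            return calloc(1, sizeof(struct task_ctx));
    }

    static void task_ctx_free(struct task_ctx *ctx)
    {
            /* Mirrors the WARN_ON_ONCE() checks in __io_uring_free():
             * nothing may still reference the context when the task dies. */
            assert(ctx->nr_nodes == 0);
            assert(ctx->io_wq == NULL);
            assert(ctx->cached_refs == 0);
            free(ctx);
    }

    int main(void)
    {
            struct task_ctx *ctx = task_ctx_alloc();

            if (!ctx)
                    return 1;
            task_ctx_free(ctx);
            return 0;
    }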
cancel.c
   54  static int io_async_cancel_one(struct io_uring_task *tctx,    in io_async_cancel_one() argument
   61  if (!tctx || !tctx->io_wq)    in io_async_cancel_one()
   65  cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);    in io_async_cancel_one()
   81  int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,    in io_try_cancel() argument
   87  WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);    in io_try_cancel()
   89  ret = io_async_cancel_one(tctx, cd);    in io_try_cancel()
  131  struct io_uring_task *tctx,    in __io_async_cancel() argument
  140  ret = io_try_cancel(tctx, cd, issue_flags);    in __io_async_cancel()
  152  struct io_uring_task *tctx = node->task->io_uring;    in __io_async_cancel() local
  154  ret = io_async_cancel_one(tctx, cd);    in __io_async_cancel()
  [all …]
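The cancel.c hits show cancellation funneling through io_async_cancel_one(), which bails out with -ENOENT when the task has no context or no io-wq, and otherwise asks the worker pool via io_wq_cancel_cb(). A hedged sketch of that shape; the truncated hits above don't show the result translation, so the OK/RUNNING/NOTFOUND mapping below is my reading of io-wq's tri-state cancel result, and wq_cancel_cb() is a stub, not the kernel hook:

    #include <errno.h>
    #include <stdio.h>

    /* Modelled on io-wq's tri-state cancel result
     * (IO_WQ_CANCEL_OK / _RUNNING / _NOTFOUND). */
    enum wq_cancel { WQ_CANCEL_OK, WQ_CANCEL_RUNNING, WQ_CANCEL_NOTFOUND };

    struct task_ctx { void *io_wq; };

    /* Stub for io_wq_cancel_cb(): pretend nothing matched. */
    static enum wq_cancel wq_cancel_cb(void *wq)
    {
            (void)wq;
            return WQ_CANCEL_NOTFOUND;
    }

    /* Same shape as io_async_cancel_one(): no context or no worker
     * pool means nothing can be pending; otherwise translate the
     * pool's answer into errno-style codes. */
    static int async_cancel_one(struct task_ctx *tctx)
    {
            if (!tctx || !tctx->io_wq)
                    return -ENOENT;

            switch (wq_cancel_cb(tctx->io_wq)) {
            case WQ_CANCEL_OK:
                    return 0;
            case WQ_CANCEL_RUNNING:
                    return -EALREADY;
            case WQ_CANCEL_NOTFOUND:
                    return -ENOENT;
            }
            return -ENOENT;
    }

    int main(void)
    {
            printf("%d\n", async_cancel_one(NULL)); /* -ENOENT: no tctx */
            return 0;
    }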
tctx.h
   14  void io_uring_clean_tctx(struct io_uring_task *tctx);
   27  struct io_uring_task *tctx = current->io_uring;    in io_uring_add_tctx_node() local
   29  if (likely(tctx && tctx->last == ctx))    in io_uring_add_tctx_node()
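The inline io_uring_add_tctx_node() in tctx.h is a one-field fast path: if the task's cached tctx->last still points at the ring being submitted to, the slow-path lookup is skipped entirely. A small sketch of that memoization; field and function names are illustrative, and the kernel's slow path additionally allocates the tctx when it is missing, which is why line 29 above also tests tctx itself:

    #include <stdio.h>

    struct ring_ctx { int id; };          /* stand-in for io_ring_ctx */

    struct task_ctx {
            struct ring_ctx *last;        /* most recently used ring */
            int slow_lookups;             /* counts the lookup-style slow path */
    };

    /* Same shape as the io_uring_add_tctx_node() fast path. */
    static int add_tctx_node(struct task_ctx *tctx, struct ring_ctx *ctx)
    {
            if (tctx->last == ctx)
                    return 0;             /* likely(): no lookup needed */

            /* Slow path: the kernel searches tctx->xa and registers
             * the ring if it is not there yet. */
            tctx->slow_lookups++;
            tctx->last = ctx;
            return 0;
    }

    int main(void)
    {
            struct ring_ctx a = { 1 }, b = { 2 };
            struct task_ctx t = { 0 };

            add_tctx_node(&t, &a);  /* slow: first submit on ring a */
            add_tctx_node(&t, &a);  /* fast: hits the tctx->last cache */
            add_tctx_node(&t, &b);  /* slow: switched rings */
            printf("slow lookups: %d\n", t.slow_lookups);
            return 0;
    }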
io_uring.c
  454  struct io_uring_task *tctx = req->task->io_uring;    in io_queue_iowq() local
  456  BUG_ON(!tctx);    in io_queue_iowq()
  457  BUG_ON(!tctx->io_wq);    in io_queue_iowq()
  473  io_wq_enqueue(tctx->io_wq, &req->work);    in io_queue_iowq()
  662  struct io_uring_task *tctx = task->io_uring;    in __io_put_task() local
  664  percpu_counter_sub(&tctx->inflight, nr);    in __io_put_task()
  665  if (unlikely(atomic_read(&tctx->in_idle)))    in __io_put_task()
  666  wake_up(&tctx->wait);    in __io_put_task()
  670  void io_task_refs_refill(struct io_uring_task *tctx)    in io_task_refs_refill() argument
  672  unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;    in io_task_refs_refill()
  [all …]
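In io_uring.c, io_queue_iowq() punts work to the task's io-wq (with BUG_ON guarding that the context and pool exist), and __io_put_task() returns inflight references, waking the owner only if it is parked in_idle waiting for requests to drain. A single-threaded sketch of that put/wake shape, assuming C11 atomics and a plain bool in place of the percpu counter and waitqueue:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct task_ctx {
            atomic_long inflight;   /* kernel: percpu_counter tctx->inflight */
            atomic_bool in_idle;    /* set while the task waits to quiesce */
            bool woken;             /* stand-in for wake_up(&tctx->wait) */
    };

    /* Same shape as __io_put_task(): give back a batch of inflight
     * refs, and kick the owner only if it is parked waiting. */
    static void put_task_refs(struct task_ctx *tctx, long nr)
    {
            atomic_fetch_sub(&tctx->inflight, nr);
            if (atomic_load(&tctx->in_idle))
                    tctx->woken = true;   /* kernel: wake_up(&tctx->wait) */
    }

    int main(void)
    {
            struct task_ctx t = { 0 };

            atomic_store(&t.inflight, 3);
            put_task_refs(&t, 2);            /* nobody waiting: no wakeup */
            atomic_store(&t.in_idle, true);  /* task begins exit/cancel wait */
            put_task_refs(&t, 1);            /* drained: waiter gets kicked */
            printf("inflight=%ld woken=%d\n",
                   atomic_load(&t.inflight), t.woken);
            return 0;
    }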
io_uring.h
   79  void io_task_refs_refill(struct io_uring_task *tctx);
  359  struct io_uring_task *tctx = current->io_uring;    in io_get_task_refs() local
  361  tctx->cached_refs -= nr;    in io_get_task_refs()
  362  if (unlikely(tctx->cached_refs < 0))    in io_get_task_refs()
  363  io_task_refs_refill(tctx);    in io_get_task_refs()
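io_get_task_refs() at io_uring.h line 359 is the hot path of a batched refcount: each request decrements a task-local cache, and only a cache underflow reaches the shared counter through io_task_refs_refill(), whose refill covers the deficit plus a fresh cache (io_uring.c line 672 above). A runnable sketch of the pattern; REFS_CACHE_NR, struct task_refs, and the helper names are stand-ins, not kernel API:

    #include <stdatomic.h>
    #include <stdio.h>

    #define REFS_CACHE_NR 128   /* plays the role of IO_TCTX_REFS_CACHE_NR */

    struct task_refs {
            atomic_long inflight;   /* shared counter: touched only on refill */
            long cached_refs;       /* task-local cache: plain arithmetic */
    };

    /* Cold path, same shape as io_task_refs_refill(): cached_refs is
     * <= 0 here, so cover the deficit plus a fresh cache in one go. */
    static void task_refs_refill(struct task_refs *t)
    {
            long refill = -t->cached_refs + REFS_CACHE_NR;

            atomic_fetch_add(&t->inflight, refill);
            t->cached_refs += refill;
    }

    /* Hot path, same shape as io_get_task_refs(): one subtraction,
     * and the shared counter is only touched on underflow. */
    static void get_task_refs(struct task_refs *t, long nr)
    {
            t->cached_refs -= nr;
            if (t->cached_refs < 0)
                    task_refs_refill(t);
    }

    int main(void)
    {
            struct task_refs t = { 0 };

            for (int i = 0; i < 1000; i++)
                    get_task_refs(&t, 1);   /* one ref per "request" */

            /* 1000 refs taken, but only ~1000/128 atomic operations. */
            printf("inflight=%ld cached=%ld\n",
                   atomic_load(&t.inflight), t.cached_refs);
            return 0;
    }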
cancel.h
   19  int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
Makefile
    9  sqpoll.o fdinfo.o tctx.o poll.o \
io-wq.h
   53  int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
io-wq.c
 1353  int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)    in io_wq_cpu_affinity() argument
 1357  if (!tctx || !tctx->io_wq)    in io_wq_cpu_affinity()
 1362  struct io_wqe *wqe = tctx->io_wq->wqes[i];    in io_wq_cpu_affinity()