Lines matching refs:node (references to the identifier node, all in the kernel's fs/io_uring.c)
238 struct list_head node; member
824 struct io_wq_work_node node; member
1297 struct llist_node *node = llist_del_all(&ctx->fallback_llist); in io_fallback_req_func() local
1302 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node) in io_fallback_req_func()
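The io_fallback_req_func() hits above show io_uring's lock-free llist pattern: llist_del_all() atomically detaches every queued entry, then llist_for_each_entry_safe() walks the detached batch while entries are consumed. Below is a minimal, self-contained module sketch of that pattern; all demo_* names are illustrative, not io_uring's.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/llist.h>

struct demo_req {
	int id;
	struct llist_node fallback_node;	/* mirrors io_task_work.fallback_node */
};

static LLIST_HEAD(demo_fallback_llist);

static int __init demo_init(void)
{
	struct demo_req *req, *tmp;
	struct llist_node *node;
	int i;

	for (i = 0; i < 3; i++) {
		req = kmalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			break;
		req->id = i;
		llist_add(&req->fallback_node, &demo_fallback_llist);	/* lock-free push */
	}

	/* atomically take everything queued so far; later adds see an empty list */
	node = llist_del_all(&demo_fallback_llist);

	/* the _safe iterator caches the next pointer, so entries may be freed */
	llist_for_each_entry_safe(req, tmp, node, fallback_node) {
		pr_info("demo: draining req %d\n", req->id);
		kfree(req);
	}
	return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");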
2199 struct io_wq_work_node *node; in tctx_task_work() local
2205 node = tctx->task_list.first; in tctx_task_work()
2207 if (!node) in tctx_task_work()
2210 if (!node) in tctx_task_work()
2214 struct io_wq_work_node *next = node->next; in tctx_task_work()
2215 struct io_kiocb *req = container_of(node, struct io_kiocb, in tctx_task_work()
2216 io_task_work.node); in tctx_task_work()
2226 node = next; in tctx_task_work()
2232 } while (node); in tctx_task_work()
2247 struct io_wq_work_node *node; in io_req_task_work_add() local
2254 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list); in io_req_task_work_add()
2278 node = tctx->task_list.first; in io_req_task_work_add()
2282 while (node) { in io_req_task_work_add()
2283 req = container_of(node, struct io_kiocb, io_task_work.node); in io_req_task_work_add()
2284 node = node->next; in io_req_task_work_add()
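The tctx_task_work() and io_req_task_work_add() hits, together with the io_wq_work_node member near the top of the listing, show io_uring's private singly linked work list: producers append with wq_list_add_tail(), and the consumer recovers each request with container_of() after caching node->next, because handling the request may free it. A minimal sketch follows; the wq_* types here are simplified stand-ins for the private fs/io-wq.h definitions, and demo_* names are illustrative.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct wq_node {
	struct wq_node *next;
};

struct wq_list {
	struct wq_node *first;
	struct wq_node *last;
};

struct demo_req {
	int id;
	struct wq_node node;	/* mirrors io_kiocb's io_task_work.node */
};

/* simplified re-creation of wq_list_add_tail() */
static void wq_list_add_tail_demo(struct wq_node *n, struct wq_list *l)
{
	n->next = NULL;
	if (!l->first)
		l->first = n;
	else
		l->last->next = n;
	l->last = n;
}

static int __init demo_init(void)
{
	struct wq_list list = { };
	struct wq_node *node;
	int i;

	for (i = 0; i < 3; i++) {
		struct demo_req *req = kmalloc(sizeof(*req), GFP_KERNEL);

		if (!req)
			break;
		req->id = i;
		wq_list_add_tail_demo(&req->node, &list);
	}

	node = list.first;
	while (node) {
		/* save next first: the request is gone once it has been run */
		struct wq_node *next = node->next;
		struct demo_req *req = container_of(node, struct demo_req, node);

		pr_info("demo: running task work for req %d\n", req->id);
		kfree(req);
		node = next;
	}
	return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");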
6435 struct io_tctx_node *node; in io_async_cancel() local
6445 list_for_each_entry(node, &ctx->tctx_list, ctx_node) { in io_async_cancel()
6446 struct io_uring_task *tctx = node->task->io_uring; in io_async_cancel()
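io_async_cancel() walks ctx->tctx_list with a plain list_for_each_entry(); the same walk appears below in io_uring_try_cancel_iowq() and io_register_iowq_max_workers(). A minimal sketch, with illustrative demo_* names:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>

struct demo_tctx_node {
	int task_id;			/* stands in for node->task */
	struct list_head ctx_node;	/* mirrors io_tctx_node.ctx_node */
};

static LIST_HEAD(demo_tctx_list);

static int __init demo_init(void)
{
	struct demo_tctx_node *node, *tmp;
	int i;

	for (i = 0; i < 2; i++) {
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			break;
		node->task_id = i;
		list_add(&node->ctx_node, &demo_tctx_list);
	}

	/* the io_async_cancel()-style walk */
	list_for_each_entry(node, &demo_tctx_list, ctx_node)
		pr_info("demo: visiting tctx of task %d\n", node->task_id);

	list_for_each_entry_safe(node, tmp, &demo_tctx_list, ctx_node) {
		list_del(&node->ctx_node);
		kfree(node);
	}
	return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");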
7803 struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs); in io_rsrc_node_ref_zero() local
7804 struct io_ring_ctx *ctx = node->rsrc_data->ctx; in io_rsrc_node_ref_zero()
7810 node->done = true; in io_rsrc_node_ref_zero()
7813 if (node->rsrc_data->quiesce) in io_rsrc_node_ref_zero()
7817 node = list_first_entry(&ctx->rsrc_ref_list, in io_rsrc_node_ref_zero()
7818 struct io_rsrc_node, node); in io_rsrc_node_ref_zero()
7820 if (!node->done) in io_rsrc_node_ref_zero()
7822 list_del(&node->node); in io_rsrc_node_ref_zero()
7823 first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist); in io_rsrc_node_ref_zero()
7844 INIT_LIST_HEAD(&ref_node->node); in io_rsrc_node_alloc()
7861 list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list); in io_rsrc_node_switch()
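The io_rsrc_node_ref_zero() hits carry the most subtle logic in this listing: references can drop to zero in any order, but nodes must be put in the order io_rsrc_node_switch() queued them on ctx->rsrc_ref_list (the INIT_LIST_HEAD()/list_add_tail() hits just above), so the callback marks its own node done and then drains only the contiguous done prefix from the head onto rsrc_put_llist. A minimal sketch of that ordering logic, leaving out the percpu_ref machinery and locking that trigger it; demo_* names are illustrative.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/llist.h>

struct demo_rsrc_node {
	int id;
	bool done;
	struct list_head node;		/* linked on demo_rsrc_ref_list */
	struct llist_node llist;	/* moved to demo_rsrc_put_llist when retired */
};

static LIST_HEAD(demo_rsrc_ref_list);
static LLIST_HEAD(demo_rsrc_put_llist);

/* stands in for io_rsrc_node_ref_zero(), minus percpu_ref and locking */
static void demo_ref_zero(struct demo_rsrc_node *node)
{
	node->done = true;

	while (!list_empty(&demo_rsrc_ref_list)) {
		node = list_first_entry(&demo_rsrc_ref_list,
					struct demo_rsrc_node, node);
		/* the list is in registration order; stop at the first live node */
		if (!node->done)
			break;
		list_del(&node->node);
		llist_add(&node->llist, &demo_rsrc_put_llist);
	}
}

static int __init demo_init(void)
{
	struct demo_rsrc_node *n[3], *pos, *tmp;
	struct llist_node *drained;
	int i;

	for (i = 0; i < 3; i++) {
		n[i] = kzalloc(sizeof(*n[i]), GFP_KERNEL);
		if (!n[i])
			return -ENOMEM;	/* error unwind elided in this sketch */
		n[i]->id = i;
		INIT_LIST_HEAD(&n[i]->node);			 /* as io_rsrc_node_alloc() */
		list_add_tail(&n[i]->node, &demo_rsrc_ref_list); /* as io_rsrc_node_switch() */
	}

	demo_ref_zero(n[1]);	/* out of order: head still live, nothing retired */
	demo_ref_zero(n[0]);	/* head done: retires node 0, then node 1 */
	demo_ref_zero(n[2]);

	/* llist_del_all() hands the batch back newest-first */
	drained = llist_del_all(&demo_rsrc_put_llist);
	llist_for_each_entry_safe(pos, tmp, drained, llist) {
		pr_info("demo: put rsrc node %d\n", pos->id);
		kfree(pos);
	}
	return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");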
8371 struct llist_node *node; in io_rsrc_put_work() local
8374 node = llist_del_all(&ctx->rsrc_put_llist); in io_rsrc_put_work()
8376 while (node) { in io_rsrc_put_work()
8378 struct llist_node *next = node->next; in io_rsrc_put_work()
8380 ref_node = llist_entry(node, struct io_rsrc_node, llist); in io_rsrc_put_work()
8382 node = next; in io_rsrc_put_work()
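io_rsrc_put_work() drains the llist filled above, but with a manual loop instead of the _safe iterator: node->next is cached before each entry is processed, because processing frees the node. A minimal sketch of that variant; demo_* names are illustrative.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/llist.h>

struct demo_put_node {
	int id;
	struct llist_node llist;
};

static LLIST_HEAD(demo_put_llist);

static int __init demo_init(void)
{
	struct llist_node *node;
	int i;

	for (i = 0; i < 3; i++) {
		struct demo_put_node *ref_node =
			kmalloc(sizeof(*ref_node), GFP_KERNEL);

		if (!ref_node)
			break;
		ref_node->id = i;
		llist_add(&ref_node->llist, &demo_put_llist);
	}

	node = llist_del_all(&demo_put_llist);
	while (node) {
		struct llist_node *next = node->next;	/* save before freeing */
		struct demo_put_node *ref_node =
			llist_entry(node, struct demo_put_node, llist);

		pr_info("demo: putting node %d\n", ref_node->id);
		kfree(ref_node);
		node = next;
	}
	return 0;
}

static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");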
8469 struct io_rsrc_node *node, void *rsrc) in io_queue_rsrc_removal() argument
8481 list_add(&prsrc->list, &node->rsrc_list); in io_queue_rsrc_removal()
9516 struct io_tctx_node *node; in io_ring_exit_work() local
9569 node = list_first_entry(&ctx->tctx_list, struct io_tctx_node, in io_ring_exit_work()
9573 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL); in io_ring_exit_work()
9576 wake_up_process(node->task); in io_ring_exit_work()
9701 struct io_tctx_node *node; in io_uring_try_cancel_iowq() local
9706 list_for_each_entry(node, &ctx->tctx_list, ctx_node) { in io_uring_try_cancel_iowq()
9707 struct io_uring_task *tctx = node->task->io_uring; in io_uring_try_cancel_iowq()
9770 struct io_tctx_node *node; in __io_uring_add_tctx_node() local
9789 node = kmalloc(sizeof(*node), GFP_KERNEL); in __io_uring_add_tctx_node()
9790 if (!node) in __io_uring_add_tctx_node()
9792 node->ctx = ctx; in __io_uring_add_tctx_node()
9793 node->task = current; in __io_uring_add_tctx_node()
9796 node, GFP_KERNEL)); in __io_uring_add_tctx_node()
9798 kfree(node); in __io_uring_add_tctx_node()
9803 list_add(&node->ctx_node, &ctx->tctx_list); in __io_uring_add_tctx_node()
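__io_uring_add_tctx_node() shows the xarray insert-with-unwind pattern: allocate the node, store it keyed by the ctx pointer, extract any errno from the returned entry with xa_err(), and free the node on failure before linking it into ctx->tctx_list. A minimal sketch with an arbitrary key and illustrative demo_* names:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/xarray.h>

struct demo_node {
	int ctx_id;
	struct list_head ctx_node;
};

static DEFINE_XARRAY(demo_xa);
static LIST_HEAD(demo_tctx_list);

/* mirrors the allocate/store/unwind sequence of __io_uring_add_tctx_node() */
static int demo_add_node(unsigned long key, int ctx_id)
{
	struct demo_node *node;
	int ret;

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->ctx_id = ctx_id;

	/* xa_store() returns the displaced entry or an xa_err()-encoded errno */
	ret = xa_err(xa_store(&demo_xa, key, node, GFP_KERNEL));
	if (ret) {
		kfree(node);
		return ret;
	}

	list_add(&node->ctx_node, &demo_tctx_list);
	return 0;
}

static int __init demo_init(void)
{
	return demo_add_node(0x1234, 1);	/* the real code keys by the ctx pointer */
}

static void __exit demo_exit(void)
{
	struct demo_node *node = xa_erase(&demo_xa, 0x1234);

	if (node) {
		list_del(&node->ctx_node);
		kfree(node);
	}
	xa_destroy(&demo_xa);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");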
9828 struct io_tctx_node *node; in io_uring_del_tctx_node() local
9832 node = xa_erase(&tctx->xa, index); in io_uring_del_tctx_node()
9833 if (!node) in io_uring_del_tctx_node()
9836 WARN_ON_ONCE(current != node->task); in io_uring_del_tctx_node()
9837 WARN_ON_ONCE(list_empty(&node->ctx_node)); in io_uring_del_tctx_node()
9839 mutex_lock(&node->ctx->uring_lock); in io_uring_del_tctx_node()
9840 list_del(&node->ctx_node); in io_uring_del_tctx_node()
9841 mutex_unlock(&node->ctx->uring_lock); in io_uring_del_tctx_node()
9843 if (tctx->last == node->ctx) in io_uring_del_tctx_node()
9845 kfree(node); in io_uring_del_tctx_node()
9851 struct io_tctx_node *node; in io_uring_clean_tctx() local
9854 xa_for_each(&tctx->xa, index, node) { in io_uring_clean_tctx()
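io_uring_del_tctx_node() and io_uring_clean_tctx() are the teardown half: xa_erase() removes and returns one node by index, and xa_for_each() sweeps whatever remains (io_uring_cancel_generic() below walks the same xarray). A minimal sketch of erase-during-iteration, which xa_for_each() permits for the current entry; demo_* names are illustrative.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct demo_node {
	int id;
};

static DEFINE_XARRAY(demo_xa);

/* mirrors io_uring_del_tctx_node(): erase by index, then free */
static void demo_del_node(unsigned long index)
{
	struct demo_node *node = xa_erase(&demo_xa, index);

	if (!node)
		return;
	pr_info("demo: deleted node %d\n", node->id);
	kfree(node);
}

static int __init demo_init(void)
{
	unsigned long i;

	for (i = 0; i < 3; i++) {
		struct demo_node *node = kmalloc(sizeof(*node), GFP_KERNEL);
		int ret;

		if (!node)
			return -ENOMEM;
		node->id = i;
		ret = xa_err(xa_store(&demo_xa, i, node, GFP_KERNEL));
		if (ret) {
			kfree(node);
			return ret;
		}
	}
	return 0;
}

/* mirrors io_uring_clean_tctx(): sweep every remaining entry */
static void __exit demo_exit(void)
{
	struct demo_node *node;
	unsigned long index;

	xa_for_each(&demo_xa, index, node)
		demo_del_node(index);	/* erasing the current entry is safe */
	xa_destroy(&demo_xa);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");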
9902 struct io_tctx_node *node; in io_uring_cancel_generic() local
9905 xa_for_each(&tctx->xa, index, node) { in io_uring_cancel_generic()
9907 if (node->ctx->sq_data) in io_uring_cancel_generic()
9909 io_uring_try_cancel_requests(node->ctx, current, in io_uring_cancel_generic()
10807 struct io_tctx_node *node; in io_register_iowq_max_workers() local
10867 list_for_each_entry(node, &ctx->tctx_list, ctx_node) { in io_register_iowq_max_workers()
10868 struct io_uring_task *tctx = node->task->io_uring; in io_register_iowq_max_workers()