/io_uring/
D | slist.h |
    18  static inline void wq_list_add_after(struct io_wq_work_node *node,  in wq_list_add_after() argument
    24  pos->next = node;  in wq_list_add_after()
    25  node->next = next;  in wq_list_add_after()
    27  list->last = node;  in wq_list_add_after()
    52  static inline void wq_list_add_tail(struct io_wq_work_node *node,  in wq_list_add_tail() argument
    55  node->next = NULL;  in wq_list_add_tail()
    57  list->last = node;  in wq_list_add_tail()
    58  WRITE_ONCE(list->first, node);  in wq_list_add_tail()
    60  list->last->next = node;  in wq_list_add_tail()
    61  list->last = node;  in wq_list_add_tail()
    [all …]
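
The two helpers above implement io_uring's intrusive, singly linked work list, which tracks both first and last so tail appends are O(1). Below is a minimal userspace sketch of the same pattern, assuming simplified names (work_node, work_list, job); it mirrors the kernel logic but is a standalone re-creation, not the actual slist.h (WRITE_ONCE and all concurrency concerns are dropped).

/* Intrusive singly linked list with a cached tail pointer, modeled on
 * io_uring's wq_list helpers. Standalone sketch, not kernel code. */
#include <stddef.h>
#include <stdio.h>

struct work_node { struct work_node *next; };
struct work_list { struct work_node *first, *last; };

static void list_add_tail(struct work_node *node, struct work_list *list)
{
	node->next = NULL;
	if (!list->first) {		/* empty list: node is both ends */
		list->first = node;
		list->last = node;
	} else {			/* append after the current tail */
		list->last->next = node;
		list->last = node;
	}
}

/* Insert node right after pos; if pos was the tail, node becomes the tail. */
static void list_add_after(struct work_node *node, struct work_node *pos,
			   struct work_list *list)
{
	struct work_node *next = pos->next;

	pos->next = node;
	node->next = next;
	if (!next)
		list->last = node;
}

struct job {
	int id;
	struct work_node list;	/* intrusive: the link lives in the object */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct work_list wl = { NULL, NULL };
	struct job a = { 1 }, b = { 2 }, c = { 3 };

	list_add_tail(&a.list, &wl);
	list_add_tail(&c.list, &wl);
	list_add_after(&b.list, &a.list, &wl);	/* order: 1, 2, 3 */

	for (struct work_node *n = wl.first; n; n = n->next)
		printf("job %d\n", container_of(n, struct job, list)->id);
	return 0;
}
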
D | alloc_cache.h |
    10  struct hlist_node node;  member
    18  hlist_add_head(&entry->node, &cache->list);  in io_alloc_cache_put()
    27  struct hlist_node *node = cache->list.first;  in io_alloc_cache_get() local
    29  hlist_del(node);  in io_alloc_cache_get()
    31  return container_of(node, struct io_cache_entry, node);  in io_alloc_cache_get()
    47  struct hlist_node *node = cache->list.first;  in io_alloc_cache_free() local
    49  hlist_del(node);  in io_alloc_cache_free()
    50  free(container_of(node, struct io_cache_entry, node));  in io_alloc_cache_free()
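
alloc_cache.h recycles freed objects on a list so hot paths can skip the allocator: put pushes the embedded node onto the cache, get pops one and recovers the containing object. A minimal userspace sketch of the idea follows; a plain pointer stack stands in for the kernel's hlist, and the kernel's entry cap is omitted.

/* Object cache: pop a recycled object if one exists, else allocate. */
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>

struct cache_entry { struct cache_entry *next; };

struct buf {
	struct cache_entry node;	/* intrusive link, recovered via offsetof */
	char data[256];
};

static struct cache_entry *cache_head;

static void cache_put(struct buf *b)		/* kernel: hlist_add_head() */
{
	b->node.next = cache_head;
	cache_head = &b->node;
}

static struct buf *cache_get(void)		/* kernel: pop + container_of */
{
	if (cache_head) {
		struct cache_entry *e = cache_head;
		cache_head = e->next;
		return (struct buf *)((char *)e - offsetof(struct buf, node));
	}
	return malloc(sizeof(struct buf));	/* cache miss: real allocation */
}

static void cache_free_all(void)		/* kernel: io_alloc_cache_free() */
{
	while (cache_head) {
		struct cache_entry *e = cache_head;
		cache_head = e->next;
		free((char *)e - offsetof(struct buf, node));
	}
}

int main(void)
{
	struct buf *a = cache_get();
	if (!a)
		return 1;
	cache_put(a);
	struct buf *b = cache_get();		/* same object comes back */
	printf("recycled: %s\n", a == b ? "yes" : "no");
	cache_put(b);
	cache_free_all();
	return 0;
}
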
D | tctx.c |
    97  struct io_tctx_node *node;  in __io_uring_add_tctx_node() local
    116  node = kmalloc(sizeof(*node), GFP_KERNEL);  in __io_uring_add_tctx_node()
    117  if (!node)  in __io_uring_add_tctx_node()
    119  node->ctx = ctx;  in __io_uring_add_tctx_node()
    120  node->task = current;  in __io_uring_add_tctx_node()
    123  node, GFP_KERNEL));  in __io_uring_add_tctx_node()
    125  kfree(node);  in __io_uring_add_tctx_node()
    130  list_add(&node->ctx_node, &ctx->tctx_list);  in __io_uring_add_tctx_node()
    158  struct io_tctx_node *node;  in io_uring_del_tctx_node() local
    162  node = xa_erase(&tctx->xa, index);  in io_uring_del_tctx_node()
    [all …]
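
The tctx.c matches show the registration shape: allocate a tracking node, publish it in the task's index, free it on failure, and erase plus free it on teardown. Here is a hedged userspace sketch of that alloc/publish/unwind pattern; a fixed-size array and an integer ctx_id stand in for the kernel's xarray and ring pointer, both purely illustrative.

/* Register/unregister a per-ring tracking node, with error unwind. */
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>

#define MAX_RINGS 8

struct tctx_node { int ctx_id; };

static struct tctx_node *xa[MAX_RINGS];	/* stand-in for tctx->xa */

static int add_tctx_node(int ctx_id)
{
	struct tctx_node *node;

	if (ctx_id < 0 || ctx_id >= MAX_RINGS)
		return -EINVAL;
	if (xa[ctx_id])			/* fast path: already registered */
		return 0;

	node = malloc(sizeof(*node));	/* kernel: kmalloc(..., GFP_KERNEL) */
	if (!node)
		return -ENOMEM;
	node->ctx_id = ctx_id;
	xa[ctx_id] = node;		/* kernel checks xa_store() here and
					 * kfree()s the node if it fails */
	return 0;
}

static void del_tctx_node(int ctx_id)
{
	struct tctx_node *node = xa[ctx_id];	/* kernel: xa_erase() */

	xa[ctx_id] = NULL;
	free(node);				/* free(NULL) is a no-op */
}

int main(void)
{
	printf("add: %d\n", add_tctx_node(3));
	printf("add again: %d\n", add_tctx_node(3));	/* idempotent */
	del_tctx_node(3);
	return 0;
}
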
D | rsrc.h |
    41  struct list_head node;  member
    63  struct io_rsrc_node *node, void *rsrc);
    102  static inline void io_rsrc_put_node(struct io_rsrc_node *node, int nr)  in io_rsrc_put_node() argument
    104  percpu_ref_put_many(&node->refs, nr);  in io_rsrc_put_node()
    117  struct io_rsrc_node *node = req->rsrc_node;  in io_req_put_rsrc_locked() local
    119  if (node) {  in io_req_put_rsrc_locked()
    120  if (node == ctx->rsrc_node)  in io_req_put_rsrc_locked()
    123  io_rsrc_put_node(node, 1);  in io_req_put_rsrc_locked()
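
io_req_put_rsrc_locked() has a fast path: a reference dropped on the ring's currently active rsrc node is only counted into a cached batch (the lock is already held), while refs on retired nodes go straight to the shared refcount. The sketch below illustrates that batching under the assumption that plain ints stand in for percpu_ref; names mirror rsrc.h but this is not the kernel code.

/* Batched ref puts for the active node, immediate puts for retired ones. */
#include <stdio.h>

struct rsrc_node { int refs; };

struct ring_ctx {
	struct rsrc_node *rsrc_node;	/* the currently active node */
	int rsrc_cached_refs;		/* puts batched for the active node */
};

static void rsrc_put_node(struct rsrc_node *node, int nr)
{
	node->refs -= nr;	/* kernel: percpu_ref_put_many(&node->refs, nr) */
}

static void req_put_rsrc_locked(struct ring_ctx *ctx, struct rsrc_node *node)
{
	if (!node)
		return;
	if (node == ctx->rsrc_node)
		ctx->rsrc_cached_refs++;	/* cheap: no shared-counter write */
	else
		rsrc_put_node(node, 1);		/* retired node: pay immediately */
}

int main(void)
{
	struct rsrc_node active = { .refs = 10 }, old = { .refs = 2 };
	struct ring_ctx ctx = { .rsrc_node = &active };

	req_put_rsrc_locked(&ctx, &active);
	req_put_rsrc_locked(&ctx, &active);
	req_put_rsrc_locked(&ctx, &old);

	/* later, one batched put covers all the cached refs at once */
	rsrc_put_node(ctx.rsrc_node, ctx.rsrc_cached_refs);
	ctx.rsrc_cached_refs = 0;
	printf("active=%d old=%d\n", active.refs, old.refs);	/* 8 and 1 */
	return 0;
}
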
D | io-wq.c |
    99  int node;  member
    480  struct io_wq_work_node *node, *prev;  in io_get_next_work() local
    485  wq_list_for_each(node, prev, &acct->work_list) {  in io_get_next_work()
    488  work = container_of(node, struct io_wq_work, list);  in io_get_next_work()
    492  wq_list_del(&acct->work_list, node, prev);  in io_get_next_work()
    509  node = &tail->list;  in io_get_next_work()
    761  tsk = create_io_thread(io_wqe_worker, worker, wqe->node);  in create_worker_cont()
    811  worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);  in create_io_worker()
    830  tsk = create_io_thread(io_wqe_worker, worker, wqe->node);  in create_io_worker()
    1038  struct io_wq_work_node *node, *prev;  in io_acct_cancel_pending_work() local
    [all …]
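
io_get_next_work() shows why wq_list_for_each() carries a prev pointer: a singly linked list can only unlink a middle node in O(1) if the walker remembers the node before it. A simplified userspace re-creation of that walk-and-unlink, with a stalled flag standing in for io-wq's "hashed and busy" test:

/* Walk a singly linked list with a trailing prev pointer so the first
 * runnable item can be unlinked in place, as in io_get_next_work(). */
#include <stdbool.h>
#include <stdio.h>

struct work {
	int id;
	bool stalled;		/* stand-in for "hashed and busy" */
	struct work *next;
};

struct work_list { struct work *first, *last; };

static struct work *get_next_work(struct work_list *list)
{
	struct work *prev = NULL;

	for (struct work *node = list->first; node; prev = node, node = node->next) {
		if (node->stalled)
			continue;
		/* unlink node: the kernel's wq_list_del(list, node, prev) */
		if (prev)
			prev->next = node->next;
		else
			list->first = node->next;
		if (list->last == node)
			list->last = prev;
		node->next = NULL;
		return node;
	}
	return NULL;
}

int main(void)
{
	struct work a = { 1, true }, b = { 2, false }, c = { 3, false };
	struct work_list wl = { &a, &c };
	a.next = &b;
	b.next = &c;

	struct work *w = get_next_work(&wl);	/* skips stalled 1, picks 2 */
	printf("got work %d; head is still %d\n", w->id, wl.first->id);
	return 0;
}

The wqe->node matches in the same file are unrelated to list nodes: there, node is the NUMA node id passed to kzalloc_node() and create_io_thread() so worker state is allocated near the CPU that will run it.
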
D | io_uring.c |
    245  struct llist_node *node = llist_del_all(&ctx->fallback_llist);  in io_fallback_req_func() local
    250  llist_for_each_entry_safe(req, tmp, node, io_task_work.node)  in io_fallback_req_func()
    1015  static unsigned int handle_tw_list(struct llist_node *node,  in handle_tw_list() argument
    1021  while (node != last) {  in handle_tw_list()
    1022  struct llist_node *next = node->next;  in handle_tw_list()
    1023  struct io_kiocb *req = container_of(node, struct io_kiocb,  in handle_tw_list()
    1024  io_task_work.node);  in handle_tw_list()
    1026  prefetch(container_of(next, struct io_kiocb, io_task_work.node));  in handle_tw_list()
    1037  node = next;  in handle_tw_list()
    1086  struct llist_node *node = io_llist_xchg(&tctx->task_list, &fake);  in tctx_task_work() local
    [all …]
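
The task-work matches show the lock-free consumer pattern: producers push requests onto an llist, and the consumer claims the whole chain with one atomic exchange, then walks it saving ->next before each node is handled (handle_tw_list() even prefetches the next request). A minimal sketch of that producer/consumer shape, with C11 atomics standing in for the kernel's llist and tctx_task_work()'s fake-node trick simplified to a plain del-all:

/* Lock-free multi-producer push; consumer takes the whole list at once. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct tw_node { struct tw_node *next; };
struct req { int id; struct tw_node node; };

static _Atomic(struct tw_node *) task_list;

static void tw_add(struct tw_node *n)		/* kernel: llist_add() */
{
	struct tw_node *first = atomic_load(&task_list);
	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&task_list, &first, n));
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct req r[3] = { { 1 }, { 2 }, { 3 } };
	for (int i = 0; i < 3; i++)
		tw_add(&r[i].node);

	/* consumer: claim everything at once (kernel: llist_del_all()) */
	struct tw_node *node = atomic_exchange(&task_list, NULL);
	while (node) {
		struct tw_node *next = node->next;	/* save before handling */
		printf("req %d\n", container_of(node, struct req, node)->id);
		node = next;	/* note: nodes come back in reverse push order */
	}
	return 0;
}
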
D | rsrc.c |
    192  struct llist_node *node;  in io_rsrc_put_work() local
    195  node = llist_del_all(&ctx->rsrc_put_llist);  in io_rsrc_put_work()
    197  while (node) {  in io_rsrc_put_work()
    199  struct llist_node *next = node->next;  in io_rsrc_put_work()
    201  ref_node = llist_entry(node, struct io_rsrc_node, llist);  in io_rsrc_put_work()
    203  node = next;  in io_rsrc_put_work()
    221  struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);  in io_rsrc_node_ref_zero()  local
    222  struct io_ring_ctx *ctx = node->rsrc_data->ctx;  in io_rsrc_node_ref_zero()
    228  node->done = true;  in io_rsrc_node_ref_zero()
    231  if (node->rsrc_data->quiesce)  in io_rsrc_node_ref_zero()
    [all …]
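
io_rsrc_node_ref_zero() illustrates an ordering trick on top of refcounting: nodes can drop their last reference out of order, but resource cleanup must run in creation order, so each node is only marked done and the leading run of done nodes is flushed. A small sketch of that prefix flush, assuming an array in creation order stands in for the kernel's rsrc_ref_list:

/* Out-of-order completion, in-order cleanup via a done-prefix flush. */
#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 4

struct rsrc_node { int id; bool done; };

static struct rsrc_node nodes[NR_NODES] = { {0}, {1}, {2}, {3} };
static int first_live;	/* start of the not-yet-flushed prefix */

static void node_ref_zero(struct rsrc_node *node)
{
	node->done = true;
	/* flush only the leading run of done nodes, preserving order */
	while (first_live < NR_NODES && nodes[first_live].done) {
		printf("cleaning up node %d\n", nodes[first_live].id);
		first_live++;
	}
}

int main(void)
{
	node_ref_zero(&nodes[1]);	/* out of order: nothing flushed yet */
	node_ref_zero(&nodes[0]);	/* now 0 and 1 flush together */
	node_ref_zero(&nodes[3]);
	node_ref_zero(&nodes[2]);	/* flushes 2 and 3 */
	return 0;
}
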
D | io_uring.h |
    70  void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
    380  struct io_wq_work_node *node;  in io_alloc_req() local
    382  node = wq_stack_extract(&ctx->submit_state.free_list);  in io_alloc_req()
    383  return container_of(node, struct io_kiocb, comp_list);  in io_alloc_req()
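
io_alloc_req() is a free-list fast path: completed requests are parked on a stack, and allocating one is just a pop plus container_of(), keeping the allocator off the submission hot path. A standalone sketch of the same pop-and-recover move, with illustrative types in place of io_kiocb:

/* Request allocation from an intrusive free-list stack. */
#include <stddef.h>
#include <stdio.h>

struct stack_node { struct stack_node *next; };

struct request {
	int seq;
	struct stack_node comp_list;	/* intrusive free-list link */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct stack_node *free_list;

static void req_free(struct request *req)	/* push: wq_stack_add_head() */
{
	req->comp_list.next = free_list;
	free_list = &req->comp_list;
}

static struct request *req_alloc(void)		/* pop: wq_stack_extract() */
{
	struct stack_node *node = free_list;

	if (!node)
		return NULL;	/* the kernel refills from the slab cache here */
	free_list = node->next;
	return container_of(node, struct request, comp_list);
}

int main(void)
{
	struct request pool[2] = { { 0 }, { 1 } };

	req_free(&pool[0]);
	req_free(&pool[1]);
	printf("alloc -> req %d\n", req_alloc()->seq);	/* 1: LIFO reuse */
	printf("alloc -> req %d\n", req_alloc()->seq);	/* 0 */
	return 0;
}
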
D | cancel.c |
    136  struct io_tctx_node *node;  in __io_async_cancel() local
    151  list_for_each_entry(node, &ctx->tctx_list, ctx_node) {  in __io_async_cancel()
    152  struct io_uring_task *tctx = node->task->io_uring;  in __io_async_cancel()
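
__io_async_cancel() uses the tctx_list built up in tctx.c: if the request cannot be cancelled in the current task's context, every task context registered on the ring is tried in turn. A hedged sketch of that fallback loop; an array stands in for the kernel's linked list, and try_cancel() is a hypothetical helper, not a kernel function:

/* Try cancelling a request in each registered task context until one hits. */
#include <stdbool.h>
#include <stdio.h>

struct tctx { int task_id; int pending_req; };

/* hypothetical: succeeds if this task context owns the request */
static bool try_cancel(struct tctx *tctx, int req_id)
{
	return tctx->pending_req == req_id;
}

int main(void)
{
	struct tctx tctx_list[] = { { 100, 7 }, { 101, 42 }, { 102, 9 } };
	int target = 42;

	/* kernel: list_for_each_entry(node, &ctx->tctx_list, ctx_node) */
	for (int i = 0; i < 3; i++) {
		if (try_cancel(&tctx_list[i], target)) {
			printf("cancelled req %d in task %d\n",
			       target, tctx_list[i].task_id);
			break;
		}
	}
	return 0;
}
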