Lines Matching +full:reg +full:- +full:offset
1 // SPDX-License-Identifier: GPL-2.0
22 mutex_lock(&ctx->uring_lock); in io_init_wq_offload()
23 hash = ctx->hash_map; in io_init_wq_offload()
27 mutex_unlock(&ctx->uring_lock); in io_init_wq_offload()
28 return ERR_PTR(-ENOMEM); in io_init_wq_offload()
30 refcount_set(&hash->refs, 1); in io_init_wq_offload()
31 init_waitqueue_head(&hash->wait); in io_init_wq_offload()
32 ctx->hash_map = hash; in io_init_wq_offload()
34 mutex_unlock(&ctx->uring_lock); in io_init_wq_offload()
42 concurrency = min(ctx->sq_entries, 4 * num_online_cpus()); in io_init_wq_offload()
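These matches look to come from io_uring's task-context handling (io_uring/tctx.c in recent kernels). The io_init_wq_offload() lines above show the per-ring io-wq hash map being set up lazily under uring_lock, and the io-wq worker concurrency being capped at min(SQ entries, 4 * online CPUs). A rough sketch of how those matched lines hang together; the allocation on the miss path and the trailing io_wq setup did not match the query and are assumptions filled in from context:

static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
					struct task_struct *task)
{
	struct io_wq_hash *hash;
	struct io_wq_data data;
	unsigned int concurrency;

	/* Create the hash map on first use, under the ring's uring_lock. */
	mutex_lock(&ctx->uring_lock);
	hash = ctx->hash_map;
	if (!hash) {
		hash = kzalloc(sizeof(*hash), GFP_KERNEL);	/* assumed */
		if (!hash) {
			mutex_unlock(&ctx->uring_lock);
			return ERR_PTR(-ENOMEM);
		}
		refcount_set(&hash->refs, 1);
		init_waitqueue_head(&hash->wait);
		ctx->hash_map = hash;
	}
	mutex_unlock(&ctx->uring_lock);

	data.hash = hash;	/* assumed */
	data.task = task;	/* assumed; remaining io_wq_data callbacks omitted */

	/* Bound worker concurrency by the SQ size and 4 * online CPUs. */
	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

	return io_wq_create(concurrency, &data);	/* assumed */
}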
49 struct io_uring_task *tctx = tsk->io_uring; in __io_uring_free()
59 xa_for_each(&tctx->xa, index, node) { in __io_uring_free()
63 WARN_ON_ONCE(tctx->io_wq); in __io_uring_free()
64 WARN_ON_ONCE(tctx->cached_refs); in __io_uring_free()
66 percpu_counter_destroy(&tctx->inflight); in __io_uring_free()
68 tsk->io_uring = NULL; in __io_uring_free()
79 return -ENOMEM; in io_uring_alloc_task_context()
81 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL); in io_uring_alloc_task_context()
87 tctx->io_wq = io_init_wq_offload(ctx, task); in io_uring_alloc_task_context()
88 if (IS_ERR(tctx->io_wq)) { in io_uring_alloc_task_context()
89 ret = PTR_ERR(tctx->io_wq); in io_uring_alloc_task_context()
90 percpu_counter_destroy(&tctx->inflight); in io_uring_alloc_task_context()
95 xa_init(&tctx->xa); in io_uring_alloc_task_context()
96 init_waitqueue_head(&tctx->wait); in io_uring_alloc_task_context()
97 atomic_set(&tctx->in_cancel, 0); in io_uring_alloc_task_context()
98 atomic_set(&tctx->inflight_tracked, 0); in io_uring_alloc_task_context()
99 task->io_uring = tctx; in io_uring_alloc_task_context()
100 init_llist_head(&tctx->task_list); in io_uring_alloc_task_context()
101 init_task_work(&tctx->task_work, tctx_task_work); in io_uring_alloc_task_context()
107 struct io_uring_task *tctx = current->io_uring; in __io_uring_add_tctx_node()
116 tctx = current->io_uring; in __io_uring_add_tctx_node()
117 if (ctx->iowq_limits_set) { in __io_uring_add_tctx_node()
118 unsigned int limits[2] = { ctx->iowq_limits[0], in __io_uring_add_tctx_node()
119 ctx->iowq_limits[1], }; in __io_uring_add_tctx_node()
121 ret = io_wq_max_workers(tctx->io_wq, limits); in __io_uring_add_tctx_node()
126 if (!xa_load(&tctx->xa, (unsigned long)ctx)) { in __io_uring_add_tctx_node()
129 return -ENOMEM; in __io_uring_add_tctx_node()
130 node->ctx = ctx; in __io_uring_add_tctx_node()
131 node->task = current; in __io_uring_add_tctx_node()
133 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx, in __io_uring_add_tctx_node()
140 mutex_lock(&ctx->uring_lock); in __io_uring_add_tctx_node()
141 list_add(&node->ctx_node, &ctx->tctx_list); in __io_uring_add_tctx_node()
142 mutex_unlock(&ctx->uring_lock); in __io_uring_add_tctx_node()
151 if (ctx->flags & IORING_SETUP_SINGLE_ISSUER in __io_uring_add_tctx_node_from_submit()
152 && ctx->submitter_task != current) in __io_uring_add_tctx_node_from_submit()
153 return -EEXIST; in __io_uring_add_tctx_node_from_submit()
159 current->io_uring->last = ctx; in __io_uring_add_tctx_node_from_submit()
164 * Remove this io_uring_file -> task mapping.
168 struct io_uring_task *tctx = current->io_uring; in io_uring_del_tctx_node()
173 node = xa_erase(&tctx->xa, index); in io_uring_del_tctx_node()
177 WARN_ON_ONCE(current != node->task); in io_uring_del_tctx_node()
178 WARN_ON_ONCE(list_empty(&node->ctx_node)); in io_uring_del_tctx_node()
180 mutex_lock(&node->ctx->uring_lock); in io_uring_del_tctx_node()
181 list_del(&node->ctx_node); in io_uring_del_tctx_node()
182 mutex_unlock(&node->ctx->uring_lock); in io_uring_del_tctx_node()
184 if (tctx->last == node->ctx) in io_uring_del_tctx_node()
185 tctx->last = NULL; in io_uring_del_tctx_node()
191 struct io_wq *wq = tctx->io_wq; in io_uring_clean_tctx()
195 xa_for_each(&tctx->xa, index, node) { in io_uring_clean_tctx()
205 tctx->io_wq = NULL; in io_uring_clean_tctx()
211 struct io_uring_task *tctx = current->io_uring; in io_uring_unreg_ringfd()
215 if (tctx->registered_rings[i]) { in io_uring_unreg_ringfd()
216 fput(tctx->registered_rings[i]); in io_uring_unreg_ringfd()
217 tctx->registered_rings[i] = NULL; in io_uring_unreg_ringfd()
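io_uring_unreg_ringfd() appears in the matches almost in full: it walks every registered-ring slot of the current task and drops whatever file reference is still held there. A sketch with only the surrounding loop filled in by hand:

static void io_uring_unreg_ringfd(void)
{
	struct io_uring_task *tctx = current->io_uring;
	int i;

	for (i = 0; i < IO_RINGFD_REG_MAX; i++) {	/* loop bound assumed */
		if (tctx->registered_rings[i]) {
			fput(tctx->registered_rings[i]);
			tctx->registered_rings[i] = NULL;
		}
	}
}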
225 int offset; in io_ring_add_registered_file() local
226 for (offset = start; offset < end; offset++) { in io_ring_add_registered_file()
227 offset = array_index_nospec(offset, IO_RINGFD_REG_MAX); in io_ring_add_registered_file()
228 if (tctx->registered_rings[offset]) in io_ring_add_registered_file()
231 tctx->registered_rings[offset] = file; in io_ring_add_registered_file()
232 return offset; in io_ring_add_registered_file()
234 return -EBUSY; in io_ring_add_registered_file()
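io_ring_add_registered_file() is nearly fully visible above: it scans registered_rings for a free slot in [start, end), clamping each index with array_index_nospec() before it is used, and returns either the chosen slot or -EBUSY. In the sketch below only the "continue" on an occupied slot is filled in:

static int io_ring_add_registered_file(struct io_uring_task *tctx,
				       struct file *file, int start, int end)
{
	int offset;

	for (offset = start; offset < end; offset++) {
		offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[offset])
			continue;	/* slot taken, try the next one */

		tctx->registered_rings[offset] = file;
		return offset;
	}

	return -EBUSY;	/* no free slot in the requested range */
}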
241 int offset; in io_ring_add_registered_fd() local
245 return -EBADF; in io_ring_add_registered_fd()
248 return -EOPNOTSUPP; in io_ring_add_registered_fd()
250 offset = io_ring_add_registered_file(tctx, file, start, end); in io_ring_add_registered_fd()
251 if (offset < 0) in io_ring_add_registered_fd()
253 return offset; in io_ring_add_registered_fd()
259 * with ->data set to the ring_fd, and ->offset given for the desired
260 * index. If no index is desired, application may set ->offset == -1U
268 struct io_uring_rsrc_update reg; in io_ringfd_register() local
273 return -EINVAL; in io_ringfd_register()
275 mutex_unlock(&ctx->uring_lock); in io_ringfd_register()
277 mutex_lock(&ctx->uring_lock); in io_ringfd_register()
281 tctx = current->io_uring; in io_ringfd_register()
285 if (copy_from_user(&reg, &arg[i], sizeof(reg))) { in io_ringfd_register()
286 ret = -EFAULT; in io_ringfd_register()
290 if (reg.resv) { in io_ringfd_register()
291 ret = -EINVAL; in io_ringfd_register()
295 if (reg.offset == -1U) { in io_ringfd_register()
299 if (reg.offset >= IO_RINGFD_REG_MAX) { in io_ringfd_register()
300 ret = -EINVAL; in io_ringfd_register()
303 start = reg.offset; in io_ringfd_register()
307 ret = io_ring_add_registered_fd(tctx, reg.data, start, end); in io_ringfd_register()
311 reg.offset = ret; in io_ringfd_register()
312 if (copy_to_user(&arg[i], &reg, sizeof(reg))) { in io_ringfd_register()
313 fput(tctx->registered_rings[reg.offset]); in io_ringfd_register()
314 tctx->registered_rings[reg.offset] = NULL; in io_ringfd_register()
315 ret = -EFAULT; in io_ringfd_register()
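The comment just ahead of io_ringfd_register() (partially visible above) spells out the ABI: userspace passes an array of struct io_uring_rsrc_update entries with ->data holding the ring fd and ->offset either a requested index or -1U to let the kernel pick a free slot, and the kernel writes the chosen index back into ->offset. Below is a minimal userspace sketch of one registration through the raw io_uring_register(2) syscall, assuming headers new enough to define IORING_REGISTER_RING_FDS; the helper name register_ring_fd is made up for the example:

#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/*
 * Register ring_fd in the caller's registered-ring table and return the
 * slot index picked by the kernel, or a negative errno. Sketch only.
 */
static int register_ring_fd(int ring_fd)
{
	struct io_uring_rsrc_update reg;
	int ret;

	memset(&reg, 0, sizeof(reg));	/* resv must be 0 */
	reg.data = ring_fd;		/* ->data carries the fd to register */
	reg.offset = -1U;		/* -1U: let the kernel pick a free slot */

	ret = syscall(__NR_io_uring_register, ring_fd,
		      IORING_REGISTER_RING_FDS, &reg, 1);
	if (ret < 0)
		return -errno;

	/* On success the kernel copied the chosen index back into ->offset. */
	return reg.offset;
}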
327 struct io_uring_task *tctx = current->io_uring; in io_ringfd_unregister()
328 struct io_uring_rsrc_update reg; in io_ringfd_unregister() local
332 return -EINVAL; in io_ringfd_unregister()
337 if (copy_from_user(&reg, &arg[i], sizeof(reg))) { in io_ringfd_unregister()
338 ret = -EFAULT; in io_ringfd_unregister()
341 if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) { in io_ringfd_unregister()
342 ret = -EINVAL; in io_ringfd_unregister()
346 reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX); in io_ringfd_unregister()
347 if (tctx->registered_rings[reg.offset]) { in io_ringfd_unregister()
348 fput(tctx->registered_rings[reg.offset]); in io_ringfd_unregister()
349 tctx->registered_rings[reg.offset] = NULL; in io_ringfd_unregister()
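io_ringfd_unregister() mirrors that: it rejects entries with a non-zero resv or data field or an out-of-range offset, then drops the file reference held in that slot. A matching userspace sketch, with the same headers and caveats as the registration example above (unregister_ring_fd is again a made-up helper name):

static int unregister_ring_fd(int ring_fd, unsigned int index)
{
	struct io_uring_rsrc_update reg;
	int ret;

	memset(&reg, 0, sizeof(reg));	/* ->data and ->resv must stay 0 */
	reg.offset = index;		/* slot returned at registration time */

	ret = syscall(__NR_io_uring_register, ring_fd,
		      IORING_UNREGISTER_RING_FDS, &reg, 1);
	return ret < 0 ? -errno : 0;
}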