// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "openclose.h"
#include "rsrc.h"
#include "memmap.h"
#include "register.h"

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

static const struct io_mapped_ubuf dummy_ubuf = {
	/* set an invalid range so that io_import_fixed() fails when it meets it */
	.ubuf = -1UL,
	.len = UINT_MAX,
};

int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}
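
/*
 * Example: assuming 4 KiB pages and an RLIMIT_MEMLOCK of 64 MiB, page_limit
 * is 64 MiB >> 12 == 16384 pages, and a registration that would push
 * locked_vm past that fails with -ENOMEM. The try_cmpxchg() loop simply
 * retries if another task changed locked_vm between the read and the update.
 */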

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here; we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}
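
/*
 * For example: a sparse registration entry with iov_base == NULL and
 * iov_len == 0 is accepted here (and later mapped to dummy_ubuf), while a
 * NULL base with a non-zero length, a non-NULL base with a zero length, or
 * a buffer larger than 1 GiB is rejected.
 */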

static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	*slot = NULL;
	if (imu != &dummy_ubuf) {
		if (!refcount_dec_and_test(&imu->refs))
			return;
		for (i = 0; i < imu->nr_bvecs; i++) {
			struct folio *folio = page_folio(imu->bvec[i].bv_page);

			unpin_user_folio(folio, 1);
		}
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
}

static void io_rsrc_put_work(struct io_rsrc_node *node)
{
	struct io_rsrc_put *prsrc = &node->item;

	if (prsrc->tag)
		io_post_aux_cqe(node->ctx, prsrc->tag, 0, 0);

	switch (node->type) {
	case IORING_RSRC_FILE:
		fput(prsrc->file);
		break;
	case IORING_RSRC_BUFFER:
		io_rsrc_buf_put(node->ctx, prsrc);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, node))
		kfree(node);
}

void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
	__must_hold(&node->ctx->uring_lock)
{
	struct io_ring_ctx *ctx = node->ctx;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					    struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (node->refs)
			break;
		list_del(&node->node);

		if (likely(!node->empty))
			io_rsrc_put_work(node);
		io_rsrc_node_destroy(ctx, node);
	}
	if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
		wake_up_all(&ctx->rsrc_quiesce_wq);
}

struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *ref_node;

	ref_node = io_alloc_cache_get(&ctx->rsrc_node_cache);
	if (!ref_node) {
		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
		if (!ref_node)
			return NULL;
	}

	ref_node->ctx = ctx;
	ref_node->empty = 0;
	ref_node->refs = 1;
	return ref_node;
}

__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *backup;
	DEFINE_WAIT(we);
	int ret;

	/* As we may drop ->uring_lock, another task may have started a quiesce */
	if (data->quiesce)
		return -ENXIO;

	backup = io_rsrc_node_alloc(ctx);
	if (!backup)
		return -ENOMEM;
	ctx->rsrc_node->empty = true;
	ctx->rsrc_node->type = -1;
	list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, ctx->rsrc_node);
	ctx->rsrc_node = backup;

	if (list_empty(&ctx->rsrc_ref_list))
		return 0;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 1);
		smp_mb();
	}

	ctx->rsrc_quiesce++;
	data->quiesce = true;
	do {
		prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
		mutex_unlock(&ctx->uring_lock);

		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			finish_wait(&ctx->rsrc_quiesce_wq, &we);
			mutex_lock(&ctx->uring_lock);
			if (list_empty(&ctx->rsrc_ref_list))
				ret = 0;
			break;
		}

		schedule();
		mutex_lock(&ctx->uring_lock);
		ret = 0;
	} while (!list_empty(&ctx->rsrc_ref_list));

	finish_wait(&ctx->rsrc_quiesce_wq, &we);
	data->quiesce = false;
	ctx->rsrc_quiesce--;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 0);
		smp_mb();
	}
	return ret;
}
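
/*
 * In short, quiesce parks the current (now empty) rsrc node on
 * rsrc_ref_list, switches the ctx to a freshly allocated node, and then
 * waits on rsrc_quiesce_wq until every older node has dropped its last
 * reference and had its put work run. The wait drops ->uring_lock and runs
 * task work, so callers must not assume the lock was held throughout.
 */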

static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}

static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}

__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, int type,
				     u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = 0;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->rsrc_type = type;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}

static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	int fd, i, err = 0;
	unsigned int done;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			err = io_queue_rsrc_removal(data, i,
						    io_slot_file(file_slot));
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
		}
		if (fd != -1) {
			struct file *file = fget(fd);

			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}
	return done ? done : err;
}

static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec fast_iov, *iov;
	struct page *last_hpage = NULL;
	struct iovec __user *uvec;
	u64 user_data = up->data;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		u64 tag = 0;

		uvec = u64_to_user_ptr(user_data);
		iov = iovec_from_user(uvec, 1, 1, &fast_iov, ctx->compat);
		if (IS_ERR(iov)) {
			err = PTR_ERR(iov);
			break;
		}
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(iov);
		if (err)
			break;
		if (!iov->iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != &dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = (struct io_mapped_ubuf *)&dummy_ubuf;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, i) = tag;
		if (ctx->compat)
			user_data += sizeof(struct compat_iovec);
		else
			user_data += sizeof(struct iovec);
	}
	return done ? done : err;
}

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;

	lockdep_assert_held(&ctx->uring_lock);

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}

int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
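
/*
 * Note: with up->offset == IORING_FILE_INDEX_ALLOC, each fd from the user
 * array is installed into a free fixed-file slot and the chosen slot index
 * is written back into that array entry; otherwise the slots starting at
 * up->offset are overwritten in place via the regular update path above.
 */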

int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
{
	struct io_ring_ctx *ctx = data->ctx;
	struct io_rsrc_node *node = ctx->rsrc_node;
	u64 *tag_slot = io_get_tag_slot(data, idx);

	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
	if (unlikely(!ctx->rsrc_node)) {
		ctx->rsrc_node = node;
		return -ENOMEM;
	}

	node->item.rsrc = rsrc;
	node->type = data->rsrc_type;
	node->item.tag = *tag_slot;
	*tag_slot = 0;
	list_add_tail(&node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, node);
	return 0;
}
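
/*
 * The removed resource is not dropped here; it is parked on the current
 * rsrc node, which is queued on rsrc_ref_list and replaced with a fresh
 * node. Once every request referencing the old node completes and its
 * refcount hits zero, io_rsrc_node_ref_zero() runs io_rsrc_put_work(),
 * which posts the tag CQE (if any) and does the final fput() or buffer
 * unmap.
 */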

void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		if (!file)
			continue;
		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

	io_free_file_tables(&ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests from using the table.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}

int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_FILE, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
	prsrc->buf = NULL;
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests from using the table.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

/*
 * Not super efficient, but this is only done at registration time. And we
 * do cache the last compound head, so generally we'll only do a full search
 * if we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}
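
/*
 * Example: with 4 KiB base pages, a buffer backed by a single 2 MiB
 * compound (huge) page is accounted as page_size(hpage) >> PAGE_SHIFT ==
 * 512 pages exactly once; the remaining tail pages hit the *last_hpage
 * cache (or headpage_already_acct()) and are skipped, so the huge page is
 * never double-counted against RLIMIT_MEMLOCK.
 */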

static bool io_do_coalesce_buffer(struct page ***pages, int *nr_pages,
				struct io_imu_folio_data *data, int nr_folios)
{
	struct page **page_array = *pages, **new_array = NULL;
	int nr_pages_left = *nr_pages, i, j;

	/* Store head pages only */
	new_array = kvmalloc_array(nr_folios, sizeof(struct page *),
					GFP_KERNEL);
	if (!new_array)
		return false;

	new_array[0] = compound_head(page_array[0]);
	/*
	 * The pages are bound to the folio, so this doesn't
	 * actually unpin them but drops all but one reference,
	 * which is usually put down by io_buffer_unmap().
	 * Note: this needs a better helper.
	 */
	if (data->nr_pages_head > 1)
		unpin_user_pages(&page_array[1], data->nr_pages_head - 1);

	j = data->nr_pages_head;
	nr_pages_left -= data->nr_pages_head;
	for (i = 1; i < nr_folios; i++) {
		unsigned int nr_unpin;

		new_array[i] = page_array[j];
		nr_unpin = min_t(unsigned int, nr_pages_left - 1,
					data->nr_pages_mid - 1);
		if (nr_unpin)
			unpin_user_pages(&page_array[j+1], nr_unpin);
		j += data->nr_pages_mid;
		nr_pages_left -= data->nr_pages_mid;
	}
	kvfree(page_array);
	*pages = new_array;
	*nr_pages = nr_folios;
	return true;
}

static bool io_try_coalesce_buffer(struct page ***pages, int *nr_pages,
					 struct io_imu_folio_data *data)
{
	struct page **page_array = *pages;
	struct folio *folio = page_folio(page_array[0]);
	unsigned int count = 1, nr_folios = 1;
	int i;

	if (*nr_pages <= 1)
		return false;

	data->nr_pages_mid = folio_nr_pages(folio);
	if (data->nr_pages_mid == 1)
		return false;

	data->folio_shift = folio_shift(folio);
	data->first_folio_page_idx = folio_page_idx(folio, page_array[0]);
	/*
	 * Check if pages are contiguous inside a folio, and all folios have
	 * the same page count except for the head and tail.
	 */
	for (i = 1; i < *nr_pages; i++) {
		if (page_folio(page_array[i]) == folio &&
			page_array[i] == page_array[i-1] + 1) {
			count++;
			continue;
		}

		if (nr_folios == 1) {
			if (folio_page_idx(folio, page_array[i-1]) !=
				data->nr_pages_mid - 1)
				return false;

			data->nr_pages_head = count;
		} else if (count != data->nr_pages_mid) {
			return false;
		}

		folio = page_folio(page_array[i]);
		if (folio_size(folio) != (1UL << data->folio_shift) ||
			folio_page_idx(folio, page_array[i]) != 0)
			return false;

		count = 1;
		nr_folios++;
	}
	if (nr_folios == 1)
		data->nr_pages_head = count;

	return io_do_coalesce_buffer(pages, nr_pages, data, nr_folios);
}
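
/*
 * Example: with 4 KiB base pages, a 4 MiB buffer backed by two aligned
 * 2 MiB folios is pinned as 1024 individual pages. Coalescing keeps one
 * head page per folio, so nr_pages drops from 1024 to 2 and folio_shift
 * becomes 21, which later lets io_sqe_buffer_register() describe the whole
 * buffer with just two bvec entries.
 */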

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct io_imu_folio_data data;
	bool coalesced;

	*pimu = (struct io_mapped_ubuf *)&dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
				&nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's huge page(s), try to coalesce them into fewer bvec entries */
	coalesced = io_try_coalesce_buffer(&pages, &nr_pages, &data);

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret)
		goto done;

	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->len = iov->iov_len;
	imu->nr_bvecs = nr_pages;
	imu->folio_shift = PAGE_SHIFT;
	if (coalesced)
		imu->folio_shift = data.folio_shift;
	refcount_set(&imu->refs, 1);
	off = (unsigned long)iov->iov_base & ~PAGE_MASK;
	if (coalesced)
		off += data.first_folio_page_idx << PAGE_SHIFT;
	*pimu = imu;
	ret = 0;

	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, (1UL << imu->folio_shift) - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret) {
		kvfree(imu);
		if (pages) {
			for (i = 0; i < nr_pages; i++)
				unpin_user_folio(page_folio(pages[i]), 1);
		}
	}
	kvfree(pages);
	return ret;
}
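
/*
 * The resulting bvec layout, roughly: bvec[0] starts at the buffer's offset
 * into its first folio and covers the rest of that folio, each middle bvec
 * covers one whole folio (1UL << folio_shift bytes), and the final bvec
 * holds whatever tail is left, so the entries sum to exactly imu->len.
 */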

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	struct iovec fast_iov, *iov = &fast_iov;
	const struct iovec __user *uvec;
	int i, ret;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	if (!arg)
		memset(iov, 0, sizeof(*iov));

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			uvec = (struct iovec __user *) arg;
			iov = iovec_from_user(uvec, 1, 1, &fast_iov, ctx->compat);
			if (IS_ERR(iov)) {
				ret = PTR_ERR(iov);
				break;
			}
			ret = io_buffer_validate(iov);
			if (ret)
				break;
			if (ctx->compat)
				arg += sizeof(struct compat_iovec);
			else
				arg += sizeof(struct iovec);
		}

		if (!iov->iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

int io_import_fixed(int ddir, struct iov_iter *iter,
			   struct io_mapped_ubuf *imu,
			   u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > (imu->ubuf + imu->len)))
		return -EFAULT;

	/*
	 * Might not be the start of the buffer; set the size appropriately
	 * and advance to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are the same in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be folio_size aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset < bvec->bv_len) {
			iter->bvec = bvec;
			iter->count -= offset;
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> imu->folio_shift);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ((1UL << imu->folio_shift) - 1);
		}
	}

	return 0;
}
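
/*
 * Worked example, assuming 4 KiB folios (folio_shift == 12) and
 * bvec[0].bv_len == 4096: for a buf_addr 20 KiB past imu->ubuf, offset is
 * 20480, which is not inside bvec[0], so offset becomes 20480 - 4096 ==
 * 16384 and seg_skip == 1 + (16384 >> 12) == 5. The iterator then starts
 * at bvec[5] with iov_offset == 16384 & 4095 == 0, without walking the
 * intervening segments.
 */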

static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx)
{
	struct io_mapped_ubuf **user_bufs;
	struct io_rsrc_data *data;
	int i, ret, nbufs;

	/*
	 * Accounting state is shared between the two rings; that only works if
	 * both rings are accounted towards the same counters.
	 */
	if (ctx->user != src_ctx->user || ctx->mm_account != src_ctx->mm_account)
		return -EINVAL;

	/*
	 * Drop our own lock here. We'll set up the data we need and reference
	 * the source buffers, then re-grab, check, and assign at the end.
	 */
	mutex_unlock(&ctx->uring_lock);

	mutex_lock(&src_ctx->uring_lock);
	ret = -ENXIO;
	nbufs = src_ctx->nr_user_bufs;
	if (!nbufs)
		goto out_unlock;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, NULL, nbufs, &data);
	if (ret)
		goto out_unlock;

	ret = -ENOMEM;
	user_bufs = kcalloc(nbufs, sizeof(*ctx->user_bufs), GFP_KERNEL);
	if (!user_bufs)
		goto out_free_data;

	for (i = 0; i < nbufs; i++) {
		struct io_mapped_ubuf *src = src_ctx->user_bufs[i];

		if (src != &dummy_ubuf)
			refcount_inc(&src->refs);
		user_bufs[i] = src;
	}

	/* Have a ref on the bufs now, drop src lock and re-grab our own lock */
	mutex_unlock(&src_ctx->uring_lock);
	mutex_lock(&ctx->uring_lock);
	if (!ctx->user_bufs) {
		ctx->user_bufs = user_bufs;
		ctx->buf_data = data;
		ctx->nr_user_bufs = nbufs;
		return 0;
	}

	/* someone raced setting up buffers, dump ours */
	for (i = 0; i < nbufs; i++)
		io_buffer_unmap(ctx, &user_bufs[i]);
	io_rsrc_data_free(data);
	kfree(user_bufs);
	return -EBUSY;
out_free_data:
	io_rsrc_data_free(data);
out_unlock:
	mutex_unlock(&src_ctx->uring_lock);
	mutex_lock(&ctx->uring_lock);
	return ret;
}

/*
 * Copy the registered buffers from the source ring whose file descriptor
 * is given in src_fd to the current ring. This is identical to registering
 * the buffers with ctx, except faster as mappings already exist.
 *
 * Since the memory is already accounted once, don't account it again.
 */
int io_register_clone_buffers(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_clone_buffers buf;
	bool registered_src;
	struct file *file;
	int ret;

	if (ctx->user_bufs || ctx->nr_user_bufs)
		return -EBUSY;
	if (copy_from_user(&buf, arg, sizeof(buf)))
		return -EFAULT;
	if (buf.flags & ~IORING_REGISTER_SRC_REGISTERED)
		return -EINVAL;
	if (memchr_inv(buf.pad, 0, sizeof(buf.pad)))
		return -EINVAL;

	registered_src = (buf.flags & IORING_REGISTER_SRC_REGISTERED) != 0;
	file = io_uring_register_get_file(buf.src_fd, registered_src);
	if (IS_ERR(file))
		return PTR_ERR(file);
	ret = io_clone_buffers(ctx, file->private_data);
	if (!registered_src)
		fput(file);
	return ret;
}