// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "memmap.h"

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

struct kmem_cache *io_buf_cachep;

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u32				nbufs;
	__u16				bid;
};

static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	lockdep_assert_held(&ctx->uring_lock);

	return xa_load(&ctx->io_bl_xa, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	/*
	 * Store buffer group ID and finally mark the list as visible.
	 * The normal lookup doesn't care about the visibility as we're
	 * always under the ->uring_lock, but the RCU lookup from mmap does.
	 */
	bl->bgid = bgid;
	atomic_set(&bl->refs, 1);
	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

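/*
 * Return a legacy provided buffer back to its group list so it can be
 * selected again. Clears REQ_F_BUFFER_SELECTED on the request.
 */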
bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
	return true;
}

void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags)
{
	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		__io_put_kbuf_list(req, len, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		__io_put_kbuf_list(req, len, &req->ctx->io_buffers_cache);
	}
}

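/*
 * Pick the first buffer from a legacy provided-buffer group. On success the
 * buffer is removed from the group list, *len is clamped to the buffer size,
 * and the request is marked as having a selected buffer.
 */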
static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		if (list_empty(&bl->buf_list))
			req->flags |= REQ_F_BL_EMPTY;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
				      struct io_buffer_list *bl,
				      struct iovec *iov)
{
	void __user *buf;

	buf = io_provided_buffer_select(req, len, bl);
	if (unlikely(!buf))
		return -ENOBUFS;

	iov[0].iov_base = buf;
	iov[0].iov_len = *len;
	return 1;
}

static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	__u16 tail, head = bl->head;
	struct io_uring_buf *buf;
	void __user *ret;

	tail = smp_load_acquire(&br->tail);
	if (unlikely(tail == head))
		return NULL;

	if (head + 1 == tail)
		req->flags |= REQ_F_BL_EMPTY;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
	req->buf_list = bl;
	req->buf_index = buf->bid;
	ret = u64_to_user_ptr(buf->addr);

	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes, coming in unlocked means we're being called from
		 * io-wq context and there may be further retries in async hybrid
		 * mode. For the locked case, the caller must call commit when
		 * the transfer completes (or if we get -EAGAIN and must poll or
		 * retry).
		 */
		io_kbuf_commit(req, bl, *len, 1);
		req->buf_list = NULL;
	}
	return ret;
}

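/*
 * Select a single buffer for a request from its buffer group, handling both
 * ring mapped (IOBL_BUF_RING) and legacy provided buffer groups.
 */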
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->flags & IOBL_BUF_RING)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}

/* cap it at a reasonable 256, will be one page even for 4K */
#define PEEK_MAX_IMPORT		256

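/*
 * Map currently available ring buffers into arg->iovs without moving the
 * ring head; the caller commits them separately. The number of buffers
 * peeked is bounded by arg->max_len, the iovec array size and
 * PEEK_MAX_IMPORT, and the array may be reallocated if KBUF_MODE_EXPAND
 * is set.
 */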
static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
				struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct iovec *iov = arg->iovs;
	int nr_iovs = arg->nr_iovs;
	__u16 nr_avail, tail, head;
	struct io_uring_buf *buf;

	tail = smp_load_acquire(&br->tail);
	head = bl->head;
	nr_avail = min_t(__u16, tail - head, UIO_MAXIOV);
	if (unlikely(!nr_avail))
		return -ENOBUFS;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (arg->max_len) {
		u32 len = READ_ONCE(buf->len);

		if (unlikely(!len))
			return -ENOBUFS;
		/*
		 * Limit incremental buffers to 1 segment. No point trying
		 * to peek ahead and map more than we need, when the buffers
		 * themselves should be large when setup with
		 * IOU_PBUF_RING_INC.
		 */
		if (bl->flags & IOBL_INC) {
			nr_avail = 1;
		} else {
			size_t needed;

			needed = (arg->max_len + len - 1) / len;
			needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
			if (nr_avail > needed)
				nr_avail = needed;
		}
	}

	/*
	 * only alloc a bigger array if we know we have data to map, eg not
	 * a speculative peek operation.
	 */
	if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
		iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL);
		if (unlikely(!iov))
			return -ENOMEM;
		if (arg->mode & KBUF_MODE_FREE)
			kfree(arg->iovs);
		arg->iovs = iov;
		nr_iovs = nr_avail;
	} else if (nr_avail < nr_iovs) {
		nr_iovs = nr_avail;
	}

	/* set it to max, if not set, so we can use it unconditionally */
	if (!arg->max_len)
		arg->max_len = INT_MAX;

	req->buf_index = buf->bid;
	do {
		u32 len = buf->len;

		/* truncate end piece, if needed, for non partial buffers */
		if (len > arg->max_len) {
			len = arg->max_len;
			if (!(bl->flags & IOBL_INC)) {
				arg->partial_map = 1;
				if (iov != arg->iovs)
					break;
				buf->len = len;
			}
		}

		iov->iov_base = u64_to_user_ptr(buf->addr);
		iov->iov_len = len;
		iov++;

		arg->out_len += len;
		arg->max_len -= len;
		if (!arg->max_len)
			break;

		buf = io_ring_head_to_buf(br, ++head, bl->mask);
	} while (--nr_iovs);

	if (head == tail)
		req->flags |= REQ_F_BL_EMPTY;

	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	return iov - arg->iovs;
}

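/*
 * Select one or more buffers for a request. Ring provided buffers are
 * peeked and committed here and marked as not recyclable; legacy groups
 * fall back to a single buffer selection.
 */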
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = -ENOENT;

	io_ring_submit_lock(ctx, issue_flags);
	bl = io_buffer_get_list(ctx, req->buf_index);
	if (unlikely(!bl))
		goto out_unlock;

	if (bl->flags & IOBL_BUF_RING) {
		ret = io_ring_buffers_peek(req, arg, bl);
		/*
		 * Don't recycle these buffers if we need to go through poll.
		 * Nobody else can use them anyway, and holding on to provided
		 * buffers for a send/write operation would happen on the app
		 * side anyway with normal buffers. Besides, we already
		 * committed them, they cannot be put back in the queue.
		 */
		if (ret > 0) {
			req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
			io_kbuf_commit(req, bl, arg->out_len, ret);
		}
	} else {
		ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
	}
out_unlock:
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

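/*
 * Like io_buffers_select(), but called with the uring_lock already held and
 * without committing ring buffers; the selection only marks them as needing
 * a commit later.
 */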
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (unlikely(!bl))
		return -ENOENT;

	if (bl->flags & IOBL_BUF_RING) {
		ret = io_ring_buffers_peek(req, arg, bl);
		if (ret > 0)
			req->flags |= REQ_F_BUFFERS_COMMIT;
		return ret;
	}

	/* don't support multiple buffer selections for legacy */
	return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
}

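/*
 * Remove up to nbufs buffers from a group. For a ring mapped group the
 * entire ring is unmapped and the count of unconsumed entries is returned;
 * legacy buffers are moved back to the ctx-wide cache.
 */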
static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->flags & IOBL_BUF_RING) {
		i = bl->buf_ring->tail - bl->head;
		if (bl->buf_nr_pages) {
			int j;

			if (!(bl->flags & IOBL_MMAP)) {
				for (j = 0; j < bl->buf_nr_pages; j++)
					unpin_user_page(bl->buf_pages[j]);
			}
			io_pages_unmap(bl->buf_ring, &bl->buf_pages,
					&bl->buf_nr_pages, bl->flags & IOBL_MMAP);
			bl->flags &= ~IOBL_MMAP;
		}
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		bl->flags &= ~IOBL_BUF_RING;
		return i;
	}

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_move(&nxt->list, &ctx->io_buffers_cache);
		if (++i == nbufs)
			return i;
		cond_resched();
	}

	return i;
}

void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	if (atomic_dec_and_test(&bl->refs)) {
		__io_remove_buffers(ctx, bl, -1U);
		kfree_rcu(bl, rcu);
	}
}

void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	struct list_head *item, *tmp;
	struct io_buffer *buf;
	unsigned long index;

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		io_put_bl(ctx, bl);
	}

	/*
	 * Move deferred locked entries to cache before pruning
	 */
	spin_lock(&ctx->completion_lock);
	if (!list_empty(&ctx->io_buffers_comp))
		list_splice_init(&ctx->io_buffers_comp, &ctx->io_buffers_cache);
	spin_unlock(&ctx->completion_lock);

	list_for_each_safe(item, tmp, &ctx->io_buffers_cache) {
		buf = list_entry(item, struct io_buffer, list);
		kmem_cache_free(io_buf_cachep, buf);
	}
}

static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	xa_erase(&ctx->io_bl_xa, bl->bgid);
	io_put_bl(ctx, bl);
}

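/*
 * Prep for the remove-buffers request: the number of buffers to remove is
 * carried in sqe->fd and the target group in sqe->buf_group.
 */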
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!(bl->flags & IOBL_BUF_RING))
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

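/*
 * Prep for the provide-buffers request: validate that the supplied address
 * range and starting buffer ID fit within the group limits before any
 * buffers are added.
 */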
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);
	if (!p->len)
		return -EINVAL;

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
				&size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}

#define IO_BUFFER_ALLOC_BATCH 64

static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *bufs[IO_BUFFER_ALLOC_BATCH];
	int allocated;

	/*
	 * Completions that don't happen inline (eg not under uring_lock) will
	 * add to ->io_buffers_comp. If we don't have any free buffers, check
	 * the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
						&ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * batch of buffer entries and add those to our freelist.
	 */

	allocated = kmem_cache_alloc_bulk(io_buf_cachep, GFP_KERNEL_ACCOUNT,
					  ARRAY_SIZE(bufs), (void **) bufs);
	if (unlikely(!allocated)) {
		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		bufs[0] = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
		if (!bufs[0])
			return -ENOMEM;
		allocated = 1;
	}

	while (allocated)
		list_add_tail(&bufs[--allocated]->list, &ctx->io_buffers_cache);

	return 0;
}

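/*
 * Carve pbuf->nbufs buffers out of the user supplied region and link them
 * into the group list, pulling io_buffer entries from the ctx cache and
 * refilling it as needed.
 */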
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
					list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}

int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			/*
			 * Doesn't need rcu free as it was never visible, but
			 * let's keep it consistent throughout.
			 */
			kfree_rcu(bl, rcu);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->flags & IOBL_BUF_RING) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

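/*
 * Register a buffer ring whose memory is supplied by the application: pin
 * the user pages and vmap them so the kernel can read the ring directly.
 */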
static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
			    struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br = NULL;
	struct page **pages;
	int nr_pages, ret;

	pages = io_pin_pages(reg->ring_addr,
			     flex_array_size(br, bufs, reg->ring_entries),
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	br = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!br) {
		ret = -ENOMEM;
		goto error_unpin;
	}

#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmap's the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1)) {
		ret = -EINVAL;
		goto error_unpin;
	}
#endif
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->buf_ring = br;
	bl->flags |= IOBL_BUF_RING;
	bl->flags &= ~IOBL_MMAP;
	return 0;
error_unpin:
	unpin_user_pages(pages, nr_pages);
	kvfree(pages);
	vunmap(br);
	return ret;
}

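/*
 * For IOU_PBUF_RING_MMAP the kernel allocates the ring memory itself; the
 * application mmap()s it afterwards via the buffer ring mmap offset.
 */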
static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
			      struct io_uring_buf_reg *reg,
			      struct io_buffer_list *bl)
{
	size_t ring_size;

	ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);

	bl->buf_ring = io_pages_map(&bl->buf_pages, &bl->buf_nr_pages, ring_size);
	if (IS_ERR(bl->buf_ring)) {
		bl->buf_ring = NULL;
		return -ENOMEM;
	}

	bl->flags |= (IOBL_BUF_RING | IOBL_MMAP);
	return 0;
}

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags & ~(IOU_PBUF_RING_MMAP | IOU_PBUF_RING_INC))
		return -EINVAL;
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		if (!reg.ring_addr)
			return -EFAULT;
		if (reg.ring_addr & ~PAGE_MASK)
			return -EINVAL;
	} else {
		if (reg.ring_addr)
			return -EINVAL;
	}

	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
			return -EEXIST;
		io_destroy_bl(ctx, bl);
	}

	free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
	if (!bl)
		return -ENOMEM;

	if (!(reg.flags & IOU_PBUF_RING_MMAP))
		ret = io_pin_pbuf_ring(&reg, bl);
	else
		ret = io_alloc_pbuf_ring(ctx, &reg, bl);

	if (!ret) {
		bl->nr_entries = reg.ring_entries;
		bl->mask = reg.ring_entries - 1;
		if (reg.flags & IOU_PBUF_RING_INC)
			bl->flags |= IOBL_INC;

		io_buffer_add_list(ctx, bl, reg.bgid);
		return 0;
	}

	kfree_rcu(free_bl, rcu);
	return ret;
}

int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!(bl->flags & IOBL_BUF_RING))
		return -EINVAL;

	xa_erase(&ctx->io_bl_xa, bl->bgid);
	io_put_bl(ctx, bl);
	return 0;
}

int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_status buf_status;
	struct io_buffer_list *bl;
	int i;

	if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(buf_status.resv); i++)
		if (buf_status.resv[i])
			return -EINVAL;

	bl = io_buffer_get_list(ctx, buf_status.buf_group);
	if (!bl)
		return -ENOENT;
	if (!(bl->flags & IOBL_BUF_RING))
		return -EINVAL;

	buf_status.head = bl->head;
	if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
		return -EFAULT;

	return 0;
}

struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid)
{
	struct io_buffer_list *bl;
	bool ret;

	/*
	 * We have to be a bit careful here - we're inside mmap and cannot grab
	 * the uring_lock. This means the buffer_list could be simultaneously
	 * going away, if someone is trying to be sneaky. Look it up under rcu
	 * so we know it's not going away, and attempt to grab a reference to
	 * it. If the ref is already zero, then fail the mapping. If successful,
	 * the caller will call io_put_bl() to drop the reference at the end.
	 * This may then safely free the buffer_list (and drop the pages) at
	 * that point, vm_insert_pages() would've already grabbed the
	 * necessary vma references.
	 */
	rcu_read_lock();
	bl = xa_load(&ctx->io_bl_xa, bgid);
	/* must be a mmap'able buffer ring and have pages */
	ret = false;
	if (bl && bl->flags & IOBL_MMAP)
		ret = atomic_inc_not_zero(&bl->refs);
	rcu_read_unlock();

	if (ret)
		return bl;

	return ERR_PTR(-EINVAL);
}

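/*
 * ->mmap() helper for ring provided buffers: the buffer group ID is encoded
 * in the mmap offset, and the group must stay referenced while its pages
 * are inserted into the vma.
 */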
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct io_ring_ctx *ctx = file->private_data;
	loff_t pgoff = vma->vm_pgoff << PAGE_SHIFT;
	struct io_buffer_list *bl;
	int bgid, ret;

	bgid = (pgoff & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
	bl = io_pbuf_get_bl(ctx, bgid);
	if (IS_ERR(bl))
		return PTR_ERR(bl);

	ret = io_uring_mmap_pages(ctx, vma, bl->buf_pages, bl->buf_nr_pages);
	io_put_bl(ctx, bl);
	return ret;
}