// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

enum {
	/* ring mapped provided buffers */
	IOBL_BUF_RING	= 1,
	/* ring mapped provided buffers, but mmap'ed by application */
	IOBL_MMAP	= 2,
	/* buffers are consumed incrementally rather than always fully */
	IOBL_INC	= 4,
};
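
/*
 * These internal flags mirror the IOU_PBUF_RING_* registration flags in
 * uapi/linux/io_uring.h: a ring registered with IOU_PBUF_RING_MMAP gets
 * IOBL_MMAP, one registered with IOU_PBUF_RING_INC gets IOBL_INC, and
 * IOBL_BUF_RING marks any ring mapped group, as opposed to classic
 * provided buffers.
 */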

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	__u16 flags;

	atomic_t refs;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};
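
/*
 * Each classic (non-ring) provided buffer is tracked by one of these,
 * queued off io_buffer_list->buf_list; addr/len describe the user memory,
 * bid/bgid the buffer and group IDs it was provided under.
 */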

enum {
	/* can alloc a bigger vec */
	KBUF_MODE_EXPAND	= 1,
	/* if bigger vec allocated, free old one */
	KBUF_MODE_FREE		= 2,
};
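
/*
 * A caller can, for instance, start with a small caller-owned iovec array
 * and set KBUF_MODE_EXPAND to let selection switch to a larger allocated
 * array when more buffers are available, adding KBUF_MODE_FREE if the
 * previous array should be freed once it is replaced.
 */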

struct buf_sel_arg {
	struct iovec *iovs;	/* iovec array to fill with selected buffers */
	size_t out_len;		/* total length selected */
	size_t max_len;		/* cap on how much to select */
	unsigned short nr_iovs;	/* capacity of the iovs array */
	unsigned short mode;	/* KBUF_MODE_* flags */
	unsigned short partial_map; /* set if a buffer was only partially mapped */
};
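
/*
 * Illustrative sketch of bundle-style selection (values are examples,
 * not taken from a specific caller):
 *
 *	struct iovec iovs[8];
 *	struct buf_sel_arg arg = {
 *		.iovs		= iovs,
 *		.nr_iovs	= ARRAY_SIZE(iovs),
 *		.max_len	= INT_MAX,
 *		.mode		= KBUF_MODE_EXPAND | KBUF_MODE_FREE,
 *	};
 *	int nr = io_buffers_select(req, &arg, issue_flags);
 *
 * On success, nr iovecs in arg.iovs cover arg.out_len bytes in total.
 */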

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags);
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid);
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma);
static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io, in which case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}
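
/*
 * Recycling is for issue paths that selected a buffer but ended up doing
 * no I/O with it (e.g. the request must be retried): the buffer is handed
 * back to its group so a later attempt, or another request, can claim it.
 */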

/* Mapped buffer ring, return io_uring_buf from head */
#define io_ring_head_to_buf(br, head, mask)	&(br)->bufs[(head) & (mask)]
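
/*
 * Example: with nr_entries == 8 (mask == 7) and head == 10, this yields
 * &br->bufs[2]; the ring wraps by masking rather than bounds checking,
 * which is why the entry count must be a power of two.
 */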

static inline bool io_kbuf_commit(struct io_kiocb *req,
				  struct io_buffer_list *bl, int len, int nr)
{
	if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
		return true;

	req->flags &= ~REQ_F_BUFFERS_COMMIT;

	if (unlikely(len < 0))
		return true;

	if (bl->flags & IOBL_INC) {
		struct io_uring_buf *buf;

		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
		if (len > buf->len)
			len = buf->len;
		buf->len -= len;
		if (buf->len) {
			buf->addr += len;
			return false;
		}
	}

	bl->head += nr;
	return true;
}
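
/*
 * With IOBL_INC, for example, a recv that consumes 4096 bytes of a 64KB
 * provided buffer advances buf->addr by 4096, shrinks buf->len to 61440,
 * leaves bl->head alone and returns false, so the completion carries
 * IORING_CQE_F_BUF_MORE. Only when buf->len reaches zero is bl->head
 * advanced and the ring entry fully consumed.
 */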

static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
{
	struct io_buffer_list *bl = req->buf_list;
	bool ret = true;

	if (bl) {
		ret = io_kbuf_commit(req, bl, len, nr);
		req->buf_index = bl->bgid;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
	return ret;
}

static inline void __io_put_kbuf_list(struct io_kiocb *req, int len,
				      struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		__io_put_kbuf_ring(req, len, 1);
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}
}

static inline void io_kbuf_drop(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;

	/* len == 0 is fine here, non-ring will always drop all of it */
	__io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp);
}

static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int len,
					  int nbufs, unsigned issue_flags)
{
	unsigned int ret;

	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	if (req->flags & REQ_F_BUFFER_RING) {
		if (!__io_put_kbuf_ring(req, len, nbufs))
			ret |= IORING_CQE_F_BUF_MORE;
	} else {
		__io_put_kbuf(req, len, issue_flags);
	}
	return ret;
}
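
/*
 * The returned value is stored in cqe->flags: IORING_CQE_F_BUFFER plus
 * the consumed buffer's ID shifted by IORING_CQE_BUFFER_SHIFT tell
 * userspace which buffer was used, and IORING_CQE_F_BUF_MORE signals
 * that the same buffer remains active for further transfers.
 */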

static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
				       unsigned issue_flags)
{
	return __io_put_kbufs(req, len, 1, issue_flags);
}

static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
					int nbufs, unsigned issue_flags)
{
	return __io_put_kbufs(req, len, nbufs, issue_flags);
}
#endif