1 /* SPDX-License-Identifier: MIT */
2 /*
3 * Description: basic read/write tests with polled IO
4 */
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/resource.h>
#include <sys/types.h>
#include "helpers.h"
#include "liburing.h"
#include "../src/syscall.h"
18
19 #define FILE_SIZE (128 * 1024)
20 #define BS 4096
21 #define BUFFERS (FILE_SIZE / BS)
22
23 static struct iovec *vecs;
24 static int no_buf_select;
25 static int no_iopoll;
26
provide_buffers(struct io_uring * ring)27 static int provide_buffers(struct io_uring *ring)
28 {
29 struct io_uring_sqe *sqe;
30 struct io_uring_cqe *cqe;
31 int ret, i;
32
33 for (i = 0; i < BUFFERS; i++) {
34 sqe = io_uring_get_sqe(ring);
35 io_uring_prep_provide_buffers(sqe, vecs[i].iov_base,
36 vecs[i].iov_len, 1, 1, i);
37 }
38
39 ret = io_uring_submit(ring);
40 if (ret != BUFFERS) {
41 fprintf(stderr, "submit: %d\n", ret);
42 return 1;
43 }
44
45 for (i = 0; i < BUFFERS; i++) {
46 ret = io_uring_wait_cqe(ring, &cqe);
47 if (cqe->res < 0) {
48 fprintf(stderr, "cqe->res=%d\n", cqe->res);
49 return 1;
50 }
51 io_uring_cqe_seen(ring, cqe);
52 }
53
54 return 0;
55 }
56
/*
 * Core polled-IO exerciser: issue BUFFERS reads or writes of BS bytes each
 * against 'file' on the given IOPOLL ring, at random BS-aligned offsets,
 * then reap and validate all completions.
 *
 * write:	nonzero to write, else read (forced to read if buf_select)
 * sqthread:	register the fd and address it as fixed-file index 0
 * fixed:	use registered buffers for every other request
 * buf_select:	use IOSQE_BUFFER_SELECT with buffers from provide_buffers()
 *
 * Returns 0 on success or skip, 1 on failure. Sets no_iopoll and returns 0
 * if the file/device doesn't support polled IO.
 */
static int __test_io(const char *file, struct io_uring *ring, int write, int sqthread,
		     int fixed, int buf_select)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int open_flags;
	int i, fd = -1, ret;
	off_t offset;

	/* buffer-select only applies to reads, and excludes fixed buffers */
	if (buf_select) {
		write = 0;
		fixed = 0;
	}
	if (buf_select && provide_buffers(ring))
		return 1;

	if (write)
		open_flags = O_WRONLY;
	else
		open_flags = O_RDONLY;
	/* O_DIRECT is required for IOPOLL-capable IO on regular files */
	open_flags |= O_DIRECT;

	if (fixed) {
		ret = t_register_buffers(ring, vecs, BUFFERS);
		if (ret == T_SETUP_SKIP)
			return 0;
		if (ret != T_SETUP_OK) {
			fprintf(stderr, "buffer reg failed: %d\n", ret);
			goto err;
		}
	}
	fd = open(file, open_flags);
	if (fd < 0) {
		/* O_DIRECT unsupported or no permission: treat as skip */
		if (errno == EINVAL || errno == EPERM || errno == EACCES)
			return 0;
		perror("file open");
		goto err;
	}
	if (sqthread) {
		/* register fd so it can be addressed as fixed-file index 0 */
		ret = io_uring_register_files(ring, &fd, 1);
		if (ret) {
			fprintf(stderr, "file reg failed: %d\n", ret);
			goto err;
		}
	}

	/* NOTE(review): dead store — offset is reassigned inside the loop */
	offset = 0;
	for (i = 0; i < BUFFERS; i++) {
		sqe = io_uring_get_sqe(ring);
		if (!sqe) {
			fprintf(stderr, "sqe get failed\n");
			goto err;
		}
		offset = BS * (rand() % BUFFERS);
		if (write) {
			int do_fixed = fixed;
			int use_fd = fd;

			if (sqthread)
				use_fd = 0;	/* registered file index */
			/* alternate fixed/non-fixed to cover both paths */
			if (fixed && (i & 1))
				do_fixed = 0;
			if (do_fixed) {
				io_uring_prep_write_fixed(sqe, use_fd, vecs[i].iov_base,
								vecs[i].iov_len,
								offset, i);
			} else {
				io_uring_prep_writev(sqe, use_fd, &vecs[i], 1,
							offset);
			}
		} else {
			int do_fixed = fixed;
			int use_fd = fd;

			if (sqthread)
				use_fd = 0;	/* registered file index */
			if (fixed && (i & 1))
				do_fixed = 0;
			if (do_fixed) {
				io_uring_prep_read_fixed(sqe, use_fd, vecs[i].iov_base,
								vecs[i].iov_len,
								offset, i);
			} else {
				io_uring_prep_readv(sqe, use_fd, &vecs[i], 1,
							offset);
			}

		}
		if (sqthread)
			sqe->flags |= IOSQE_FIXED_FILE;
		if (buf_select) {
			sqe->flags |= IOSQE_BUFFER_SELECT;
			sqe->buf_group = buf_select;
			sqe->user_data = i;
		}
	}

	ret = io_uring_submit(ring);
	if (ret != BUFFERS) {
		/*
		 * A short submit can mean the device rejected polled IO;
		 * check for an -EOPNOTSUPP completion and skip if so.
		 */
		ret = io_uring_peek_cqe(ring, &cqe);
		if (!ret && cqe->res == -EOPNOTSUPP) {
			no_iopoll = 1;
			io_uring_cqe_seen(ring, cqe);
			goto out;
		}
		fprintf(stderr, "submit got %d, wanted %d\n", ret, BUFFERS);
		goto err;
	}

	for (i = 0; i < BUFFERS; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			goto err;
		} else if (cqe->res == -EOPNOTSUPP) {
			fprintf(stdout, "File/device/fs doesn't support polled IO\n");
			no_iopoll = 1;
			goto out;
		} else if (cqe->res != BS) {
			/* every request must transfer exactly one block */
			fprintf(stderr, "cqe res %d, wanted %d\n", cqe->res, BS);
			goto err;
		}
		io_uring_cqe_seen(ring, cqe);
	}

	if (fixed) {
		ret = io_uring_unregister_buffers(ring);
		if (ret) {
			fprintf(stderr, "buffer unreg failed: %d\n", ret);
			goto err;
		}
	}
	if (sqthread) {
		ret = io_uring_unregister_files(ring);
		if (ret) {
			fprintf(stderr, "file unreg failed: %d\n", ret);
			goto err;
		}
	}

	/*
	 * NOTE(review): err/out paths may leave buffers/files registered;
	 * acceptable here since the caller exits the ring right after.
	 */
out:
	close(fd);
	return 0;
err:
	if (fd != -1)
		close(fd);
	return 1;
}
205
sig_alrm(int sig)206 static void sig_alrm(int sig)
207 {
208 fprintf(stderr, "Ran out of time for peek test!\n");
209 exit(T_EXIT_FAIL);
210 }
211
212 /*
213 * if we are polling, io_uring_cqe_peek() always needs to enter the kernel
214 */
test_io_uring_cqe_peek(const char * file)215 static int test_io_uring_cqe_peek(const char *file)
216 {
217 struct io_uring_cqe *cqe;
218 struct io_uring ring;
219 struct sigaction act;
220 int fd, i, ret = T_EXIT_FAIL;
221
222 if (no_iopoll)
223 return 0;
224
225 ret = io_uring_queue_init(64, &ring, IORING_SETUP_IOPOLL);
226 if (ret) {
227 fprintf(stderr, "ring create failed: %d\n", ret);
228 return 1;
229 }
230
231 fd = open(file, O_RDONLY | O_DIRECT);
232 if (fd < 0) {
233 if (errno == EINVAL || errno == EPERM || errno == EACCES) {
234 io_uring_queue_exit(&ring);
235 return T_EXIT_SKIP;
236 }
237 perror("file open");
238 goto err;
239 }
240
241 for (i = 0; i < BUFFERS; i++) {
242 struct io_uring_sqe *sqe;
243 off_t offset = BS * (rand() % BUFFERS);
244
245 sqe = io_uring_get_sqe(&ring);
246 io_uring_prep_readv(sqe, fd, &vecs[i], 1, offset);
247 sqe->user_data = 1;
248 }
249
250 /*
251 * Set alarm for 5 seconds, we should be done way before that
252 */
253 memset(&act, 0, sizeof(act));
254 act.sa_handler = sig_alrm;
255 sigaction(SIGALRM, &act, NULL);
256 alarm(5);
257
258 ret = io_uring_submit(&ring);
259 if (ret != BUFFERS) {
260 fprintf(stderr, "submit=%d\n", ret);
261 goto err;
262 }
263
264 ret = T_EXIT_PASS;
265 i = 0;
266 do {
267 ret = io_uring_peek_cqe(&ring, &cqe);
268 if (ret)
269 continue;
270 io_uring_cqe_seen(&ring, cqe);
271 i++;
272 } while (i < BUFFERS);
273
274 err:
275 if (fd != -1)
276 close(fd);
277 io_uring_queue_exit(&ring);
278 return ret;
279 }
280
281 /*
282 * if we are polling io_uring_submit needs to always enter the
283 * kernel to fetch events
284 */
test_io_uring_submit_enters(const char * file)285 static int test_io_uring_submit_enters(const char *file)
286 {
287 struct io_uring ring;
288 int fd, i, ret, ring_flags, open_flags;
289 unsigned head;
290 struct io_uring_cqe *cqe;
291
292 if (no_iopoll)
293 return 0;
294
295 ring_flags = IORING_SETUP_IOPOLL;
296 ret = io_uring_queue_init(64, &ring, ring_flags);
297 if (ret) {
298 fprintf(stderr, "ring create failed: %d\n", ret);
299 return 1;
300 }
301
302 open_flags = O_WRONLY | O_DIRECT;
303 fd = open(file, open_flags);
304 if (fd < 0) {
305 if (errno == EINVAL || errno == EPERM || errno == EACCES)
306 return T_EXIT_SKIP;
307 perror("file open");
308 goto err;
309 }
310
311 for (i = 0; i < BUFFERS; i++) {
312 struct io_uring_sqe *sqe;
313 off_t offset = BS * (rand() % BUFFERS);
314
315 sqe = io_uring_get_sqe(&ring);
316 io_uring_prep_writev(sqe, fd, &vecs[i], 1, offset);
317 sqe->user_data = 1;
318 }
319
320 /* submit manually to avoid adding IORING_ENTER_GETEVENTS */
321 ret = __sys_io_uring_enter(ring.ring_fd, __io_uring_flush_sq(&ring), 0,
322 0, NULL);
323 if (ret < 0)
324 goto err;
325
326 for (i = 0; i < 500; i++) {
327 ret = io_uring_submit(&ring);
328 if (ret != 0) {
329 fprintf(stderr, "still had %d sqes to submit, this is unexpected", ret);
330 goto err;
331 }
332
333 io_uring_for_each_cqe(&ring, head, cqe) {
334 /* runs after test_io so should not have happened */
335 if (cqe->res == -EOPNOTSUPP) {
336 fprintf(stdout, "File/device/fs doesn't support polled IO\n");
337 goto err;
338 }
339 goto ok;
340 }
341 usleep(10000);
342 }
343 err:
344 ret = 1;
345 if (fd != -1)
346 close(fd);
347
348 ok:
349 io_uring_queue_exit(&ring);
350 return ret;
351 }
352
test_io(const char * file,int write,int sqthread,int fixed,int buf_select,int defer)353 static int test_io(const char *file, int write, int sqthread, int fixed,
354 int buf_select, int defer)
355 {
356 struct io_uring ring;
357 int ret, ring_flags = IORING_SETUP_IOPOLL;
358
359 if (no_iopoll)
360 return 0;
361
362 if (defer)
363 ring_flags |= IORING_SETUP_SINGLE_ISSUER |
364 IORING_SETUP_DEFER_TASKRUN;
365
366 ret = t_create_ring(64, &ring, ring_flags);
367 if (ret == T_SETUP_SKIP)
368 return 0;
369 if (ret != T_SETUP_OK) {
370 fprintf(stderr, "ring create failed: %d\n", ret);
371 return 1;
372 }
373 ret = __test_io(file, &ring, write, sqthread, fixed, buf_select);
374 io_uring_queue_exit(&ring);
375 return ret;
376 }
377
probe_buf_select(void)378 static int probe_buf_select(void)
379 {
380 struct io_uring_probe *p;
381 struct io_uring ring;
382 int ret;
383
384 ret = io_uring_queue_init(1, &ring, 0);
385 if (ret) {
386 fprintf(stderr, "ring create failed: %d\n", ret);
387 return 1;
388 }
389
390 p = io_uring_get_probe_ring(&ring);
391 if (!p || !io_uring_opcode_supported(p, IORING_OP_PROVIDE_BUFFERS)) {
392 no_buf_select = 1;
393 fprintf(stdout, "Buffer select not supported, skipping\n");
394 return 0;
395 }
396 io_uring_free_probe(p);
397 return 0;
398 }
399
main(int argc,char * argv[])400 int main(int argc, char *argv[])
401 {
402 int i, ret, nr;
403 char buf[256];
404 char *fname;
405
406 if (probe_buf_select())
407 return T_EXIT_FAIL;
408
409 if (argc > 1) {
410 fname = argv[1];
411 } else {
412 srand((unsigned)time(NULL));
413 snprintf(buf, sizeof(buf), ".basic-rw-%u-%u",
414 (unsigned)rand(), (unsigned)getpid());
415 fname = buf;
416 t_create_file(fname, FILE_SIZE);
417 }
418
419 vecs = t_create_buffers(BUFFERS, BS);
420
421 nr = 32;
422 if (no_buf_select)
423 nr = 8;
424 else if (!t_probe_defer_taskrun())
425 nr = 16;
426 for (i = 0; i < nr; i++) {
427 int write = (i & 1) != 0;
428 int sqthread = (i & 2) != 0;
429 int fixed = (i & 4) != 0;
430 int buf_select = (i & 8) != 0;
431 int defer = (i & 16) != 0;
432
433 ret = test_io(fname, write, sqthread, fixed, buf_select, defer);
434 if (ret) {
435 fprintf(stderr, "test_io failed %d/%d/%d/%d/%d\n",
436 write, sqthread, fixed, buf_select, defer);
437 goto err;
438 }
439 if (no_iopoll)
440 break;
441 }
442
443 ret = test_io_uring_submit_enters(fname);
444 if (ret == T_EXIT_FAIL) {
445 fprintf(stderr, "test_io_uring_submit_enters failed\n");
446 goto err;
447 }
448
449 /*
450 * Keep this last, it exits on failure
451 */
452 ret = test_io_uring_cqe_peek(fname);
453 if (ret == T_EXIT_FAIL) {
454 fprintf(stderr, "test_io_uring_cqe_peek failed\n");
455 goto err;
456 }
457
458 if (fname != argv[1])
459 unlink(fname);
460 return T_EXIT_PASS;
461 err:
462 if (fname != argv[1])
463 unlink(fname);
464 return T_EXIT_FAIL;
465 }
466