/* SPDX-License-Identifier: MIT */
/*
 * Description: basic read/write tests with buffered, O_DIRECT, and SQPOLL
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <fcntl.h>
#include <sys/types.h>
#include <poll.h>
#include <sys/eventfd.h>
#include <sys/resource.h>

#include "helpers.h"
#include "liburing.h"

#define FILE_SIZE	(256 * 1024)
#define BS		8192
#define BUFFERS		(FILE_SIZE / BS)

static struct iovec *vecs;
static int no_read;
static int no_buf_select;
static int warned;
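
/*
 * Replace the global iovec array with deliberately misaligned buffers: each
 * entry points at a random offset inside a 3*BS allocation and has a random
 * length in [1, BS]. Used by the non-aligned buffered/fixed pass in main().
 */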
static int create_nonaligned_buffers(void)
{
	int i;

	vecs = t_malloc(BUFFERS * sizeof(struct iovec));
	for (i = 0; i < BUFFERS; i++) {
		char *p = t_malloc(3 * BS);

		if (!p)
			return 1;
		vecs[i].iov_base = p + (rand() % BS);
		vecs[i].iov_len = 1 + (rand() % BS);
	}

	return 0;
}
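
/*
 * Core I/O helper: queue BUFFERS reads or writes against 'file' using the
 * requested combination of buffered vs O_DIRECT, SQPOLL (registered file),
 * fixed buffers, vectored vs non-vectored ops, buffer selection, and
 * sequential vs random offsets, then verify every completion.
 */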
static int __test_io(const char *file, struct io_uring *ring, int write,
		     int buffered, int sqthread, int fixed, int nonvec,
		     int buf_select, int seq, int exp_len)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int open_flags;
	int i, fd = -1, ret;
	off_t offset;

#ifdef VERBOSE
	fprintf(stdout, "%s: start %d/%d/%d/%d/%d: ", __FUNCTION__, write,
		buffered, sqthread, fixed, nonvec);
#endif
	if (write)
		open_flags = O_WRONLY;
	else
		open_flags = O_RDONLY;
	if (!buffered)
		open_flags |= O_DIRECT;

	if (fixed) {
		ret = t_register_buffers(ring, vecs, BUFFERS);
		if (ret == T_SETUP_SKIP)
			return 0;
		if (ret != T_SETUP_OK) {
			fprintf(stderr, "buffer reg failed: %d\n", ret);
			goto err;
		}
	}

	fd = open(file, open_flags);
	if (fd < 0) {
		if (errno == EINVAL)
			return 0;
		perror("file open");
		goto err;
	}

	if (sqthread) {
		ret = io_uring_register_files(ring, &fd, 1);
		if (ret) {
			fprintf(stderr, "file reg failed: %d\n", ret);
			goto err;
		}
	}

	offset = 0;
	for (i = 0; i < BUFFERS; i++) {
		sqe = io_uring_get_sqe(ring);
		if (!sqe) {
			fprintf(stderr, "sqe get failed\n");
			goto err;
		}
		if (!seq)
			offset = BS * (rand() % BUFFERS);
		if (write) {
			int do_fixed = fixed;
			int use_fd = fd;

			if (sqthread)
				use_fd = 0;
			if (fixed && (i & 1))
				do_fixed = 0;
			if (do_fixed) {
				io_uring_prep_write_fixed(sqe, use_fd, vecs[i].iov_base,
							  vecs[i].iov_len,
							  offset, i);
			} else if (nonvec) {
				io_uring_prep_write(sqe, use_fd, vecs[i].iov_base,
						    vecs[i].iov_len, offset);
			} else {
				io_uring_prep_writev(sqe, use_fd, &vecs[i], 1,
						     offset);
			}
		} else {
			int do_fixed = fixed;
			int use_fd = fd;

			if (sqthread)
				use_fd = 0;
			if (fixed && (i & 1))
				do_fixed = 0;
			if (do_fixed) {
				io_uring_prep_read_fixed(sqe, use_fd, vecs[i].iov_base,
							 vecs[i].iov_len,
							 offset, i);
			} else if (nonvec) {
				io_uring_prep_read(sqe, use_fd, vecs[i].iov_base,
						   vecs[i].iov_len, offset);
			} else {
				io_uring_prep_readv(sqe, use_fd, &vecs[i], 1,
						    offset);
			}
		}
		sqe->user_data = i;
		if (sqthread)
			sqe->flags |= IOSQE_FIXED_FILE;
		if (buf_select) {
			if (nonvec)
				sqe->addr = 0;
			sqe->flags |= IOSQE_BUFFER_SELECT;
			sqe->buf_group = buf_select;
		}
		if (seq)
			offset += BS;
	}

	ret = io_uring_submit(ring);
	if (ret != BUFFERS) {
		fprintf(stderr, "submit got %d, wanted %d\n", ret, BUFFERS);
		goto err;
	}

	for (i = 0; i < BUFFERS; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			goto err;
		}
		if (cqe->res == -EINVAL && nonvec) {
			if (!warned) {
				fprintf(stdout, "Non-vectored IO not "
					"supported, skipping\n");
				warned = 1;
				no_read = 1;
			}
		} else if (exp_len == -1) {
			int iov_len = vecs[cqe->user_data].iov_len;

			if (cqe->res != iov_len) {
				fprintf(stderr, "cqe res %d, wanted %d\n",
					cqe->res, iov_len);
				goto err;
			}
		} else if (cqe->res != exp_len) {
			fprintf(stderr, "cqe res %d, wanted %d\n", cqe->res, exp_len);
			goto err;
		}
		if (buf_select && exp_len == BS) {
			int bid = cqe->flags >> 16;
			unsigned char *ptr = vecs[bid].iov_base;
			int j;

			for (j = 0; j < BS; j++) {
				if (ptr[j] == cqe->user_data)
					continue;

				fprintf(stderr, "Data mismatch! bid=%d, "
					"wanted=%d, got=%d\n", bid,
					(int)cqe->user_data, ptr[j]);
				return 1;
			}
		}
		io_uring_cqe_seen(ring, cqe);
	}

	if (fixed) {
		ret = io_uring_unregister_buffers(ring);
		if (ret) {
			fprintf(stderr, "buffer unreg failed: %d\n", ret);
			goto err;
		}
	}
	if (sqthread) {
		ret = io_uring_unregister_files(ring);
		if (ret) {
			fprintf(stderr, "file unreg failed: %d\n", ret);
			goto err;
		}
	}

	close(fd);
#ifdef VERBOSE
	fprintf(stdout, "PASS\n");
#endif
	return 0;
err:
#ifdef VERBOSE
	fprintf(stderr, "FAILED\n");
#endif
	if (fd != -1)
		close(fd);
	return 1;
}
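
/*
 * Create a ring (SQPOLL if requested) and run a single __test_io()
 * combination against 'file'.
 */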
static int test_io(const char *file, int write, int buffered, int sqthread,
		   int fixed, int nonvec, int exp_len)
{
	struct io_uring ring;
	int ret, ring_flags = 0;

	if (sqthread)
		ring_flags = IORING_SETUP_SQPOLL;

	ret = t_create_ring(64, &ring, ring_flags);
	if (ret == T_SETUP_SKIP)
		return 0;
	if (ret != T_SETUP_OK) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	ret = __test_io(file, &ring, write, buffered, sqthread, fixed, nonvec,
			0, 0, exp_len);
	io_uring_queue_exit(&ring);
	return ret;
}
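
/*
 * Exercise a linked chain: writev to the test file, poll on an idle pipe,
 * and a 1 second link timeout that ends the poll. Reap all three CQEs.
 */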
static int read_poll_link(const char *file)
{
	struct __kernel_timespec ts;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	int i, fd, ret, fds[2];

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret)
		return ret;

	fd = open(file, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (pipe(fds)) {
		perror("pipe");
		return 1;
	}

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_writev(sqe, fd, &vecs[0], 1, 0);
	sqe->flags |= IOSQE_IO_LINK;
	sqe->user_data = 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_add(sqe, fds[0], POLLIN);
	sqe->flags |= IOSQE_IO_LINK;
	sqe->user_data = 2;

	ts.tv_sec = 1;
	ts.tv_nsec = 0;
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);
	sqe->user_data = 3;

	ret = io_uring_submit(&ring);
	if (ret != 3) {
		fprintf(stderr, "submitted %d\n", ret);
		return 1;
	}

	for (i = 0; i < 3; i++) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			return 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	return 0;
}
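
/*
 * Use IORING_REGISTER_PROBE to check whether the kernel supports the
 * non-vectored IORING_OP_READ/IORING_OP_WRITE opcodes.
 */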
static int has_nonvec_read(void)
{
	struct io_uring_probe *p;
	struct io_uring ring;
	int ret;

	ret = io_uring_queue_init(1, &ring, 0);
	if (ret) {
		fprintf(stderr, "queue init failed: %d\n", ret);
		exit(ret);
	}

	p = t_calloc(1, sizeof(*p) + 256 * sizeof(struct io_uring_probe_op));
	ret = io_uring_register_probe(&ring, p, 256);
	/* if we don't have PROBE_REGISTER, we don't have OP_READ/WRITE */
	if (ret == -EINVAL) {
out:
		io_uring_queue_exit(&ring);
		return 0;
	} else if (ret) {
		fprintf(stderr, "register_probe: %d\n", ret);
		goto out;
	}

	if (p->ops_len <= IORING_OP_READ)
		goto out;
	if (!(p->ops[IORING_OP_READ].flags & IO_URING_OP_SUPPORTED))
		goto out;
	io_uring_queue_exit(&ring);
	return 1;
}
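
/*
 * Submit a non-vectored read from an eventfd and verify it completes with
 * sizeof(eventfd_t), tolerating kernels that report eventfd IO unsupported.
 */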
static int test_eventfd_read(void)
{
	struct io_uring ring;
	int fd, ret;
	eventfd_t event;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (no_read)
		return 0;
	ret = io_uring_queue_init(8, &ring, 0);
	if (ret)
		return ret;

	fd = eventfd(1, 0);
	if (fd < 0) {
		perror("eventfd");
		return 1;
	}
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, &event, sizeof(eventfd_t), 0);
	ret = io_uring_submit(&ring);
	if (ret != 1) {
		fprintf(stderr, "submitted %d\n", ret);
		return 1;
	}
	eventfd_write(fd, 1);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret) {
		fprintf(stderr, "wait_cqe=%d\n", ret);
		return 1;
	}
	if (cqe->res == -EINVAL) {
		fprintf(stdout, "eventfd IO not supported, skipping\n");
	} else if (cqe->res != sizeof(eventfd_t)) {
		fprintf(stderr, "cqe res %d, wanted %d\n", cqe->res,
			(int) sizeof(eventfd_t));
		return 1;
	}
	io_uring_cqe_seen(&ring, cqe);
	return 0;
}
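
/*
 * Provide buffers at half their real length, then run a buffer-select read
 * pass and expect correspondingly short completions.
 */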
static int test_buf_select_short(const char *filename, int nonvec)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	int ret, i, exp_len;

	if (no_buf_select)
		return 0;

	ret = io_uring_queue_init(64, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	exp_len = 0;
	for (i = 0; i < BUFFERS; i++) {
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_provide_buffers(sqe, vecs[i].iov_base,
					      vecs[i].iov_len / 2, 1, 1, i);
		if (!exp_len)
			exp_len = vecs[i].iov_len / 2;
	}

	ret = io_uring_submit(&ring);
	if (ret != BUFFERS) {
		fprintf(stderr, "submit: %d\n", ret);
		return -1;
	}

	for (i = 0; i < BUFFERS; i++) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (cqe->res < 0) {
			fprintf(stderr, "cqe->res=%d\n", cqe->res);
			return 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	ret = __test_io(filename, &ring, 0, 0, 0, 0, nonvec, 1, 1, exp_len);

	io_uring_queue_exit(&ring);
	return ret;
}
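
/*
 * Register every iovec in 'vecs' as a provided buffer in group 'bgid' and
 * reap the completions.
 */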
static int provide_buffers_iovec(struct io_uring *ring, int bgid)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int i, ret;

	for (i = 0; i < BUFFERS; i++) {
		sqe = io_uring_get_sqe(ring);
		io_uring_prep_provide_buffers(sqe, vecs[i].iov_base,
					      vecs[i].iov_len, 1, bgid, i);
	}

	ret = io_uring_submit(ring);
	if (ret != BUFFERS) {
		fprintf(stderr, "submit: %d\n", ret);
		return -1;
	}

	for (i = 0; i < BUFFERS; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			return 1;
		}
		if (cqe->res < 0) {
			fprintf(stderr, "cqe->res=%d\n", cqe->res);
			return 1;
		}
		io_uring_cqe_seen(ring, cqe);
	}

	return 0;
}
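
/*
 * Queue five 1-byte buffer-select reads on a pipe, write "01234" into it,
 * and check that each CQE carries a buffer ID and the expected byte.
 */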
static int test_buf_select_pipe(void)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	int ret, i;
	int fds[2];

	if (no_buf_select)
		return 0;

	ret = io_uring_queue_init(64, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	ret = provide_buffers_iovec(&ring, 0);
	if (ret) {
		fprintf(stderr, "provide buffers failed: %d\n", ret);
		return 1;
	}

	ret = pipe(fds);
	if (ret) {
		fprintf(stderr, "pipe failed: %d\n", ret);
		return 1;
	}

	for (i = 0; i < 5; i++) {
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_read(sqe, fds[0], NULL, 1 /* max read 1 per go */, -1);
		sqe->flags |= IOSQE_BUFFER_SELECT;
		sqe->buf_group = 0;
	}
	io_uring_submit(&ring);

	ret = write(fds[1], "01234", 5);
	if (ret != 5) {
		fprintf(stderr, "pipe write failed %d\n", ret);
		return 1;
	}

	for (i = 0; i < 5; i++) {
		const char *buff;

		if (io_uring_wait_cqe(&ring, &cqe)) {
			fprintf(stderr, "bad wait %d\n", i);
			return 1;
		}
		if (cqe->res != 1) {
			fprintf(stderr, "expected read %d\n", cqe->res);
			return 1;
		}
		if (!(cqe->flags & IORING_CQE_F_BUFFER)) {
			fprintf(stderr, "no buffer %d\n", cqe->res);
			return 1;
		}
		buff = vecs[cqe->flags >> 16].iov_base;
		if (*buff != '0' + i) {
			fprintf(stderr, "%d: expected %c, got %c\n", i, '0' + i, *buff);
			return 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	close(fds[0]);
	close(fds[1]);
	io_uring_queue_exit(&ring);
	return 0;
}
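
/*
 * Write a known per-buffer pattern to the file, provide the buffers to
 * group 1, then read the data back with IOSQE_BUFFER_SELECT and verify it.
 * Sets no_buf_select and skips if PROVIDE_BUFFERS is unsupported.
 */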
static int test_buf_select(const char *filename, int nonvec)
{
	struct io_uring_probe *p;
	struct io_uring ring;
	int ret, i;

	ret = io_uring_queue_init(64, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	p = io_uring_get_probe_ring(&ring);
	if (!p || !io_uring_opcode_supported(p, IORING_OP_PROVIDE_BUFFERS)) {
		no_buf_select = 1;
		fprintf(stdout, "Buffer select not supported, skipping\n");
		return 0;
	}
	io_uring_free_probe(p);

	/*
	 * Write out data with known pattern
	 */
	for (i = 0; i < BUFFERS; i++)
		memset(vecs[i].iov_base, i, vecs[i].iov_len);

	ret = __test_io(filename, &ring, 1, 0, 0, 0, 0, 0, 1, BS);
	if (ret) {
		fprintf(stderr, "failed writing data\n");
		return 1;
	}

	for (i = 0; i < BUFFERS; i++)
		memset(vecs[i].iov_base, 0x55, vecs[i].iov_len);

	ret = provide_buffers_iovec(&ring, 1);
	if (ret)
		return ret;

	ret = __test_io(filename, &ring, 0, 0, 0, 0, nonvec, 1, 1, BS);
	io_uring_queue_exit(&ring);
	return ret;
}
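
/*
 * Provide all buffers, then remove them in batches of 'batch' (optionally
 * with extra sqe flags such as IOSQE_IO_LINK or IOSQE_ASYNC) and check that
 * each removal reports the expected count.
 */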
static int test_rem_buf(int batch, int sqe_flags)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	int left, ret, nr = 0;
	int bgid = 1;

	if (no_buf_select)
		return 0;

	ret = io_uring_queue_init(64, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	ret = provide_buffers_iovec(&ring, bgid);
	if (ret)
		return ret;

	left = BUFFERS;
	while (left) {
		int to_rem = (left < batch) ? left : batch;

		left -= to_rem;
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_remove_buffers(sqe, to_rem, bgid);
		sqe->user_data = to_rem;
		sqe->flags |= sqe_flags;
		++nr;
	}

	ret = io_uring_submit(&ring);
	if (ret != nr) {
		fprintf(stderr, "submit: %d\n", ret);
		return -1;
	}

	for (; nr > 0; nr--) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			return 1;
		}
		if (cqe->res != cqe->user_data) {
			fprintf(stderr, "cqe->res=%d\n", cqe->res);
			return 1;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return ret;
}
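
/*
 * Issue a single IORING_OP_REMOVE_BUFFERS for 'to_rem' buffers; the
 * completion must report min(to_rem, BUFFERS).
 */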
static int test_rem_buf_single(int to_rem)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	int ret, expected;
	int bgid = 1;

	if (no_buf_select)
		return 0;

	ret = io_uring_queue_init(64, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		return 1;
	}

	ret = provide_buffers_iovec(&ring, bgid);
	if (ret)
		return ret;

	expected = (to_rem > BUFFERS) ? BUFFERS : to_rem;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_remove_buffers(sqe, to_rem, bgid);

	ret = io_uring_submit(&ring);
	if (ret != 1) {
		fprintf(stderr, "submit: %d\n", ret);
		return -1;
	}

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret) {
		fprintf(stderr, "wait_cqe=%d\n", ret);
		return 1;
	}
	if (cqe->res != expected) {
		fprintf(stderr, "cqe->res=%d, expected=%d\n", cqe->res, expected);
		return 1;
	}
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return ret;
}
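
/*
 * Submit 100 chains of 100 linked IOSQE_ASYNC writevs and verify every
 * completion; skip if IOSQE_ASYNC is not supported.
 */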
static int test_io_link(const char *file)
{
	const int nr_links = 100;
	const int link_len = 100;
	const int nr_sqes = nr_links * link_len;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	int i, j, fd, ret;

	fd = open(file, O_WRONLY);
	if (fd < 0) {
		perror("file open");
		goto err;
	}

	ret = io_uring_queue_init(nr_sqes, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		goto err;
	}

	for (i = 0; i < nr_links; ++i) {
		for (j = 0; j < link_len; ++j) {
			sqe = io_uring_get_sqe(&ring);
			if (!sqe) {
				fprintf(stderr, "sqe get failed\n");
				goto err;
			}
			io_uring_prep_writev(sqe, fd, &vecs[0], 1, 0);
			sqe->flags |= IOSQE_ASYNC;
			if (j != link_len - 1)
				sqe->flags |= IOSQE_IO_LINK;
		}
	}

	ret = io_uring_submit(&ring);
	if (ret != nr_sqes) {
		ret = io_uring_peek_cqe(&ring, &cqe);
		if (!ret && cqe->res == -EINVAL) {
			fprintf(stdout, "IOSQE_ASYNC not supported, skipped\n");
			goto out;
		}
		fprintf(stderr, "submit got %d, wanted %d\n", ret, nr_sqes);
		goto err;
	}

	for (i = 0; i < nr_sqes; i++) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			goto err;
		}
		if (cqe->res == -EINVAL) {
			if (!warned) {
				fprintf(stdout, "Non-vectored IO not "
					"supported, skipping\n");
				warned = 1;
				no_read = 1;
			}
		} else if (cqe->res != BS) {
			fprintf(stderr, "cqe res %d, wanted %d\n", cqe->res, BS);
			goto err;
		}
		io_uring_cqe_seen(&ring, cqe);
	}

out:
	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
err:
	if (fd != -1)
		close(fd);
	return 1;
}
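
/*
 * Drop RLIMIT_FSIZE to 128KB and queue 32 writes of BS bytes each: the
 * first 16 must succeed, the rest must fail with -EFBIG. Requires root.
 */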
static int test_write_efbig(void)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct io_uring ring;
	struct rlimit rlim, old_rlim;
	int i, fd, ret;
	loff_t off;

	if (geteuid()) {
		fprintf(stdout, "Not root, skipping %s\n", __FUNCTION__);
		return 0;
	}

	if (getrlimit(RLIMIT_FSIZE, &old_rlim) < 0) {
		perror("getrlimit");
		return 1;
	}
	rlim = old_rlim;
	rlim.rlim_cur = 128 * 1024;
	rlim.rlim_max = 128 * 1024;
	if (setrlimit(RLIMIT_FSIZE, &rlim) < 0) {
		perror("setrlimit");
		return 1;
	}

	fd = open(".efbig", O_WRONLY | O_CREAT, 0644);
	if (fd < 0) {
		perror("file open");
		goto err;
	}
	unlink(".efbig");

	ret = io_uring_queue_init(32, &ring, 0);
	if (ret) {
		fprintf(stderr, "ring create failed: %d\n", ret);
		goto err;
	}

	off = 0;
	for (i = 0; i < 32; i++) {
		sqe = io_uring_get_sqe(&ring);
		if (!sqe) {
			fprintf(stderr, "sqe get failed\n");
			goto err;
		}
		io_uring_prep_writev(sqe, fd, &vecs[i], 1, off);
		io_uring_sqe_set_data64(sqe, i);
		off += BS;
	}

	ret = io_uring_submit(&ring);
	if (ret != 32) {
		fprintf(stderr, "submit got %d, wanted %d\n", ret, 32);
		goto err;
	}

	for (i = 0; i < 32; i++) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret) {
			fprintf(stderr, "wait_cqe=%d\n", ret);
			goto err;
		}
		if (cqe->user_data < 16) {
			if (cqe->res != BS) {
				fprintf(stderr, "bad write: %d\n", cqe->res);
				goto err;
			}
		} else {
			if (cqe->res != -EFBIG) {
				fprintf(stderr, "Expected -EFBIG: %d\n", cqe->res);
				goto err;
			}
		}
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(fd);
	unlink(".efbig");

	if (setrlimit(RLIMIT_FSIZE, &old_rlim) < 0) {
		perror("setrlimit");
		return 1;
	}
	return 0;
err:
	if (fd != -1)
		close(fd);
	return 1;
}
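
/*
 * Create the test file and buffers, run the full matrix of buffered/O_DIRECT,
 * SQPOLL, fixed, vectored, and buffer-select combinations, then the targeted
 * regression tests above, including a non-aligned buffer pass.
 */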
int main(int argc, char *argv[])
{
	int i, ret, nr;
	char buf[256];
	char *fname;

	if (argc > 1) {
		fname = argv[1];
	} else {
		srand((unsigned)time(NULL));
		snprintf(buf, sizeof(buf), ".basic-rw-%u-%u",
			 (unsigned)rand(), (unsigned)getpid());
		fname = buf;
		t_create_file(fname, FILE_SIZE);
	}

	signal(SIGXFSZ, SIG_IGN);

	vecs = t_create_buffers(BUFFERS, BS);

	/* if we don't have nonvec read, skip testing that */
	nr = has_nonvec_read() ? 32 : 16;

	for (i = 0; i < nr; i++) {
		int write = (i & 1) != 0;
		int buffered = (i & 2) != 0;
		int sqthread = (i & 4) != 0;
		int fixed = (i & 8) != 0;
		int nonvec = (i & 16) != 0;

		ret = test_io(fname, write, buffered, sqthread, fixed, nonvec,
			      BS);
		if (ret) {
			fprintf(stderr, "test_io failed %d/%d/%d/%d/%d\n",
				write, buffered, sqthread, fixed, nonvec);
			goto err;
		}
	}

	ret = test_buf_select(fname, 1);
	if (ret) {
		fprintf(stderr, "test_buf_select nonvec failed\n");
		goto err;
	}

	ret = test_buf_select(fname, 0);
	if (ret) {
		fprintf(stderr, "test_buf_select vec failed\n");
		goto err;
	}

	ret = test_buf_select_short(fname, 1);
	if (ret) {
		fprintf(stderr, "test_buf_select_short nonvec failed\n");
		goto err;
	}

	ret = test_buf_select_short(fname, 0);
	if (ret) {
		fprintf(stderr, "test_buf_select_short vec failed\n");
		goto err;
	}

	ret = test_buf_select_pipe();
	if (ret) {
		fprintf(stderr, "test_buf_select_pipe failed\n");
		goto err;
	}

	ret = test_eventfd_read();
	if (ret) {
		fprintf(stderr, "test_eventfd_read failed\n");
		goto err;
	}

	ret = read_poll_link(fname);
	if (ret) {
		fprintf(stderr, "read_poll_link failed\n");
		goto err;
	}

	ret = test_io_link(fname);
	if (ret) {
		fprintf(stderr, "test_io_link failed\n");
		goto err;
	}

	ret = test_write_efbig();
	if (ret) {
		fprintf(stderr, "test_write_efbig failed\n");
		goto err;
	}

	ret = test_rem_buf(1, 0);
	if (ret) {
		fprintf(stderr, "test_rem_buf by 1 failed\n");
		goto err;
	}

	ret = test_rem_buf(10, 0);
	if (ret) {
		fprintf(stderr, "test_rem_buf by 10 failed\n");
		goto err;
	}

	ret = test_rem_buf(2, IOSQE_IO_LINK);
	if (ret) {
		fprintf(stderr, "test_rem_buf link failed\n");
		goto err;
	}

	ret = test_rem_buf(2, IOSQE_ASYNC);
	if (ret) {
		fprintf(stderr, "test_rem_buf async failed\n");
		goto err;
	}

	srand((unsigned)time(NULL));
	if (create_nonaligned_buffers()) {
		fprintf(stderr, "non-aligned buffer creation failed\n");
		goto err;
	}

	/* test fixed bufs with non-aligned len/offset */
	for (i = 0; i < nr; i++) {
		int write = (i & 1) != 0;
		int buffered = (i & 2) != 0;
		int sqthread = (i & 4) != 0;
		int fixed = (i & 8) != 0;
		int nonvec = (i & 16) != 0;

		/* direct IO requires alignment, skip it */
		if (!buffered || !fixed || nonvec)
			continue;

		ret = test_io(fname, write, buffered, sqthread, fixed, nonvec,
			      -1);
		if (ret) {
			fprintf(stderr, "test_io failed %d/%d/%d/%d/%d\n",
				write, buffered, sqthread, fixed, nonvec);
			goto err;
		}
	}

	ret = test_rem_buf_single(BUFFERS + 1);
	if (ret) {
		fprintf(stderr, "test_rem_buf_single(BUFFERS + 1) failed\n");
		goto err;
	}

	if (fname != argv[1])
		unlink(fname);
	return 0;
err:
	if (fname != argv[1])
		unlink(fname);
	return 1;
}