// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020 ARM Ltd. All rights reserved.
 * Author: Vikas Kumar <vikas.kumar2@arm.com>
 *
 * Copyright (C) 2020 Cyril Hrubis <chrubis@suse.cz>
 *
 * Tests for the raw asynchronous I/O API, i.e. io_uring_setup(),
 * io_uring_register() and io_uring_enter(). The test validates basic API
 * operation by creating a submission queue and a completion queue with
 * io_uring_setup(), registering a user buffer with the kernel for long-term
 * use with io_uring_register(), and initiating I/O with io_uring_enter().
 */
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include "config.h"
#include "tst_test.h"
#include "lapi/io_uring.h"

#define TEST_FILE "test_file"

#define QUEUE_DEPTH 1
#define BLOCK_SZ 1024

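/*
 * Each test case pairs the io_uring_setup() flags with an io_uring_register()
 * opcode and, despite the name, enter_flags is the SQE opcode submitted via
 * io_uring_enter().
 */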
static struct tcase {
	unsigned int setup_flags;
	unsigned int register_opcode;
	unsigned int enter_flags;
} tcases[] = {
	{0, IORING_REGISTER_BUFFERS, IORING_OP_READ_FIXED},
};

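/*
 * Userspace views of the shared rings: each pointer targets a field inside
 * the mmap'd SQ/CQ ring regions set up in setup_io_uring_test().
 */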
struct io_sq_ring {
	unsigned int *head;
	unsigned int *tail;
	unsigned int *ring_mask;
	unsigned int *ring_entries;
	unsigned int *flags;
	unsigned int *array;
};

struct io_cq_ring {
	unsigned int *head;
	unsigned int *tail;
	unsigned int *ring_mask;
	unsigned int *ring_entries;
	struct io_uring_cqe *cqes;
};

struct submitter {
	int ring_fd;
	struct io_sq_ring sq_ring;
	struct io_uring_sqe *sqes;
	struct io_cq_ring cq_ring;
};

static struct submitter sub_ring;
static struct submitter *s = &sub_ring;
static sigset_t sig;
static struct iovec *iov;

static void *sptr;
static size_t sptr_size;
static void *cptr;
static size_t cptr_size;

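/*
 * Create the io_uring instance with io_uring_setup() and mmap the three
 * shared regions: the SQ ring header and index array, the SQE array and
 * the CQ ring. Returns non-zero if io_uring_setup() fails.
 */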
static int setup_io_uring_test(struct submitter *s, struct tcase *tc)
{
	struct io_sq_ring *sring = &s->sq_ring;
	struct io_cq_ring *cring = &s->cq_ring;
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	p.flags |= tc->setup_flags;
	s->ring_fd = io_uring_setup(QUEUE_DEPTH, &p);
	if (s->ring_fd != -1) {
		tst_res(TPASS, "io_uring_setup() passed");
	} else {
		tst_res(TFAIL | TERRNO, "io_uring_setup() failed");
		return 1;
	}

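	/*
	 * sq_off.array is the offset of the SQE index array within the SQ
	 * ring mapping, so the mapping has to extend far enough to cover
	 * sq_entries indices past it.
	 */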
	sptr_size = p.sq_off.array + p.sq_entries * sizeof(unsigned int);

	/* Submission queue ring buffer mapping */
	sptr = SAFE_MMAP(0, sptr_size,
			 PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_POPULATE,
			 s->ring_fd, IORING_OFF_SQ_RING);

	/* Save global submission queue struct info */
	sring->head = sptr + p.sq_off.head;
	sring->tail = sptr + p.sq_off.tail;
	sring->ring_mask = sptr + p.sq_off.ring_mask;
	sring->ring_entries = sptr + p.sq_off.ring_entries;
	sring->flags = sptr + p.sq_off.flags;
	sring->array = sptr + p.sq_off.array;

	/* Submission queue entries ring buffer mapping */
	s->sqes = SAFE_MMAP(0, p.sq_entries *
			    sizeof(struct io_uring_sqe),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE,
			    s->ring_fd, IORING_OFF_SQES);

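	/*
	 * Likewise, the CQ ring mapping has to cover the CQE array that
	 * starts at cq_off.cqes.
	 */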
	cptr_size = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);

	/* Completion queue ring buffer mapping */
	cptr = SAFE_MMAP(0, cptr_size,
			 PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_POPULATE,
			 s->ring_fd, IORING_OFF_CQ_RING);

	/* Save global completion queue struct info */
	cring->head = cptr + p.cq_off.head;
	cring->tail = cptr + p.cq_off.tail;
	cring->ring_mask = cptr + p.cq_off.ring_mask;
	cring->ring_entries = cptr + p.cq_off.ring_entries;
	cring->cqes = cptr + p.cq_off.cqes;

	return 0;
}

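/*
 * The test file is filled with 'a' bytes in setup(), so a successful read
 * must yield a buffer containing nothing else.
 */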
static void check_buffer(char *buffer, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buffer[i] != 'a') {
			tst_res(TFAIL, "Wrong data at offset %zu", i);
			break;
		}
	}

	if (i == len)
		tst_res(TPASS, "Buffer filled in correctly");
}

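/*
 * Consume CQEs between head and tail; indices wrap around the ring via
 * ring_mask. Each CQE carries the result of one submitted operation.
 */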
static void drain_uring_cq(struct submitter *s, unsigned int exp_events)
{
	struct io_cq_ring *cring = &s->cq_ring;
	unsigned int head = *cring->head;
	unsigned int events = 0;

	for (head = *cring->head; head != *cring->tail; head++) {
		struct io_uring_cqe *cqe = &cring->cqes[head & *s->cq_ring.ring_mask];

		events++;

		if (cqe->res < 0) {
			tst_res(TFAIL, "CQE result %s", tst_strerrno(-cqe->res));
		} else {
			struct iovec *iovecs = (void*)cqe->user_data;

			if (cqe->res == BLOCK_SZ)
				tst_res(TPASS, "CQE result %i", cqe->res);
			else
				tst_res(TFAIL, "CQE result %i expected %i", cqe->res, BLOCK_SZ);

			check_buffer(iovecs[0].iov_base, cqe->res);
		}
	}

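	/* Publish the new head so the kernel can reuse the consumed CQEs */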
	*cring->head = head;

	if (exp_events == events) {
		tst_res(TPASS, "Got %u completion events", events);
		return;
	}

	tst_res(TFAIL, "Got %u completion events expected %u",
		events, exp_events);
}

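/*
 * Register the destination buffer, fill in a single read SQE at the SQ ring
 * tail and submit it with io_uring_enter(), waiting for its completion.
 */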
static int submit_to_uring_sq(struct submitter *s, struct tcase *tc)
{
	unsigned int index = 0, tail = 0, next_tail = 0;
	struct io_sq_ring *sring = &s->sq_ring;
	struct io_uring_sqe *sqe;
	int ret;

	/* Zero the buffer so stale data cannot fake a passing data check */
	memset(iov->iov_base, 0, iov->iov_len);

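	/*
	 * Register the buffer with the kernel so that IORING_OP_READ_FIXED
	 * can read into it without per-I/O buffer pinning.
	 */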
	ret = io_uring_register(s->ring_fd, tc->register_opcode,
				iov, QUEUE_DEPTH);
	if (ret == 0) {
		tst_res(TPASS, "io_uring_register() passed");
	} else {
		tst_res(TFAIL | TERRNO, "io_uring_register() failed");
		return 1;
	}

	int fd = SAFE_OPEN(TEST_FILE, O_RDONLY);

	/* Submission queue entry addition to SQE ring buffer tail */
	tail = *sring->tail;
	next_tail = tail + 1;
	index = tail & *s->sq_ring.ring_mask;
	sqe = &s->sqes[index];
	sqe->flags = 0;
	sqe->fd = fd;
	sqe->opcode = tc->enter_flags;
	sqe->addr = (unsigned long)iov->iov_base;
	sqe->len = BLOCK_SZ;
	sqe->off = 0;
	/* user_data comes back in the CQE, pass the iovec for verification */
	sqe->user_data = (unsigned long long)iov;
	sring->array[index] = index;
	tail = next_tail;

	/* Advance the tail so that the kernel notices the new SQE */
	if (*sring->tail != tail)
		*sring->tail = tail;

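	/*
	 * Submit one SQE and, because of IORING_ENTER_GETEVENTS, block until
	 * at least one completion (min_complete = 1) is available.
	 */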
	ret = io_uring_enter(s->ring_fd, 1, 1, IORING_ENTER_GETEVENTS, &sig);
	if (ret == 1) {
		tst_res(TPASS, "io_uring_enter() waited for 1 event");
	} else {
		tst_res(TFAIL | TERRNO, "io_uring_enter() returned %i", ret);
		SAFE_CLOSE(fd);
		return 1;
	}

	SAFE_CLOSE(fd);
	return 0;
}

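/*
 * Tear down in reverse order of setup: unregister the buffers, unmap the
 * shared rings and close the ring fd.
 */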
static void cleanup_io_uring_test(void)
{
	io_uring_register(s->ring_fd, IORING_UNREGISTER_BUFFERS,
			  NULL, QUEUE_DEPTH);
	SAFE_MUNMAP(s->sqes, sizeof(struct io_uring_sqe));
	SAFE_MUNMAP(cptr, cptr_size);
	SAFE_MUNMAP(sptr, sptr_size);
	SAFE_CLOSE(s->ring_fd);
}

static void run(unsigned int n)
{
	struct tcase *tc = &tcases[n];

	if (setup_io_uring_test(s, tc))
		return;

	if (!submit_to_uring_sq(s, tc))
		drain_uring_cq(s, 1);

	cleanup_io_uring_test();
}

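/*
 * Check that the kernel supports io_uring_setup() and create a 1024 byte
 * test file full of 'a' bytes for the READ_FIXED request to read back.
 */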
static void setup(void)
{
	io_uring_setup_supported_by_kernel();
	tst_fill_file(TEST_FILE, 'a', 1024, 1);
}

static struct tst_test test = {
	.setup = setup,
	.test = run,
	.needs_tmpdir = 1,
	.tcnt = ARRAY_SIZE(tcases),
	.bufs = (struct tst_buffers []) {
		{&iov, .iov_sizes = (int[]){BLOCK_SZ, -1}},
		{}
	}
};