/* SPDX-License-Identifier: MIT */
/*
 * Description: test io_uring_register_sync_cancel()
 *
 */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>

#include "liburing.h"
#include "helpers.h"

static int no_sync_cancel, no_sync_cancel_op;

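/*
 * Submit a read from an empty pipe (which can only complete via
 * cancelation), then issue a sync cancel keyed on the opcode with a
 * 1ns timeout to exercise the cancel timeout path. 'by_op' is passed
 * by the call sites but not otherwise used in the body.
 */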
static int test_sync_cancel_timeout(struct io_uring *ring, int async, int by_op)
{
	struct io_uring_sync_cancel_reg reg = { };
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret, fds[2], to_prep;
	char buf[32];

	if (pipe(fds) < 0) {
		perror("pipe");
		return 1;
	}

	to_prep = 1;
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, fds[0], buf, sizeof(buf), 0);
	sqe->user_data = 0x89;
	if (async)
		sqe->flags |= IOSQE_ASYNC;

	ret = io_uring_submit(ring);
	if (ret != to_prep) {
		fprintf(stderr, "submit=%d\n", ret);
		return 1;
	}

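	/* give the submitted read a chance to get started before canceling */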
	usleep(10000);

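	/* cancel by opcode (IORING_OP_READ), with a tiny 1ns timeout */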
	reg.flags = IORING_ASYNC_CANCEL_OP;
	reg.opcode = IORING_OP_READ;
	reg.timeout.tv_nsec = 1;
	ret = io_uring_register_sync_cancel(ring, &reg);
	/* earlier kernels had sync cancel, but not per-op */
	if (ret == -EINVAL) {
		no_sync_cancel_op = 1;
		return 0;
	}
	if (async) {
		/* we expect -ETIME here, but can race and get 0 */
		if (ret != -ETIME && ret != 0) {
			fprintf(stderr, "sync_cancel=%d\n", ret);
			return 1;
		}
	} else {
		if (ret < 0) {
			fprintf(stderr, "sync_cancel=%d\n", ret);
			return 1;
		}
	}

	/*
	 * we could _almost_ use peek_cqe() here, but there is still
	 * a small gap where io-wq is done with the request and on
	 * its way to posting a completion, but hasn't done it just
	 * yet. the request is canceled and won't be doing any IO
	 * to buffers etc, but the cqe may not have quite arrived yet.
	 */
	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret) {
		fprintf(stderr, "peek=%d\n", ret);
		return 1;
	}
	if (cqe->res >= 0) {
		fprintf(stderr, "cqe->res=%d\n", cqe->res);
		return 1;
	}
	io_uring_cqe_seen(ring, cqe);
	return 0;
}

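/*
 * Submit one read (or four, if 'nr_all' is set) from an empty pipe, then
 * cancel synchronously, matching either by user_data or by fd ('use_fd'),
 * and verify that every submitted request completes with an error.
 * 'by_op' is passed by the call sites but not used in the body.
 */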
static int test_sync_cancel(struct io_uring *ring, int async, int nr_all,
			    int use_fd, int by_op)
{
	struct io_uring_sync_cancel_reg reg = { };
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret, fds[2], to_prep, i;
	char buf[32];

	if (pipe(fds) < 0) {
		perror("pipe");
		return 1;
	}

	to_prep = 1;
	if (nr_all)
		to_prep = 4;
	for (i = 0; i < to_prep; i++) {
		sqe = io_uring_get_sqe(ring);
		io_uring_prep_read(sqe, fds[0], buf, sizeof(buf), 0);
		sqe->user_data = 0x89;
		if (async)
			sqe->flags |= IOSQE_ASYNC;
	}

	ret = io_uring_submit(ring);
	if (ret != to_prep) {
		fprintf(stderr, "submit=%d\n", ret);
		return 1;
	}

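	/* give the submitted reads a chance to get started before canceling */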
	usleep(10000);

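	/*
	 * Match either by user_data (reg.addr) or by fd; optionally cancel
	 * all matching requests rather than just the first one found.
	 */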
	if (!use_fd)
		reg.addr = 0x89;
	else
		reg.fd = fds[0];
	reg.timeout.tv_sec = 200;
	if (nr_all)
		reg.flags |= IORING_ASYNC_CANCEL_ALL;
	if (use_fd)
		reg.flags |= IORING_ASYNC_CANCEL_FD;
	ret = io_uring_register_sync_cancel(ring, &reg);
	if (ret < 0) {
		if (ret == -EINVAL && !no_sync_cancel) {
			no_sync_cancel = 1;
			return 0;
		}
		fprintf(stderr, "sync_cancel=%d\n", ret);
		return 1;
	}

	for (i = 0; i < to_prep; i++) {
		/*
		 * we could _almost_ use peek_cqe() here, but there is still
		 * a small gap where io-wq is done with the request and on
		 * its way to posting a completion, but hasn't done it just
		 * yet. the request is canceled and won't be doing any IO
		 * to buffers etc, but the cqe may not have quite arrived yet.
		 */
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret) {
			fprintf(stderr, "peek=%d\n", ret);
			return 1;
		}
		if (cqe->res >= 0) {
			fprintf(stderr, "cqe->res=%d\n", cqe->res);
			return 1;
		}
		io_uring_cqe_seen(ring, cqe);
	}

	return 0;
}

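/*
 * Run test_sync_cancel() across combinations of (async, nr_all, use_fd,
 * by_op), then the test_sync_cancel_timeout() variants.
 */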
int main(int argc, char *argv[])
{
	struct io_uring ring;
	int ret;

	if (argc > 1)
		return T_EXIT_SKIP;

	ret = t_create_ring(7, &ring, 0);
	if (ret == T_SETUP_SKIP)
		return T_EXIT_SKIP;
	else if (ret != T_SETUP_OK)
		return ret;

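	/* the first run doubles as a probe for sync cancel support */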
	ret = test_sync_cancel(&ring, 0, 0, 0, 0);
	if (ret) {
		fprintf(stderr, "test_sync_cancel 0 0 0 failed\n");
		return T_EXIT_FAIL;
	}
	if (no_sync_cancel)
		return T_EXIT_SKIP;

	ret = test_sync_cancel(&ring, 0, 0, 0, 1);
	if (ret) {
		fprintf(stderr, "test_sync_cancel 0 0 1 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_sync_cancel(&ring, 1, 0, 0, 0);
	if (ret) {
		fprintf(stderr, "test_sync_cancel 1 0 0 0 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_sync_cancel(&ring, 1, 0, 0, 1);
	if (ret) {
		fprintf(stderr, "test_sync_cancel 1 0 0 1 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_sync_cancel(&ring, 0, 1, 0, 0);
	if (ret) {
		fprintf(stderr, "test_sync_cancel 0 1 0 0 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_sync_cancel(&ring, 0, 1, 0, 1);
	if (ret) {
		fprintf(stderr, "test_sync_cancel 0 1 0 1 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_sync_cancel(&ring, 1, 1, 0, 0);
	if (ret) {
		fprintf(stderr, "test_sync_cancel 1 1 0 0 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_sync_cancel(&ring, 0, 0, 1, 0);
	if (ret) {
		fprintf(stderr, "test_sync_cancel 0 0 1 0 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_sync_cancel(&ring, 1, 0, 1, 0);
	if (ret) {
		fprintf(stderr, "test_sync_cancel 1 0 1 0 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_sync_cancel(&ring, 0, 1, 1, 0);
	if (ret) {
		fprintf(stderr, "test_sync_cancel 0 1 1 0 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_sync_cancel(&ring, 1, 1, 1, 0);
	if (ret) {
		fprintf(stderr, "test_sync_cancel 1 1 1 0 failed\n");
		return T_EXIT_FAIL;
	}

	ret = test_sync_cancel_timeout(&ring, 0, 0);
	if (ret) {
		fprintf(stderr, "test_sync_cancel_timeout 0 0\n");
		return T_EXIT_FAIL;
	}
	if (no_sync_cancel_op)
		return T_EXIT_PASS;

	ret = test_sync_cancel_timeout(&ring, 0, 1);
	if (ret) {
		fprintf(stderr, "test_sync_cancel_timeout 0 1\n");
		return T_EXIT_FAIL;
	}

	/* must be last, leaves request */
	ret = test_sync_cancel_timeout(&ring, 1, 0);
	if (ret) {
		fprintf(stderr, "test_sync_cancel_timeout 1\n");
		return T_EXIT_FAIL;
	}

	return T_EXIT_PASS;
}