/*
 * Copyright (C) 2024 HiHope Open Source Organization.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>
#include <vector>
#include <fcntl.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <gtest/gtest.h>
#include <netinet/in.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/types.h>
#include "securec.h"
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <sys/mman.h>

// Simplified io_uring_enter: raw syscall wrapper, returns -1 and sets errno on failure
static int io_uring_enter(int fd, unsigned to_submit, unsigned min_complete, unsigned flags)
{
    return syscall(__NR_io_uring_enter, fd, to_submit, min_complete, flags, NULL, 0);
}

// Simplified io_uring_setup: returns a ring fd and fills *p with the ring offsets
static int io_uring_setup(unsigned entries, struct io_uring_params *p)
{
    return syscall(__NR_io_uring_setup, entries, p);
}

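// The tests below drive io_uring by hand rather than through liburing:
// io_uring_setup() creates the ring, mmap() maps the SQ ring, CQ ring and
// SQE array into user space, SQEs are filled in directly, io_uring_enter()
// submits them, and completions are reaped by walking the CQ ring.
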
using namespace testing::ext;

static const char *TEST_READ_FILE = "/data/local/tmp/splice_read_file.txt";
static const char *TEST_WRITE_FILE = "/data/local/tmp/splice_write_file.txt";
static const char *TEST_DATA = "Hello World!";
static const int TEST_DATA_LEN = strlen(TEST_DATA);

class HatsEnterTest : public testing::Test {
public:
    static void SetUpTestCase();
    static void TearDownTestCase();
    void SetUp();
    void TearDown();
};
void HatsEnterTest::SetUp()
{
    int fd = open(TEST_READ_FILE, O_WRONLY | O_CREAT, 0644);
    EXPECT_TRUE(fd > 0);
    write(fd, TEST_DATA, TEST_DATA_LEN);
    close(fd);
}
void HatsEnterTest::TearDown()
{
    (void)remove(TEST_READ_FILE);
    (void)remove(TEST_WRITE_FILE);
}
void HatsEnterTest::SetUpTestCase()
{
    struct io_uring_params p = {0};
    // Probe for io_uring support; errno is only meaningful when setup fails
    int fd = io_uring_setup(1, &p);
    if (fd < 0 && errno == ENOSYS) {
        GTEST_SKIP() << "io_uring is not supported on this device, skip test case";
    }
    if (fd >= 0) {
        close(fd);
    }
}
void HatsEnterTest::TearDownTestCase()
{
}

/*
 * @tc.number : SUB_KERNEL_SYSCALL_ENTER_0100
 * @tc.name : EnterSqeSuccess_0001
 * @tc.desc : io_uring_enter on a fresh ring succeeds with nothing to submit.
 * @tc.size : MediumTest
 * @tc.type : Function
 * @tc.level : Level 1
 */
HWTEST_F(HatsEnterTest, EnterSqeSuccess_0001, Function | MediumTest | Level1)
{
    struct io_uring_params p = {0};
    int uringFd = io_uring_setup(4, &p);
    EXPECT_TRUE(uringFd > 0);

    // The SQ ring is empty, so no entries are consumed and 0 is returned
    int ret = io_uring_enter(uringFd, 1, 1, 0);
    EXPECT_EQ(ret, 0);
    close(uringFd);
}

/*
 * @tc.number : SUB_KERNEL_SYSCALL_ENTER_0200
 * @tc.name : EnterSqeSuccess_0002
 * @tc.desc : io_uring_enter with IORING_ENTER_GETEVENTS succeeds.
 * @tc.size : MediumTest
 * @tc.type : Function
 * @tc.level : Level 1
 */
HWTEST_F(HatsEnterTest, EnterSqeSuccess_0002, Function | MediumTest | Level1)
{
    struct io_uring_params p = {0};
    int uringFd = io_uring_setup(4, &p);
    EXPECT_TRUE(uringFd > 0);

    // IORING_ENTER_GETEVENTS asks the kernel to also wait for completions
    int ret = io_uring_enter(uringFd, 1, 1, IORING_ENTER_GETEVENTS);
    EXPECT_EQ(ret, 0);
    close(uringFd);
}

/*
 * @tc.number : SUB_KERNEL_SYSCALL_ENTER_0300
 * @tc.name : EnterSqeSuccess_0003
 * @tc.desc : io_uring_enter with to_submit 0 and IORING_ENTER_GETEVENTS succeeds.
 * @tc.size : MediumTest
 * @tc.type : Function
 * @tc.level : Level 1
 */
HWTEST_F(HatsEnterTest, EnterSqeSuccess_0003, Function | MediumTest | Level1)
{
    struct io_uring_params p = {0};
    int uringFd = io_uring_setup(4, &p);
    EXPECT_TRUE(uringFd > 0);

    // Nothing to submit and min_complete is 0, so the call returns at once
    int ret = io_uring_enter(uringFd, 0, 0, IORING_ENTER_GETEVENTS);
    EXPECT_EQ(ret, 0);
    close(uringFd);
}

/*
 * @tc.number : SUB_KERNEL_SYSCALL_ENTER_0400
 * @tc.name : EnterSqeInvalidFdFailed_0004
 * @tc.desc : io_uring_enter with an invalid fd fails, errno EBADF.
 * @tc.size : MediumTest
 * @tc.type : Function
 * @tc.level : Level 2
 */
HWTEST_F(HatsEnterTest, EnterSqeInvalidFdFailed_0004, Function | MediumTest | Level2)
{
    struct io_uring_params p = {0};
    int uringFd = io_uring_setup(4, &p);
    EXPECT_TRUE(uringFd > 0);

    // fd -1 is not a valid descriptor, so the call fails with EBADF
    int ret = io_uring_enter(-1, 1, 1, 0);
    EXPECT_EQ(ret, -1);
    EXPECT_EQ(errno, EBADF);
    close(uringFd);
}

/*
 * @tc.number : SUB_KERNEL_SYSCALL_ENTER_0500
 * @tc.name : EnterSqeNotSupportFdFailed_0005
 * @tc.desc : io_uring_enter on an fd that is not an io_uring instance fails, errno EOPNOTSUPP.
 * @tc.size : MediumTest
 * @tc.type : Function
 * @tc.level : Level 2
 */
HWTEST_F(HatsEnterTest, EnterSqeNotSupportFdFailed_0005, Function | MediumTest | Level2)
{
    struct io_uring_params p = {0};
    int uringFd = io_uring_setup(4, &p);
    EXPECT_TRUE(uringFd > 0);

    // fd 0 is open but is not an io_uring fd, so the call fails with EOPNOTSUPP
    int ret = io_uring_enter(0, 1, 1, 0);
    EXPECT_EQ(ret, -1);
    EXPECT_EQ(errno, EOPNOTSUPP);
    close(uringFd);
}

/*
 * @tc.number : SUB_KERNEL_SYSCALL_ENTER_0600
 * @tc.name : EnterSqeAbnormalFdFailed_0006
 * @tc.desc : io_uring_enter with an unopened fd fails.
 * @tc.size : MediumTest
 * @tc.type : Function
 * @tc.level : Level 2
 */
HWTEST_F(HatsEnterTest, EnterSqeAbnormalFdFailed_0006, Function | MediumTest | Level2)
{
    struct io_uring_params p = {0};
    int uringFd = io_uring_setup(4, &p);
    EXPECT_TRUE(uringFd > 0);

    // uringFd + 10 does not refer to an open descriptor, so the call fails
    int ret = io_uring_enter(uringFd + 10, 1, 1, 0);
    EXPECT_EQ(ret, -1);
    close(uringFd);
}

/*
 * @tc.number : SUB_KERNEL_SYSCALL_ENTER_0700
 * @tc.name : EnterSqeInvalidFlagFailed_0007
 * @tc.desc : io_uring_enter with invalid flags fails, errno EINVAL.
 * @tc.size : MediumTest
 * @tc.type : Function
 * @tc.level : Level 2
 */
HWTEST_F(HatsEnterTest, EnterSqeInvalidFlagFailed_0007, Function | MediumTest | Level2)
{
    struct io_uring_params p = {0};
    int uringFd = io_uring_setup(4, &p);
    EXPECT_TRUE(uringFd > 0);

    // flags -1 sets every bit, including undefined ones, so the call fails with EINVAL
    int ret = io_uring_enter(uringFd, 1, 1, -1);
    EXPECT_EQ(ret, -1);
    EXPECT_EQ(errno, EINVAL);
    close(uringFd);
}

/*
 * @tc.number : SUB_KERNEL_SYSCALL_ENTER_0800
 * @tc.name : EnterSqeInvalidStatxFailed_0008
 * @tc.desc : io_uring_enter submits a statx SQE with invalid arguments.
 * @tc.size : MediumTest
 * @tc.type : Function
 * @tc.level : Level 2
 */
HWTEST_F(HatsEnterTest, EnterSqeInvalidStatxFailed_0008, Function | MediumTest | Level2)
{
    int ret;
    int res;
    struct io_uring_params p = {0};
    int fd = open(TEST_READ_FILE, O_RDWR | O_CREAT | O_TRUNC, 0644);
    EXPECT_TRUE(fd > 0);

    int uringFd = io_uring_setup(16, &p);
    EXPECT_TRUE(uringFd > 0);

    // Map the SQ and CQ rings
    size_t sqSize = p.sq_off.array + p.sq_entries * sizeof(unsigned);
    size_t cqSize = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
    EXPECT_TRUE(cqSize > 0);

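    // Each region has a fixed mmap offset: IORING_OFF_SQ_RING for the SQ
    // ring, IORING_OFF_CQ_RING for the CQ ring and IORING_OFF_SQES for the
    // SQE array; the sizes are derived from the offsets the kernel filled in p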
    void *sqPtr = mmap(NULL, sqSize, PROT_READ | PROT_WRITE, MAP_SHARED, uringFd, IORING_OFF_SQ_RING);
    void *cqPtr = mmap(NULL, cqSize, PROT_READ | PROT_WRITE, MAP_SHARED, uringFd, IORING_OFF_CQ_RING);
    EXPECT_TRUE(sqPtr != MAP_FAILED);
    EXPECT_TRUE(cqPtr != MAP_FAILED);

    // Map the SQE array
    struct io_uring_sqe *sqes = (struct io_uring_sqe *)mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
        PROT_READ | PROT_WRITE, MAP_SHARED, uringFd, IORING_OFF_SQES);
    EXPECT_TRUE(sqes != MAP_FAILED);

    // Prepare data (writeData serves as the statx pathname below)
    const char *writeData = "Hello,linked io_uring!";
    size_t dataLen = strlen(writeData);
    char *readBuf = (char *)malloc(dataLen + 1);
    EXPECT_TRUE(readBuf != nullptr);
    res = memset_s(readBuf, dataLen + 1, 0, dataLen + 1);
    EXPECT_EQ(res, 0);

    // Get SQ ring pointers
    unsigned *sqHead = (unsigned *)((char *)sqPtr + p.sq_off.head);
    EXPECT_TRUE(sqHead != nullptr);
    unsigned *sqTail = (unsigned *)((char *)sqPtr + p.sq_off.tail);
    unsigned *sqRingMask = (unsigned *)((char *)sqPtr + p.sq_off.ring_mask);
    unsigned *sqArray = (unsigned *)((char *)sqPtr + p.sq_off.array);

    // Get CQ ring pointers
    unsigned *cqHead = (unsigned *)((char *)cqPtr + p.cq_off.head);
    unsigned *cqTail = (unsigned *)((char *)cqPtr + p.cq_off.tail);
    unsigned *cqRingMask = (unsigned *)((char *)cqPtr + p.cq_off.ring_mask);
    struct io_uring_cqe *cqes = (struct io_uring_cqe *)((char *)cqPtr + p.cq_off.cqes);

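    // head and tail are free-running counters; an index is obtained by
    // masking with ring_mask (ring sizes are powers of two). The application
    // advances the SQ tail and CQ head, the kernel advances the SQ head and
    // CQ tail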
    // Prepare the statx SQE; addr is treated as a pathname, so these
    // arguments are deliberately bogus
    unsigned sqIndex = *sqTail & *sqRingMask;
    struct io_uring_sqe *sqe = &sqes[sqIndex];
    res = memset_s(sqe, sizeof(*sqe), 0, sizeof(*sqe));
    EXPECT_EQ(res, 0);

    sqe->opcode = IORING_OP_STATX;
    sqe->fd = fd;
    sqe->addr = reinterpret_cast<unsigned long>(writeData);
    sqe->len = dataLen;
    sqe->user_data = 1;
    sqe->flags = IOSQE_ASYNC;

    // Add to submission queue
    sqArray[sqIndex] = sqIndex;

    // Advance the tail to publish one entry
    *sqTail += 1;

    ret = io_uring_enter(uringFd, 1, 1, 0);
    EXPECT_EQ(ret, 1);

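    // cqe->res follows the kernel convention: >= 0 is the operation's result,
    // negative is -errno. With IOSQE_ASYNC and no IORING_ENTER_GETEVENTS the
    // completion may not be posted yet, so this loop can legitimately be empty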
    // Process CQEs
    while (*cqHead != *cqTail) {
        unsigned index = *cqHead & *cqRingMask;
        struct io_uring_cqe *cqe = &cqes[index];
        EXPECT_EQ(cqe->res, 0);
        (*cqHead)++;
    }

    free(readBuf);
    munmap(sqes, p.sq_entries * sizeof(struct io_uring_sqe));
    munmap(sqPtr, sqSize);
    munmap(cqPtr, cqSize);
    close(uringFd);
    close(fd);
    unlink(TEST_READ_FILE);
}

/*
 * @tc.number : SUB_KERNEL_SYSCALL_ENTER_0900
 * @tc.name : EnterSqeIlleagelFlagStatxFailed_0009
 * @tc.desc : io_uring_enter submits a statx SQE carrying illegal SQE flags.
 * @tc.size : MediumTest
 * @tc.type : Function
 * @tc.level : Level 2
 */
HWTEST_F(HatsEnterTest, EnterSqeIlleagelFlagStatxFailed_0009, Function | MediumTest | Level2)
{
    int ret;
    int res;
    struct io_uring_params p = {0};
    int fd = open(TEST_READ_FILE, O_RDWR | O_CREAT | O_TRUNC, 0644);
    EXPECT_TRUE(fd > 0);

    int uringFd = io_uring_setup(16, &p);
    EXPECT_TRUE(uringFd > 0);

    // Map the SQ and CQ rings
    size_t sqSize = p.sq_off.array + p.sq_entries * sizeof(unsigned);
    size_t cqSize = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
    EXPECT_TRUE(cqSize > 0);

    void *sqPtr = mmap(NULL, sqSize, PROT_READ | PROT_WRITE, MAP_SHARED, uringFd, IORING_OFF_SQ_RING);
    void *cqPtr = mmap(NULL, cqSize, PROT_READ | PROT_WRITE, MAP_SHARED, uringFd, IORING_OFF_CQ_RING);
    EXPECT_TRUE(sqPtr != MAP_FAILED);
    EXPECT_TRUE(cqPtr != MAP_FAILED);

    // Map the SQE array
    struct io_uring_sqe *sqes = (struct io_uring_sqe *)mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
        PROT_READ | PROT_WRITE, MAP_SHARED, uringFd, IORING_OFF_SQES);
    EXPECT_TRUE(sqes != MAP_FAILED);

    // Prepare data (writeData serves as the statx pathname below)
    const char *writeData = "Hello,linked io_uring!";
    size_t dataLen = strlen(writeData);
    char *readBuf = (char *)malloc(dataLen + 1);
    EXPECT_TRUE(readBuf != nullptr);
    res = memset_s(readBuf, dataLen + 1, 0, dataLen + 1);
    EXPECT_EQ(res, 0);

    // Get SQ ring pointers
    unsigned *sqHead = (unsigned *)((char *)sqPtr + p.sq_off.head);
    EXPECT_TRUE(sqHead != nullptr);
    unsigned *sqTail = (unsigned *)((char *)sqPtr + p.sq_off.tail);
    unsigned *sqRingMask = (unsigned *)((char *)sqPtr + p.sq_off.ring_mask);
    unsigned *sqArray = (unsigned *)((char *)sqPtr + p.sq_off.array);

    // Get CQ ring pointers
    unsigned *cqHead = (unsigned *)((char *)cqPtr + p.cq_off.head);
    unsigned *cqTail = (unsigned *)((char *)cqPtr + p.cq_off.tail);
    unsigned *cqRingMask = (unsigned *)((char *)cqPtr + p.cq_off.ring_mask);
    struct io_uring_cqe *cqes = (struct io_uring_cqe *)((char *)cqPtr + p.cq_off.cqes);

    // Prepare the statx SQE with illegal flags
    unsigned sqIndex = *sqTail & *sqRingMask;
    struct io_uring_sqe *sqe = &sqes[sqIndex];
    res = memset_s(sqe, sizeof(*sqe), 0, sizeof(*sqe));
    EXPECT_EQ(res, 0);

    sqe->opcode = IORING_OP_STATX;
    sqe->fd = fd;
    sqe->addr = reinterpret_cast<unsigned long>(writeData);
    sqe->len = dataLen;
    sqe->user_data = 1;
    sqe->flags = -1;    // sets every IOSQE bit, including undefined ones

    // Add to submission queue
    sqArray[sqIndex] = sqIndex;

    // Advance the tail to publish one entry
    *sqTail += 1;

    ret = io_uring_enter(uringFd, 1, 1, 0);
    EXPECT_EQ(ret, 1);

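    // Undefined IOSQE flag bits are rejected per SQE: the kernel still
    // consumes the entry (enter returned 1) but completes it with -EINVAL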
    // Process CQEs
    while (*cqHead != *cqTail) {
        unsigned index = *cqHead & *cqRingMask;
        struct io_uring_cqe *cqe = &cqes[index];
        EXPECT_EQ(cqe->res, -22);    // -EINVAL
        (*cqHead)++;
    }

    free(readBuf);
    munmap(sqes, p.sq_entries * sizeof(struct io_uring_sqe));
    munmap(sqPtr, sqSize);
    munmap(cqPtr, cqSize);
    close(uringFd);
    close(fd);
    unlink(TEST_READ_FILE);
}

/*
 * @tc.number : SUB_KERNEL_SYSCALL_ENTER_1000
 * @tc.name : EnterSqeFlagSpliceFailed_0010
 * @tc.desc : io_uring_enter submits a splice SQE carrying invalid SQE flags.
 * @tc.size : MediumTest
 * @tc.type : Function
 * @tc.level : Level 2
 */
HWTEST_F(HatsEnterTest, EnterSqeFlagSpliceFailed_0010, Function | MediumTest | Level2)
{
    int ret;
    int res;
    struct io_uring_params p = {0};
    int fd = open(TEST_READ_FILE, O_RDWR | O_CREAT | O_TRUNC, 0644);
    EXPECT_TRUE(fd > 0);

    int uringFd = io_uring_setup(16, &p);
    EXPECT_TRUE(uringFd > 0);

    // Map the SQ and CQ rings
    size_t sqSize = p.sq_off.array + p.sq_entries * sizeof(unsigned);
    size_t cqSize = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
    EXPECT_TRUE(cqSize > 0);

    void *sqPtr = mmap(NULL, sqSize, PROT_READ | PROT_WRITE, MAP_SHARED, uringFd, IORING_OFF_SQ_RING);
    void *cqPtr = mmap(NULL, cqSize, PROT_READ | PROT_WRITE, MAP_SHARED, uringFd, IORING_OFF_CQ_RING);
    EXPECT_TRUE(sqPtr != MAP_FAILED);
    EXPECT_TRUE(cqPtr != MAP_FAILED);

    // Map the SQE array
    struct io_uring_sqe *sqes = (struct io_uring_sqe *)mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
        PROT_READ | PROT_WRITE, MAP_SHARED, uringFd, IORING_OFF_SQES);
    EXPECT_TRUE(sqes != MAP_FAILED);

    // Prepare data
    const char *writeData = "Hello,linked io_uring!";
    size_t dataLen = strlen(writeData);
    char *readBuf = (char *)malloc(dataLen + 1);
    EXPECT_TRUE(readBuf != nullptr);
    res = memset_s(readBuf, dataLen + 1, 0, dataLen + 1);
    EXPECT_EQ(res, 0);

    // Get SQ ring pointers
    unsigned *sqHead = (unsigned *)((char *)sqPtr + p.sq_off.head);
    EXPECT_TRUE(sqHead != nullptr);
    unsigned *sqTail = (unsigned *)((char *)sqPtr + p.sq_off.tail);
    unsigned *sqRingMask = (unsigned *)((char *)sqPtr + p.sq_off.ring_mask);
    unsigned *sqArray = (unsigned *)((char *)sqPtr + p.sq_off.array);

    // Get CQ ring pointers
    unsigned *cqHead = (unsigned *)((char *)cqPtr + p.cq_off.head);
    unsigned *cqTail = (unsigned *)((char *)cqPtr + p.cq_off.tail);
    unsigned *cqRingMask = (unsigned *)((char *)cqPtr + p.cq_off.ring_mask);
    struct io_uring_cqe *cqes = (struct io_uring_cqe *)((char *)cqPtr + p.cq_off.cqes);

    // Prepare the splice SQE
    unsigned sqIndex = *sqTail & *sqRingMask;
    struct io_uring_sqe *sqe = &sqes[sqIndex];
    res = memset_s(sqe, sizeof(*sqe), 0, sizeof(*sqe));
    EXPECT_EQ(res, 0);

    int pipeFd[2];
    ret = pipe(pipeFd);
    EXPECT_EQ(ret, 0);

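    // Splice SQE field mapping: splice_fd_in/splice_off_in describe the
    // source, fd/off the destination, len the byte count. The undefined bits
    // set in sqe->flags below are what make this request fail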
    sqe->fd = pipeFd[1];
    sqe->off = -1;
    sqe->splice_off_in = 0;
    sqe->len = dataLen;
    sqe->opcode = IORING_OP_SPLICE;
    sqe->flags = 0xff;    // includes undefined IOSQE bits
    sqe->splice_fd_in = 0;

    // Add to submission queue
    sqArray[sqIndex] = sqIndex;

    // Advance the tail to publish one entry
    *sqTail += 1;

    ret = io_uring_enter(uringFd, 1, 1, 0);
    EXPECT_EQ(ret, 1);

    // Process CQEs
    while (*cqHead != *cqTail) {
        unsigned index = *cqHead & *cqRingMask;
        struct io_uring_cqe *cqe = &cqes[index];
        EXPECT_EQ(cqe->res, -22);    // -EINVAL
        (*cqHead)++;
    }

    free(readBuf);
    munmap(sqes, p.sq_entries * sizeof(struct io_uring_sqe));
    munmap(sqPtr, sqSize);
    munmap(cqPtr, cqSize);
    close(pipeFd[0]);
    close(pipeFd[1]);
    close(uringFd);
    close(fd);
    unlink(TEST_READ_FILE);
}

/*
 * @tc.number : SUB_KERNEL_SYSCALL_ENTER_1100
 * @tc.name : EnterSqeFlagIOSQE_IO_LINK_0011
 * @tc.desc : io_uring_enter submits a write and a read chained with IOSQE_IO_LINK.
 * @tc.size : MediumTest
 * @tc.type : Function
 * @tc.level : Level 2
 */
HWTEST_F(HatsEnterTest, EnterSqeFlagIOSQE_IO_LINK_0011, Function | MediumTest | Level2)
{
    int ret;
    int res;
    struct io_uring_params p = {0};
    int fd = open(TEST_READ_FILE, O_RDWR | O_CREAT | O_TRUNC, 0644);
    EXPECT_TRUE(fd > 0);

    int uringFd = io_uring_setup(4, &p);
    EXPECT_TRUE(uringFd > 0);

    // Map the SQ and CQ rings
    size_t sqSize = p.sq_off.array + p.sq_entries * sizeof(unsigned);
    size_t cqSize = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
    EXPECT_TRUE(cqSize > 0);

    void *sqPtr = mmap(NULL, sqSize, PROT_READ | PROT_WRITE, MAP_SHARED, uringFd, IORING_OFF_SQ_RING);
    void *cqPtr = mmap(NULL, cqSize, PROT_READ | PROT_WRITE, MAP_SHARED, uringFd, IORING_OFF_CQ_RING);
    EXPECT_TRUE(sqPtr != MAP_FAILED);
    EXPECT_TRUE(cqPtr != MAP_FAILED);

    // Map the SQE array
    struct io_uring_sqe *sqes = (struct io_uring_sqe *)mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
        PROT_READ | PROT_WRITE, MAP_SHARED, uringFd, IORING_OFF_SQES);
    EXPECT_TRUE(sqes != MAP_FAILED);

    // Prepare data
    const char *writeData = "Hello,linked io_uring!";
    size_t dataLen = strlen(writeData);
    char *readBuf = (char *)malloc(dataLen + 1);
    EXPECT_TRUE(readBuf != nullptr);
    res = memset_s(readBuf, dataLen + 1, 0, dataLen + 1);
    EXPECT_EQ(res, 0);

    // Get SQ ring pointers
    unsigned *sqHead = (unsigned *)((char *)sqPtr + p.sq_off.head);
    EXPECT_TRUE(sqHead != nullptr);
    unsigned *sqTail = (unsigned *)((char *)sqPtr + p.sq_off.tail);
    unsigned *sqRingMask = (unsigned *)((char *)sqPtr + p.sq_off.ring_mask);
    unsigned *sqArray = (unsigned *)((char *)sqPtr + p.sq_off.array);

    // Get CQ ring pointers
    unsigned *cqHead = (unsigned *)((char *)cqPtr + p.cq_off.head);
    unsigned *cqTail = (unsigned *)((char *)cqPtr + p.cq_off.tail);
    unsigned *cqRingMask = (unsigned *)((char *)cqPtr + p.cq_off.ring_mask);
    struct io_uring_cqe *cqes = (struct io_uring_cqe *)((char *)cqPtr + p.cq_off.cqes);

    // Prepare the write SQE (first operation in the link)
    unsigned sqIndex = *sqTail & *sqRingMask;
    struct io_uring_sqe *writeSqe = &sqes[sqIndex];
    res = memset_s(writeSqe, sizeof(*writeSqe), 0, sizeof(*writeSqe));
    EXPECT_EQ(res, 0);

    writeSqe->opcode = IORING_OP_WRITE;
    writeSqe->fd = fd;
    writeSqe->addr = reinterpret_cast<unsigned long>(writeData);
    writeSqe->len = dataLen;
    writeSqe->user_data = 1;
    writeSqe->flags = IOSQE_IO_LINK;

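    // IOSQE_IO_LINK chains this write to the next SQE: the read below starts
    // only after the write completes, so it sees the freshly written data;
    // if a linked request fails, the remainder of the chain is canceled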
    // Prepare the read SQE (second operation in the link)
    unsigned nextSqIndex = (*sqTail + 1) & *sqRingMask;
    struct io_uring_sqe *readSqe = &sqes[nextSqIndex];
    res = memset_s(readSqe, sizeof(*readSqe), 0, sizeof(*readSqe));
    EXPECT_EQ(res, 0);

    readSqe->opcode = IORING_OP_READ;
    readSqe->fd = fd;
    readSqe->addr = reinterpret_cast<unsigned long>(readBuf);
    readSqe->len = dataLen;
    readSqe->user_data = 2;
    readSqe->off = 0;

    // Add both to the submission queue
    sqArray[sqIndex] = sqIndex;
    sqArray[nextSqIndex] = nextSqIndex;

    // Advance the tail to publish both entries
    *sqTail += 2;

    ret = io_uring_enter(uringFd, 2, 2, 0);
    EXPECT_EQ(ret, 2);

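    // Without IORING_ENTER_GETEVENTS min_complete is ignored and enter() may
    // return before the linked pair completes, so poll briefly for the CQEs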
    int attempts = 0;
    while (*cqHead == *cqTail && attempts++ < 10) {
        usleep(1000);
    }

    // Process CQEs: the write and the linked read each transfer dataLen bytes
    while (*cqHead != *cqTail) {
        unsigned index = *cqHead & *cqRingMask;
        struct io_uring_cqe *cqe = &cqes[index];
        EXPECT_EQ(cqe->res, 22);    // dataLen
        (*cqHead)++;
    }

    free(readBuf);
    munmap(sqes, p.sq_entries * sizeof(struct io_uring_sqe));
    munmap(sqPtr, sqSize);
    munmap(cqPtr, cqSize);
    close(uringFd);
    close(fd);
    unlink(TEST_READ_FILE);
}