1 /*
2 * Copyright (c) 2025 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #ifndef UNITTEST_HYPERAIO_INCLUDE_LIBURING_H
17 #define UNITTEST_HYPERAIO_INCLUDE_LIBURING_H
18
#include <sys/types.h>

#include <chrono>
#include <cstdint>
#include <memory>
#include <thread>
#include <vector>
21 namespace OHOS {
22 namespace HyperAio {
// Fallback definition matching the Linux O_RDWR value; guarded so a prior
// include of <fcntl.h> does not trigger a macro redefinition.
#ifndef O_RDWR
#define O_RDWR 02
#endif
// Test knobs that select the outcome of each stubbed liburing call.
// All default to the success path; individual tests flip them to exercise
// failure branches.
inline bool sqe_flag = true;      // one-shot: io_uring_get_sqe succeeds once, then returns nullptr until re-armed by io_uring_queue_init
inline bool init_flag = true;     // io_uring_queue_init returns 1 when true, -1 when false
inline bool wait_flag = true;     // io_uring_wait_cqe fails once when false, then auto-resets to true
inline bool cqe_res_flag = true;  // cqe->res produced by io_uring_wait_cqe: 0 when true, -1 when false (auto-resets to true)
inline bool submit_flag = true;   // io_uring_submit returns 1 when true, -1 when false
// Minimal stand-in for the kernel submission-queue entry.
struct io_uring_sqe {
    int32_t data{0};  // zero-initialized to avoid indeterminate reads in tests
};
32
// Minimal stand-in for the kernel completion-queue entry.
// Members are brace-initialized so a default-constructed cqe never exposes
// indeterminate values to the code under test.
struct io_uring_cqe {
    int32_t data{0};
    uint64_t user_data{0};  // caller-supplied tag echoed back on completion
    int32_t res{0};         // operation result; stub sets 0 (success) or -1
    uint32_t flags{0};
};
39
40 struct io_uring {
41 std::vector<std::unique_ptr<io_uring_sqe>> sqe_list;
io_uringio_uring42 io_uring() {}
~io_uringio_uring43 ~io_uring() {}
io_uring_get_sqeio_uring44 inline io_uring_sqe *io_uring_get_sqe()
45 {
46 auto sqe = std::make_unique<io_uring_sqe>();
47 io_uring_sqe *raw_sqe = sqe.get();
48 sqe_list.push_back(std::move(sqe));
49 return raw_sqe;
50 }
clear_sqesio_uring51 void clear_sqes()
52 {
53 sqe_list.clear();
54 }
55 };
56
io_uring_get_sqe(struct io_uring * ring)57 inline struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
58 {
59 if (sqe_flag) {
60 sqe_flag = !sqe_flag;
61 return ring->io_uring_get_sqe();
62 }
63 return nullptr;
64 }
65
io_uring_submit(struct io_uring * ring)66 inline int io_uring_submit(struct io_uring *ring)
67 {
68 if (submit_flag) {
69 return 1;
70 }
71 return -1;
72 }
73
io_uring_queue_init(unsigned entries,struct io_uring * ring,unsigned flags)74 inline int io_uring_queue_init(unsigned entries, struct io_uring *ring, unsigned flags)
75 {
76 sqe_flag = true;
77 if (init_flag) {
78 return 1;
79 }
80 return -1;
81 }
82
// No-op stub: the mock never records user data on the sqe.
inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
{
    (void)sqe;
    (void)data;
}
87
// No-op stub: the tests never inspect the prepared openat request.
inline void io_uring_prep_openat(struct io_uring_sqe *sqe, int dfd,
    const char *path, int flags, mode_t mode)
{
    (void)sqe;
    (void)dfd;
    (void)path;
    (void)flags;
    (void)mode;
}
93
// No-op stub: the tests never inspect the prepared read request.
inline void io_uring_prep_read(struct io_uring_sqe *sqe, int fd,
    void *buf, unsigned nbytes, uint64_t offset)
{
    (void)sqe;
    (void)fd;
    (void)buf;
    (void)nbytes;
    (void)offset;
}
99
// No-op stub: the tests never inspect the prepared cancel request.
inline void io_uring_prep_cancel(struct io_uring_sqe *sqe,
    void *user_data, int flags)
{
    (void)sqe;
    (void)user_data;
    (void)flags;
}
105
io_uring_wait_cqe(struct io_uring * ring,struct io_uring_cqe ** cqe_ptr)106 inline int io_uring_wait_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
107 {
108 std::this_thread::sleep_for(std::chrono::seconds(1));
109 if (!wait_flag) {
110 wait_flag = true;
111 return -1;
112 }
113 *cqe_ptr = new io_uring_cqe();
114 (*cqe_ptr)->res = cqe_res_flag ? 0 : -1;
115 cqe_res_flag = true;
116 return 1;
117 }
118
// Reclaims the cqe allocated by io_uring_wait_cqe.
inline void io_uring_cqe_seen(struct io_uring *ring, struct io_uring_cqe *cqe)
{
    (void)ring;
    delete cqe;
}
124
io_uring_queue_exit(struct io_uring * ring)125 inline void io_uring_queue_exit(struct io_uring *ring)
126 {
127 return;
128 }
129
130 }
131 }
132 #endif