/* SPDX-License-Identifier: MIT */
/*
 * Description: Helpers for tests.
 */
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdarg.h>
#include <sys/types.h>

#include <arpa/inet.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include "helpers.h"
#include "liburing.h"

/*
 * Helper for allocating memory in tests.
 */
void *t_malloc(size_t size)
{
	void *ret;
	ret = malloc(size);
	assert(ret);
	return ret;
}

/*
 * Helper for binding a socket to an ephemeral port.
 * The chosen port number is returned in @addr->sin_port.
 */
int t_bind_ephemeral_port(int fd, struct sockaddr_in *addr)
{
	socklen_t addrlen;
	int ret;

	addr->sin_port = 0;
	if (bind(fd, (struct sockaddr *)addr, sizeof(*addr)))
		return -errno;

	addrlen = sizeof(*addr);
	ret = getsockname(fd, (struct sockaddr *)addr, &addrlen);
	assert(!ret);
	assert(addr->sin_port != 0);
	return 0;
}
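
/*
 * Usage sketch (illustrative; the loopback address and SOCK_STREAM type are
 * assumptions, not taken from a particular test). After the call,
 * addr.sin_port holds the kernel-chosen port in network byte order:
 *
 *	struct sockaddr_in addr = { };
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	addr.sin_family = AF_INET;
 *	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *	assert(fd >= 0);
 *	assert(!t_bind_ephemeral_port(fd, &addr));
 *	close(fd);
 */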

/*
 * Helper for allocating @size bytes aligned on an @alignment-byte boundary.
 */
void t_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret;
	ret = posix_memalign(memptr, alignment, size);
	assert(!ret);
}

/*
 * Helper for allocating space for an array of nmemb elements
 * with size bytes for each element.
 */
void *t_calloc(size_t nmemb, size_t size)
{
	void *ret;
	ret = calloc(nmemb, size);
	assert(ret);
	return ret;
}

/*
 * Helper for creating a file and writing a @size-byte buffer filled with
 * @pattern into it (t_create_file() uses the default pattern 0xaa).
 */
static void __t_create_file(const char *file, size_t size, char pattern)
{
	ssize_t ret;
	char *buf;
	int fd;

	buf = t_malloc(size);
	memset(buf, pattern, size);

	fd = open(file, O_WRONLY | O_CREAT, 0644);
	assert(fd >= 0);

	ret = write(fd, buf, size);
	fsync(fd);
	close(fd);
	free(buf);
	assert(ret == size);
}

void t_create_file(const char *file, size_t size)
{
	__t_create_file(file, size, 0xaa);
}

void t_create_file_pattern(const char *file, size_t size, char pattern)
{
	__t_create_file(file, size, pattern);
}
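
/*
 * Example (illustrative sketch; the file name below is made up): create a
 * 4 KiB file filled with 0x5a, use it in a test, then remove it.
 *
 *	t_create_file_pattern(".helpers-example", 4096, 0x5a);
 *	...
 *	unlink(".helpers-example");
 */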

/*
 * Helper for creating an array of @buf_num iovecs, each backed by a
 * @buf_size-byte buffer.
 */
struct iovec *t_create_buffers(size_t buf_num, size_t buf_size)
{
	struct iovec *vecs;
	int i;

	vecs = t_malloc(buf_num * sizeof(struct iovec));
	for (i = 0; i < buf_num; i++) {
		t_posix_memalign(&vecs[i].iov_base, buf_size, buf_size);
		vecs[i].iov_len = buf_size;
	}
	return vecs;
}
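
/*
 * Example (illustrative sketch; the counts are arbitrary). No matching free
 * helper appears in this file, so a test releases each iov_base and the
 * vector itself:
 *
 *	struct iovec *vecs = t_create_buffers(4, 4096);
 *	int i;
 *
 *	...
 *	for (i = 0; i < 4; i++)
 *		free(vecs[i].iov_base);
 *	free(vecs);
 */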

/*
 * Helper for setting up an io_uring instance, skipping if the given user isn't
 * allowed to.
 */
enum t_setup_ret t_create_ring_params(int depth, struct io_uring *ring,
				      struct io_uring_params *p)
{
	int ret;

	ret = io_uring_queue_init_params(depth, ring, p);
	if (!ret)
		return T_SETUP_OK;
	if ((p->flags & IORING_SETUP_SQPOLL) && ret == -EPERM && geteuid()) {
		fprintf(stdout, "SQPOLL skipped for regular user\n");
		return T_SETUP_SKIP;
	}

	if (ret != -EINVAL)
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
	return ret;
}

enum t_setup_ret t_create_ring(int depth, struct io_uring *ring,
			       unsigned int flags)
{
	struct io_uring_params p = { };

	p.flags = flags;
	return t_create_ring_params(depth, ring, &p);
}
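
/*
 * Typical caller-side handling, sketched for illustration (the T_EXIT_*
 * codes are assumed to come from helpers.h):
 *
 *	struct io_uring ring;
 *	enum t_setup_ret ret;
 *
 *	ret = t_create_ring(8, &ring, IORING_SETUP_SQPOLL);
 *	if (ret == T_SETUP_SKIP)
 *		return T_EXIT_SKIP;
 *	if (ret != T_SETUP_OK)
 *		return T_EXIT_FAIL;
 */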

enum t_setup_ret t_register_buffers(struct io_uring *ring,
				    const struct iovec *iovecs,
				    unsigned nr_iovecs)
{
	int ret;

	ret = io_uring_register_buffers(ring, iovecs, nr_iovecs);
	if (!ret)
		return T_SETUP_OK;

	if ((ret == -EPERM || ret == -ENOMEM) && geteuid()) {
		fprintf(stdout, "too large non-root buffer registration, skip\n");
		return T_SETUP_SKIP;
	}

	fprintf(stderr, "buffer register failed: %s\n", strerror(-ret));
	return ret;
}
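
/*
 * Sketch of intended use (illustrative; the sizes are arbitrary and `ring'
 * is assumed to be an initialized io_uring): register buffers and skip when
 * a non-root user runs into its memlock limit.
 *
 *	struct iovec *vecs = t_create_buffers(8, 65536);
 *
 *	if (t_register_buffers(&ring, vecs, 8) == T_SETUP_SKIP)
 *		return T_EXIT_SKIP;
 */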

int t_create_socket_pair(int fd[2], bool stream)
{
	int ret;
	int type = stream ? SOCK_STREAM : SOCK_DGRAM;
	int val;
	struct sockaddr_in serv_addr;
	struct sockaddr *paddr;
	socklen_t paddrlen;

	type |= SOCK_CLOEXEC;
	fd[0] = socket(AF_INET, type, 0);
	if (fd[0] < 0)
		return errno;
	fd[1] = socket(AF_INET, type, 0);
	if (fd[1] < 0) {
		ret = errno;
		close(fd[0]);
		return ret;
	}

	val = 1;
	if (setsockopt(fd[0], SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val)))
		goto errno_cleanup;

	memset(&serv_addr, 0, sizeof(serv_addr));
	serv_addr.sin_family = AF_INET;
	serv_addr.sin_port = 0;
	inet_pton(AF_INET, "127.0.0.1", &serv_addr.sin_addr);

	paddr = (struct sockaddr *)&serv_addr;
	paddrlen = sizeof(serv_addr);

	if (bind(fd[0], paddr, paddrlen)) {
		fprintf(stderr, "bind failed\n");
		goto errno_cleanup;
	}

	if (stream && listen(fd[0], 16)) {
		fprintf(stderr, "listen failed\n");
		goto errno_cleanup;
	}

	if (getsockname(fd[0], (struct sockaddr *)&serv_addr,
			(socklen_t *)&paddrlen)) {
		fprintf(stderr, "getsockname failed\n");
		goto errno_cleanup;
	}
	inet_pton(AF_INET, "127.0.0.1", &serv_addr.sin_addr);

	if (connect(fd[1], (struct sockaddr *)&serv_addr, paddrlen)) {
		fprintf(stderr, "connect failed\n");
		goto errno_cleanup;
	}

	if (!stream) {
		/* connect the other UDP side */
		if (getsockname(fd[1], (struct sockaddr *)&serv_addr,
				(socklen_t *)&paddrlen)) {
			fprintf(stderr, "getsockname failed\n");
			goto errno_cleanup;
		}
		inet_pton(AF_INET, "127.0.0.1", &serv_addr.sin_addr);

		if (connect(fd[0], (struct sockaddr *)&serv_addr, paddrlen)) {
			fprintf(stderr, "connect failed\n");
			goto errno_cleanup;
		}
		return 0;
	}

	/* for the stream case we must accept and clean up the listen socket */

	ret = accept(fd[0], NULL, NULL);
	if (ret < 0)
		goto errno_cleanup;

	close(fd[0]);
	fd[0] = ret;

	return 0;

errno_cleanup:
	ret = errno;
	close(fd[0]);
	close(fd[1]);
	return ret;
}
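
/*
 * Example (illustrative sketch, error handling trimmed): create a connected
 * TCP pair over loopback and push a few bytes through it.
 *
 *	int fds[2];
 *
 *	assert(!t_create_socket_pair(fds, true));
 *	assert(send(fds[1], "ping", 4, 0) == 4);
 *	close(fds[0]);
 *	close(fds[1]);
 */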

bool t_probe_defer_taskrun(void)
{
	struct io_uring ring;
	int ret;

	ret = io_uring_queue_init(1, &ring, IORING_SETUP_SINGLE_ISSUER |
					    IORING_SETUP_DEFER_TASKRUN);
	if (ret < 0)
		return false;
	io_uring_queue_exit(&ring);
	return true;
}
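
/*
 * Tests that rely on IORING_SETUP_DEFER_TASKRUN typically probe for it up
 * front and skip on kernels without support, roughly like this sketch:
 *
 *	if (!t_probe_defer_taskrun())
 *		return T_EXIT_SKIP;
 */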

/*
 * Sync internal state with kernel ring state on the SQ side. Returns the
 * number of pending items in the SQ ring, for the shared ring.
 */
unsigned __io_uring_flush_sq(struct io_uring *ring)
{
	struct io_uring_sq *sq = &ring->sq;
	unsigned tail = sq->sqe_tail;

	if (sq->sqe_head != tail) {
		sq->sqe_head = tail;
		/*
		 * Ensure kernel sees the SQE updates before the tail update.
		 */
		if (!(ring->flags & IORING_SETUP_SQPOLL))
			*sq->ktail = tail;
		else
			io_uring_smp_store_release(sq->ktail, tail);
	}
	/*
	 * This load needs to be atomic, since sq->khead is written concurrently
	 * by the kernel, but it doesn't need to be load_acquire, since the
	 * kernel doesn't store to the submission queue; it advances khead just
	 * to indicate that it's finished reading the submission queue entries
	 * so they're available for us to write to.
	 */
	return tail - IO_URING_READ_ONCE(*sq->khead);
}
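
/*
 * Illustrative sketch only (assumes a freshly initialized ring with free SQE
 * space): after preparing an SQE without calling io_uring_submit(), the flush
 * helper reports how many entries the kernel has not yet consumed.
 *
 *	unsigned pending;
 *
 *	io_uring_prep_nop(io_uring_get_sqe(&ring));
 *	pending = __io_uring_flush_sq(&ring);
 *	assert(pending == 1);
 */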

/*
 * Implementation of error(3), prints an error message and exits.
 */
void t_error(int status, int errnum, const char *format, ...)
{
	va_list args;
	va_start(args, format);

	vfprintf(stderr, format, args);
	if (errnum)
		fprintf(stderr, ": %s", strerror(errnum));

	fprintf(stderr, "\n");
	va_end(args);
	exit(status);
}
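
/*
 * Sketch of typical use (illustrative; `fname' and `fd' are placeholders):
 * report a failed syscall with its errno string and exit the test.
 *
 *	fd = open(fname, O_RDONLY);
 *	if (fd < 0)
 *		t_error(1, errno, "open %s", fname);
 */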

unsigned long long mtime_since(const struct timeval *s, const struct timeval *e)
{
	long long sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_usec - s->tv_usec);
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= 1000;
	usec /= 1000;
	return sec + usec;
}

unsigned long long mtime_since_now(struct timeval *tv)
{
	struct timeval end;

	gettimeofday(&end, NULL);
	return mtime_since(tv, &end);
}

unsigned long long utime_since(const struct timeval *s, const struct timeval *e)
{
	long long sec, usec;

	sec = e->tv_sec - s->tv_sec;
	usec = (e->tv_usec - s->tv_usec);
	if (sec > 0 && usec < 0) {
		sec--;
		usec += 1000000;
	}

	sec *= 1000000;
	return sec + usec;
}

unsigned long long utime_since_now(struct timeval *tv)
{
	struct timeval end;

	gettimeofday(&end, NULL);
	return utime_since(tv, &end);
}
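
/*
 * Example (illustrative sketch): time a section of a test in milliseconds
 * and microseconds using the helpers above.
 *
 *	struct timeval tv;
 *
 *	gettimeofday(&tv, NULL);
 *	...
 *	printf("took %llu msec (%llu usec)\n",
 *	       mtime_since_now(&tv), utime_since_now(&tv));
 */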