#include <aio.h>
#include <pthread.h>
#include <semaphore.h>
#include <limits.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/auxv.h>
#include <unsupported_api.h>
#include "syscall.h"
#include "atomic.h"
#include "pthread_impl.h"

/* The following is a threads-based implementation of AIO with minimal
 * dependence on implementation details. Most synchronization is
 * performed with pthread primitives, but atomics and futex operations
 * are used for notification in a couple places where the pthread
 * primitives would be inefficient or impractical.
 *
 * For each fd with outstanding aio operations, an aio_queue structure
 * is maintained. These are reference-counted and destroyed by the last
 * aio worker thread to exit. Accessing any member of the aio_queue
 * structure requires a lock on the aio_queue. Adding and removing aio
 * queues themselves requires a write lock on the global map object,
 * a 4-level table mapping file descriptor numbers to aio queues. A
 * read lock on the map is used to obtain locks on existing queues by
 * excluding destruction of the queue by a different thread while it is
 * being locked.
 *
 * Each aio queue has a list of active threads/operations. Presently there
 * is a one to one relationship between threads and operations. The only
 * members of the aio_thread structure which are accessed by other threads
 * are the linked list pointers, op (which is immutable), running (which
 * is updated atomically), and err (which is synchronized via running),
 * so no locking is necessary. Most of the other members are used
 * for sharing data between the main flow of execution and the cancellation
 * cleanup handler.
 *
 * Taking any aio locks requires having all signals blocked. This is
 * necessary because aio_cancel is needed by close, and close is required
 * to be async-signal safe. All aio worker threads run with all signals
 * blocked permanently.
 */
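
/* For illustration: __aio_get_queue below splits the fd into four index
 * bytes, so fd 0x01020304 would be found at map[0x01][0x02][0x03][0x04],
 * and the low-numbered fds most programs use all land under map[0][0][0]. */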

struct aio_thread {
	pthread_t td;
	struct aiocb *cb;
	struct aio_thread *next, *prev;
	struct aio_queue *q;
	volatile int running;
	int err, op;
	ssize_t ret;
};

struct aio_queue {
	int fd, seekable, append, ref, init;
	pthread_mutex_t lock;
	pthread_cond_t cond;
	struct aio_thread *head;
};

struct aio_args {
	struct aiocb *cb;
	struct aio_queue *q;
	int op;
	sem_t sem;
};

static pthread_rwlock_t maplock = PTHREAD_RWLOCK_INITIALIZER;
static struct aio_queue *****map;
static volatile int aio_fd_cnt;
volatile int __aio_fut;

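/* Look up (and, if need is nonzero, create) the queue for fd. On success
 * the queue is returned with its lock already held; the caller must unlock
 * it. A null return with need set means either an invalid fd (EBADF from
 * fcntl) or allocation failure. */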
static struct aio_queue *__aio_get_queue(int fd, int need)
{
	if (fd < 0) {
		errno = EBADF;
		return 0;
	}
	int a=fd>>24;
	unsigned char b=fd>>16, c=fd>>8, d=fd;
	struct aio_queue *q = 0;
	pthread_rwlock_rdlock(&maplock);
	if ((!map || !map[a] || !map[a][b] || !map[a][b][c] || !(q=map[a][b][c][d])) && need) {
		pthread_rwlock_unlock(&maplock);
		if (fcntl(fd, F_GETFD) < 0) return 0;
		pthread_rwlock_wrlock(&maplock);
		if (!map) map = calloc(sizeof *map, (-1U/2+1)>>24);
		if (!map) goto out;
		if (!map[a]) map[a] = calloc(sizeof **map, 256);
		if (!map[a]) goto out;
		if (!map[a][b]) map[a][b] = calloc(sizeof ***map, 256);
		if (!map[a][b]) goto out;
		if (!map[a][b][c]) map[a][b][c] = calloc(sizeof ****map, 256);
		if (!map[a][b][c]) goto out;
		if (!(q = map[a][b][c][d])) {
			map[a][b][c][d] = q = calloc(sizeof *****map, 1);
			if (q) {
				q->fd = fd;
				pthread_mutex_init(&q->lock, 0);
				pthread_cond_init(&q->cond, 0);
				a_inc(&aio_fd_cnt);
			}
		}
	}
	if (q) pthread_mutex_lock(&q->lock);
out:
	pthread_rwlock_unlock(&maplock);
	return q;
}

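/* Drop one reference to q, which the caller holds locked. Destroying the
 * queue requires the map write lock, and the lock order is maplock before
 * queue lock, so the queue lock is released and reacquired; the reference
 * count is rechecked afterwards in case another thread obtained a
 * reference in the meantime. */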
static void __aio_unref_queue(struct aio_queue *q)
{
	if (q->ref > 1) {
		q->ref--;
		pthread_mutex_unlock(&q->lock);
		return;
	}

	/* This is potentially the last reference, but a new reference
	 * may arrive since we cannot free the queue object without first
	 * taking the maplock, which requires releasing the queue lock. */
	pthread_mutex_unlock(&q->lock);
	pthread_rwlock_wrlock(&maplock);
	pthread_mutex_lock(&q->lock);
	if (q->ref == 1) {
		int fd=q->fd;
		int a=fd>>24;
		unsigned char b=fd>>16, c=fd>>8, d=fd;
		map[a][b][c][d] = 0;
		a_dec(&aio_fd_cnt);
		pthread_rwlock_unlock(&maplock);
		pthread_mutex_unlock(&q->lock);
		free(q);
	} else {
		q->ref--;
		pthread_rwlock_unlock(&maplock);
		pthread_mutex_unlock(&q->lock);
	}
}

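/* Cancellation/completion handler for a worker thread. It runs both on
 * normal completion (via pthread_cleanup_pop(1)) and on cancellation,
 * publishes the result and error code, removes the operation from its
 * queue, and finally delivers any requested sigevent notification. */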
static void cleanup(void *ctx)
{
	struct aio_thread *at = ctx;
	struct aio_queue *q = at->q;
	struct aiocb *cb = at->cb;
	struct sigevent sev = cb->aio_sigevent;

	/* There are four potential types of waiters we could need to wake:
	 *   1. Callers of aio_cancel/close.
	 *   2. Callers of aio_suspend with a single aiocb.
	 *   3. Callers of aio_suspend with a list.
	 *   4. AIO worker threads waiting for sequenced operations.
	 * Types 1-3 are notified via atomics/futexes, mainly for AS-safety
	 * considerations. Type 4 is notified later via a cond var. */

	cb->__ret = at->ret;
	if (a_swap(&at->running, 0) < 0)
		__wake(&at->running, -1, 1);
	if (a_swap(&cb->__err, at->err) != EINPROGRESS)
		__wake(&cb->__err, -1, 1);
	if (a_swap(&__aio_fut, 0))
		__wake(&__aio_fut, -1, 1);

	pthread_mutex_lock(&q->lock);

	if (at->next) at->next->prev = at->prev;
	if (at->prev) at->prev->next = at->next;
	else q->head = at->next;

	/* Signal aio worker threads waiting for sequenced operations. */
	pthread_cond_broadcast(&q->cond);

	__aio_unref_queue(q);

	if (sev.sigev_notify == SIGEV_SIGNAL) {
		siginfo_t si = {
			.si_signo = sev.sigev_signo,
			.si_value = sev.sigev_value,
			.si_code = SI_ASYNCIO,
			.si_pid = getpid(),
			.si_uid = getuid()
		};
		__syscall(SYS_rt_sigqueueinfo, si.si_pid, si.si_signo, &si);
	}
	if (sev.sigev_notify == SIGEV_THREAD) {
		a_store(&__pthread_self()->cancel, 0);
		sev.sigev_notify_function(sev.sigev_value);
	}
}

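/* Body of one detached worker thread, handling exactly one request. It
 * copies its arguments, takes the queue lock and releases the submitter
 * via args->sem, links itself into the queue's list, waits for any earlier
 * writes it must be ordered after (append-mode writes and fsync ops), then
 * performs the I/O and lets the cleanup handler publish the result. */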
static void *io_thread_func(void *ctx)
{
	struct aio_thread at, *p;

	struct aio_args *args = ctx;
	struct aiocb *cb = args->cb;
	int fd = cb->aio_fildes;
	int op = args->op;
	void *buf = (void *)cb->aio_buf;
	size_t len = cb->aio_nbytes;
	off_t off = cb->aio_offset;

	struct aio_queue *q = args->q;
	ssize_t ret;

	pthread_mutex_lock(&q->lock);
	sem_post(&args->sem);

	at.op = op;
	at.running = 1;
	at.ret = -1;
	at.err = ECANCELED;
	at.q = q;
	at.td = __pthread_self();
	at.cb = cb;
	at.prev = 0;
	if ((at.next = q->head)) at.next->prev = &at;
	q->head = &at;

	if (!q->init) {
		int seekable = lseek(fd, 0, SEEK_CUR) >= 0;
		q->seekable = seekable;
		q->append = !seekable || (fcntl(fd, F_GETFL) & O_APPEND);
		q->init = 1;
	}

	pthread_cleanup_push(cleanup, &at);

	/* Wait for sequenced operations. */
	if (op!=LIO_READ && (op!=LIO_WRITE || q->append)) {
		for (;;) {
			for (p=at.next; p && p->op!=LIO_WRITE; p=p->next);
			if (!p) break;
			pthread_cond_wait(&q->cond, &q->lock);
		}
	}

	pthread_mutex_unlock(&q->lock);

	switch (op) {
	case LIO_WRITE:
		ret = q->append ? write(fd, buf, len) : pwrite(fd, buf, len, off);
		break;
	case LIO_READ:
		ret = !q->seekable ? read(fd, buf, len) : pread(fd, buf, len, off);
		break;
	case O_SYNC:
		ret = fsync(fd);
		break;
	case O_DSYNC:
		ret = fdatasync(fd);
		break;
	}
	at.ret = ret;
	at.err = ret<0 ? errno : 0;

	pthread_cleanup_pop(1);

	return 0;
}

static size_t io_thread_stack_size = MINSIGSTKSZ+2048;
static pthread_once_t init_stack_size_once;

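/* Worker threads need only a small stack plus headroom for a signal frame;
 * AT_MINSIGSTKSZ reports a larger requirement on some archs, so the default
 * is adjusted once, lazily, before the first worker using it is created. */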
static void init_stack_size()
{
	unsigned long val = __getauxval(AT_MINSIGSTKSZ);
	if (val > MINSIGSTKSZ) io_thread_stack_size = val + 512;
}

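/* Common submission path for aio_read, aio_write and aio_fsync: take a
 * reference on the fd's queue, spawn a detached worker thread with all
 * signals blocked, then wait on args.sem until the worker has copied its
 * arguments and taken the queue lock, so the stack-allocated args object
 * can safely go out of scope when this function returns. */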
static int submit(struct aiocb *cb, int op)
{
	int ret = 0;
	pthread_attr_t a;
	sigset_t allmask, origmask;
	pthread_t td;
	struct aio_queue *q = __aio_get_queue(cb->aio_fildes, 1);
	struct aio_args args = { .cb = cb, .op = op, .q = q };
	sem_init(&args.sem, 0, 0);

	if (!q) {
		if (errno != EBADF) errno = EAGAIN;
		cb->__ret = -1;
		cb->__err = errno;
		return -1;
	}
	q->ref++;
	pthread_mutex_unlock(&q->lock);

	if (cb->aio_sigevent.sigev_notify == SIGEV_THREAD) {
		if (cb->aio_sigevent.sigev_notify_attributes)
			a = *cb->aio_sigevent.sigev_notify_attributes;
		else
			pthread_attr_init(&a);
	} else {
		pthread_once(&init_stack_size_once, init_stack_size);
		pthread_attr_init(&a);
		pthread_attr_setstacksize(&a, io_thread_stack_size);
		pthread_attr_setguardsize(&a, 0);
	}
	pthread_attr_setdetachstate(&a, PTHREAD_CREATE_DETACHED);
	sigfillset(&allmask);
	pthread_sigmask(SIG_BLOCK, &allmask, &origmask);
	cb->__err = EINPROGRESS;
	if (pthread_create(&td, &a, io_thread_func, &args)) {
		pthread_mutex_lock(&q->lock);
		__aio_unref_queue(q);
		cb->__err = errno = EAGAIN;
		cb->__ret = ret = -1;
	}
	pthread_sigmask(SIG_SETMASK, &origmask, 0);

	if (!ret) {
		while (sem_wait(&args.sem));
	}

	return ret;
}

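/* Typical caller-side use of the functions below (illustrative sketch only,
 * assuming fd and buf are provided by the caller): submit with aio_read,
 * poll aio_error until it stops returning EINPROGRESS, then collect the
 * result with aio_return, e.g.
 *
 *	struct aiocb cb = { .aio_fildes = fd, .aio_buf = buf,
 *		.aio_nbytes = sizeof buf, .aio_offset = 0 };
 *	if (aio_read(&cb)) return -1;
 *	while (aio_error(&cb) == EINPROGRESS) usleep(1000);
 *	ssize_t n = aio_return(&cb);
 */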
int aio_read(struct aiocb *cb)
{
	unsupported_api(__FUNCTION__);
	return submit(cb, LIO_READ);
}

int aio_write(struct aiocb *cb)
{
	unsupported_api(__FUNCTION__);
	return submit(cb, LIO_WRITE);
}

int aio_fsync(int op, struct aiocb *cb)
{
	unsupported_api(__FUNCTION__);
	if (op != O_SYNC && op != O_DSYNC) {
		errno = EINVAL;
		return -1;
	}
	return submit(cb, op);
}

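/* aio_return and aio_error report the values published by the worker's
 * cleanup handler; aio_error masks off the high bit of __err, which is
 * not part of the error code. */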
ssize_t aio_return(struct aiocb *cb)
{
	unsupported_api(__FUNCTION__);
	return cb->__ret;
}

int aio_error(const struct aiocb *cb)
{
	unsupported_api(__FUNCTION__);
	a_barrier();
	return cb->__err & 0x7fffffff;
}

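/* Cancel one request (cb non-null) or all outstanding requests on fd.
 * Signals are blocked because this is reached from close() and must remain
 * async-signal-safe. Each matching worker is cancelled and waited for via
 * its running flag before the queue lock is dropped. */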
int aio_cancel(int fd, struct aiocb *cb)
{
	sigset_t allmask, origmask;
	int ret = AIO_ALLDONE;
	struct aio_thread *p;
	struct aio_queue *q;

	unsupported_api(__FUNCTION__);
	/* Unspecified behavior case. Report an error. */
	if (cb && fd != cb->aio_fildes) {
		errno = EINVAL;
		return -1;
	}

	sigfillset(&allmask);
	pthread_sigmask(SIG_BLOCK, &allmask, &origmask);

	errno = ENOENT;
	if (!(q = __aio_get_queue(fd, 0))) {
		if (errno == EBADF) ret = -1;
		goto done;
	}

	for (p = q->head; p; p = p->next) {
		if (cb && cb != p->cb) continue;
		/* Transition target from running to running-with-waiters */
		if (a_cas(&p->running, 1, -1)) {
			pthread_cancel(p->td);
			__wait(&p->running, 0, -1, 1);
			if (p->err == ECANCELED) ret = AIO_CANCELED;
		}
	}

	pthread_mutex_unlock(&q->lock);
done:
	pthread_sigmask(SIG_SETMASK, &origmask, 0);
	return ret;
}

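/* Hook used by close(): cancel any aio still pending on the fd before it
 * is closed. aio_fd_cnt lets the common case, a process that never used
 * aio, skip the cancellation path entirely. */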
int __aio_close(int fd)
{
	a_barrier();
	if (aio_fd_cnt) aio_cancel(fd, 0);
	return fd;
}

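/* off_t is always 64-bit here, so the LFS64 interfaces are plain aliases. */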
weak_alias(aio_cancel, aio_cancel64);
weak_alias(aio_error, aio_error64);
weak_alias(aio_fsync, aio_fsync64);
weak_alias(aio_read, aio_read64);
weak_alias(aio_write, aio_write64);
weak_alias(aio_return, aio_return64);