// SPDX-License-Identifier: GPL-2.0-only
/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/idr.h>
#include <linux/uio.h>

DEFINE_PER_CPU(int, eventfd_wake_count);

static DEFINE_IDA(eventfd_ida);

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
	int id;
};
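
/*
 * Illustrative userspace sketch (not part of this file): the counter
 * semantics described in the comment above, driven through the eventfd(2)
 * syscall. This is hypothetical test code, not kernel API.
 */
#if 0
#include <sys/eventfd.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int efd = eventfd(0, 0);	/* "count" starts at 0 */

	val = 3;
	write(efd, &val, sizeof(val));	/* count = 3 */
	val = 4;
	write(efd, &val, sizeof(val));	/* count = 7: writes accumulate */

	read(efd, &val, sizeof(val));	/* returns 7 and resets count to 0 */
	printf("read %llu\n", (unsigned long long)val);

	close(efd);
	return 0;
}
#endif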

__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, unsigned mask)
{
	unsigned long flags;

	/*
	 * Deadlock or stack overflow issues can happen if we recurse here
	 * through waitqueue wakeup handlers. If the caller uses potentially
	 * nested waitqueues with custom wakeup handlers, then it should
	 * check eventfd_signal_count() before calling this function. If
	 * it returns true, the eventfd_signal() call should be deferred to a
	 * safe context.
	 */
	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
		return 0;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	this_cpu_inc(eventfd_wake_count);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
	this_cpu_dec(eventfd_wake_count);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
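
/*
 * Illustrative sketch (not part of this file) of the deferral pattern the
 * comment above recommends. eventfd_signal_count() and eventfd_signal()
 * are the real APIs; my_ctx and my_signal_work are hypothetical caller
 * state (my_signal_work initialized elsewhere with INIT_WORK()).
 */
#if 0
static struct eventfd_ctx *my_ctx;	/* acquired via eventfd_ctx_fdget() */
static struct work_struct my_signal_work;

static void my_signal_workfn(struct work_struct *work)
{
	eventfd_signal(my_ctx, 1);	/* safe: ordinary process context */
}

static void my_notify(void)
{
	if (eventfd_signal_count())
		schedule_work(&my_signal_work);	/* nested wakeup: defer */
	else
		eventfd_signal(my_ctx, 1);
}
#endif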

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value of the counter to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning EPOLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented. This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	return eventfd_signal_mask(ctx, n, 0);
}
EXPORT_SYMBOL_GPL(eventfd_signal);

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	if (ctx->id >= 0)
		ida_simple_remove(&eventfd_ida, ctx->id);
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	u64 count;

	poll_wait(file, &ctx->wqh, wait);

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock. This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it!  add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read be ordered properly
	 * against the writes.  The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue_active is false**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= EPOLLIN;
	if (count == ULLONG_MAX)
		events |= EPOLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= EPOLLOUT;

	return events;
}
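
/*
 * Illustrative userspace sketch (not part of this file): observing the
 * poll states computed above. Hypothetical test code; an empty eventfd is
 * writable but not readable, a non-empty one is both.
 */
#if 0
#include <sys/eventfd.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd = { .events = POLLIN | POLLOUT };
	uint64_t val = 1;

	pfd.fd = eventfd(0, 0);

	poll(&pfd, 1, 0);		/* count == 0: only POLLOUT */
	printf("empty:    IN=%d OUT=%d\n",
	       !!(pfd.revents & POLLIN), !!(pfd.revents & POLLOUT));

	write(pfd.fd, &val, sizeof(val));
	poll(&pfd, 1, 0);		/* count > 0: POLLIN and POLLOUT */
	printf("nonempty: IN=%d OUT=%d\n",
	       !!(pfd.revents & POLLIN), !!(pfd.revents & POLLOUT));

	close(pfd.fd);
	return 0;
}
#endif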

static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
	ctx->count -= *cnt;
}
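
/*
 * Illustrative userspace sketch (not part of this file): the EFD_SEMAPHORE
 * branch above makes each read(2) return 1 and decrement "count" by 1,
 * rather than returning and resetting the whole value.
 */
#if 0
#include <sys/eventfd.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int efd = eventfd(3, EFD_SEMAPHORE);	/* count starts at 3 */

	read(efd, &val, sizeof(val));	/* val = 1, count now 2 */
	read(efd, &val, sizeof(val));	/* val = 1, count now 1 */
	read(efd, &val, sizeof(val));	/* val = 1, count now 0 */
	/* a fourth read would block (or fail with EAGAIN if O_NONBLOCK) */

	close(efd);
	return 0;
}
#endif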

/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue entry.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue entry to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error code:
 *
 * -EAGAIN : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);

static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct eventfd_ctx *ctx = file->private_data;
	__u64 ucnt = 0;
	DECLARE_WAITQUEUE(wait, current);

	if (iov_iter_count(to) < sizeof(ucnt))
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	if (!ctx->count) {
		if ((file->f_flags & O_NONBLOCK) ||
		    (iocb->ki_flags & IOCB_NOWAIT)) {
			spin_unlock_irq(&ctx->wqh.lock);
			return -EAGAIN;
		}
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count)
				break;
			if (signal_pending(current)) {
				__remove_wait_queue(&ctx->wqh, &wait);
				__set_current_state(TASK_RUNNING);
				spin_unlock_irq(&ctx->wqh.lock);
				return -ERESTARTSYS;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	eventfd_ctx_do_read(ctx, &ucnt);
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irq(&ctx->wqh.lock);
	if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
		return -EFAULT;

	return sizeof(ucnt);
}

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}
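
/*
 * Illustrative userspace sketch (not part of this file): the limits
 * enforced above. A write of ULLONG_MAX fails with EINVAL, and a
 * nonblocking write that cannot fit fails with EAGAIN.
 */
#if 0
#include <sys/eventfd.h>
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int efd = eventfd(0, EFD_NONBLOCK);

	val = UINT64_MAX;		/* == ULLONG_MAX: always rejected */
	assert(write(efd, &val, sizeof(val)) < 0 && errno == EINVAL);

	val = UINT64_MAX - 1;		/* largest value the counter holds */
	assert(write(efd, &val, sizeof(val)) == sizeof(val));

	val = 1;			/* would overflow: EAGAIN */
	assert(write(efd, &val, sizeof(val)) < 0 && errno == EAGAIN);

	close(efd);
	return 0;
}
#endif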

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-count: %16llx\n",
		   (unsigned long long)ctx->count);
	spin_unlock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-id: %d\n", ctx->id);
}
#endif

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= eventfd_show_fdinfo,
#endif
	.release	= eventfd_release,
	.poll		= eventfd_poll,
	.read_iter	= eventfd_read,
	.write		= eventfd_write,
	.llseek		= noop_llseek,
};

/**
 * eventfd_fget - Acquires a reference to an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointers:
 *
 * -EBADF  : Invalid @fd file descriptor.
 * -EINVAL : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise an error
 * pointer:
 *
 * -EBADF  : Invalid @fd file descriptor.
 * -EINVAL : The @fd file descriptor is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);

	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
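
/*
 * Illustrative sketch (not part of this file): the typical kernel-side
 * lifecycle around eventfd_ctx_fdget(). A driver is handed an eventfd file
 * descriptor by userspace, pins the context, signals it later, and finally
 * drops its reference. my_attach/my_fire/my_detach are hypothetical.
 */
#if 0
static struct eventfd_ctx *my_trigger;

static int my_attach(int fd)
{
	struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	my_trigger = ctx;		/* keeps the kref from fdget */
	return 0;
}

static void my_fire(void)
{
	eventfd_signal(my_trigger, 1);	/* poll(2) waiters see EPOLLIN */
}

static void my_detach(void)
{
	eventfd_ctx_put(my_trigger);	/* release the reference */
}
#endif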

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL : The @file is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	struct eventfd_ctx *ctx;

	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	ctx = file->private_data;
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	struct file *file;
	int fd;

	/* Check the EFD_* constants for consistency.  */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;
	ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);

	flags &= EFD_SHARED_FCNTL_FLAGS;
	flags |= O_RDWR;
	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		goto err;

	file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		fd = PTR_ERR(file);
		goto err;
	}

	file->f_mode |= FMODE_NOWAIT;
	fd_install(fd, file);
	return fd;
err:
	eventfd_free_ctx(ctx);
	return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return do_eventfd(count, 0);
}
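
/*
 * Illustrative userspace sketch (not part of this file): both entry points
 * above are normally reached through the glibc eventfd() wrapper, which
 * uses eventfd2 so that EFD_* flags can be passed at creation time.
 */
#if 0
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	/* eventfd2(7, EFD_CLOEXEC | EFD_NONBLOCK): initial count of 7 */
	int efd = eventfd(7, EFD_CLOEXEC | EFD_NONBLOCK);

	if (efd < 0)
		return 1;
	close(efd);
	return 0;
}
#endif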