// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "refs.h"
#include "napi.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
        struct file *file;
        u64 old_user_data;
        u64 new_user_data;
        __poll_t events;
        bool update_events;
        bool update_user_data;
};

struct io_poll_table {
        struct poll_table_struct pt;
        struct io_kiocb *req;
        int nr_entries;
        int error;
        bool owning;
        /* output value, set only if arm poll returns >0 */
        __poll_t result_mask;
};
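
/*
 * ->poll_refs doubles as an ownership/reference count and flag storage:
 * the low bits (IO_POLL_REF_MASK) count references, while the top bits
 * carry the cancellation and retry flags defined below. Whoever bumps the
 * count from zero owns the request for poll processing.
 */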
#define IO_POLL_CANCEL_FLAG	BIT(31)
#define IO_POLL_RETRY_FLAG	BIT(30)
#define IO_POLL_REF_MASK	GENMASK(29, 0)

/*
 * We usually have 1-2 refs taken, 128 is more than enough and we want to
 * maximise the margin between this amount and the moment when it overflows.
 */
#define IO_POLL_REF_BIAS	128

#define IO_WQE_F_DOUBLE		1

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                        void *key);
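
/*
 * The owning io_kiocb is stashed in the wait queue entry's ->private
 * pointer. Since the request is at least pointer-aligned, the lowest bit is
 * free to mark whether this entry is the second ("double") poll entry.
 */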
static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
        unsigned long priv = (unsigned long)wqe->private;

        return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
        unsigned long priv = (unsigned long)wqe->private;

        return priv & IO_WQE_F_DOUBLE;
}
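
/*
 * Slow path of ownership grabbing, taken once poll_refs crossed
 * IO_POLL_REF_BIAS. It avoids endlessly bumping the counter and instead
 * flags the current owner to re-check for missed events.
 */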
static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{
        int v;

        /*
         * poll_refs are already elevated and we don't have much hope for
         * grabbing the ownership. Instead of incrementing, set a retry flag
         * to notify the loop that there might have been some change.
         */
        v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
        if (v & IO_POLL_REF_MASK)
                return false;
        return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
 * free and we can bump it and acquire ownership. Modifying a request while
 * not owning it is disallowed, which prevents races for enqueueing task_work
 * and between arming poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
        if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
                return io_poll_get_ownership_slowpath(req);
        return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
        atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
        /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
        if (req->opcode == IORING_OP_POLL_ADD)
                return req->async_data;
        return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
        if (req->opcode == IORING_OP_POLL_ADD)
                return io_kiocb_to_cmd(req, struct io_poll);
        return &req->apoll->poll;
}
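
/*
 * Armed poll requests are hashed by their CQE user_data so that cancellation
 * (and POLL_REMOVE updates) can find them later. The default table uses
 * per-bucket spinlocks; see io_poll_req_insert_locked() for the
 * ->uring_lock protected variant.
 */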
static void io_poll_req_insert(struct io_kiocb *req)
{
        struct io_hash_table *table = &req->ctx->cancel_table;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);
        struct io_hash_bucket *hb = &table->hbs[index];

        spin_lock(&hb->lock);
        hlist_add_head(&req->hash_node, &hb->list);
        spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
        struct io_hash_table *table = &req->ctx->cancel_table;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);
        spinlock_t *lock = &table->hbs[index].lock;

        spin_lock(lock);
        hash_del(&req->hash_node);
        spin_unlock(lock);
}
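
/*
 * Variant for requests flagged REQ_F_HASH_LOCKED: ->cancel_table_locked
 * relies on ->uring_lock instead of per-bucket spinlocks, which is cheaper
 * when the completion path holds the mutex anyway.
 */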
static void io_poll_req_insert_locked(struct io_kiocb *req)
{
        struct io_hash_table *table = &req->ctx->cancel_table_locked;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);

        lockdep_assert_held(&req->ctx->uring_lock);

        hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts)
{
        struct io_ring_ctx *ctx = req->ctx;

        if (req->flags & REQ_F_HASH_LOCKED) {
                /*
                 * ->cancel_table_locked is protected by ->uring_lock in
                 * contrast to per bucket spinlocks. Likely, tctx_task_work()
                 * already grabbed the mutex for us, but there is a chance it
                 * failed.
                 */
                io_tw_lock(ctx, ts);
                hash_del(&req->hash_node);
                req->flags &= ~REQ_F_HASH_LOCKED;
        } else {
                io_poll_req_delete(req, ctx);
        }
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
{
        poll->head = NULL;
#define IO_POLL_UNMASK	(EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
        /* mask in events that we always want/need */
        poll->events = events | IO_POLL_UNMASK;
        INIT_LIST_HEAD(&poll->wait.entry);
        init_waitqueue_func_entry(&poll->wait, io_poll_wake);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
        struct wait_queue_head *head = smp_load_acquire(&poll->head);

        if (head) {
                spin_lock_irq(&head->lock);
                list_del_init(&poll->wait.entry);
                poll->head = NULL;
                spin_unlock_irq(&head->lock);
        }
}

static void io_poll_remove_entries(struct io_kiocb *req)
{
        /*
         * Nothing to do if neither of those flags are set. Avoid dipping
         * into the poll/apoll/double cachelines if we can.
         */
        if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
                return;

        /*
         * While we hold the waitqueue lock and the waitqueue is nonempty,
         * wake_up_pollfree() will wait for us. However, taking the waitqueue
         * lock in the first place can race with the waitqueue being freed.
         *
         * We solve this as eventpoll does: by taking advantage of the fact that
         * all users of wake_up_pollfree() will RCU-delay the actual free. If
         * we enter rcu_read_lock() and see that the pointer to the queue is
         * non-NULL, we can then lock it without the memory being freed out from
         * under us.
         *
         * Keep holding rcu_read_lock() as long as we hold the queue lock, in
         * case the caller deletes the entry from the queue, leaving it empty.
         * In that case, only RCU prevents the queue memory from being freed.
         */
        rcu_read_lock();
        if (req->flags & REQ_F_SINGLE_POLL)
                io_poll_remove_entry(io_poll_get_single(req));
        if (req->flags & REQ_F_DOUBLE_POLL)
                io_poll_remove_entry(io_poll_get_double(req));
        rcu_read_unlock();
}

enum {
        IOU_POLL_DONE = 0,
        IOU_POLL_NO_ACTION = 1,
        IOU_POLL_REMOVE_POLL_USE_RES = 2,
        IOU_POLL_REISSUE = 3,
        IOU_POLL_REQUEUE = 4,
};
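
/*
 * Stash the event mask in req->cqe.res and punt the actual processing to
 * task_work (io_poll_task_func). Lazy task_work wakeups are used unless the
 * request opted out via REQ_F_POLL_NO_LAZY (e.g. exclusive waits).
 */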
static void __io_poll_execute(struct io_kiocb *req, int mask)
{
        unsigned flags = 0;

        io_req_set_res(req, mask, 0);
        req->io_task_work.func = io_poll_task_func;

        trace_io_uring_task_add(req, mask);

        if (!(req->flags & REQ_F_POLL_NO_LAZY))
                flags = IOU_F_TWQ_LAZY_WAKE;
        __io_req_task_work_add(req, flags);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
        if (io_poll_get_ownership(req))
                __io_poll_execute(req, res);
}

/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action is
 * required, i.e. either a spurious wakeup or a multishot CQE was served.
 * IOU_POLL_DONE when it's done with the request, in which case the mask is
 * stored in req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates that the
 * multishot poll should be removed and that the result is stored in req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
{
        int v;

        if (unlikely(io_should_terminate_tw(req->ctx)))
                return -ECANCELED;

        do {
                v = atomic_read(&req->poll_refs);

                if (unlikely(v != 1)) {
                        /* tw should be the owner and so have some refs */
                        if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
                                return IOU_POLL_NO_ACTION;
                        if (v & IO_POLL_CANCEL_FLAG)
                                return -ECANCELED;
                        /*
                         * cqe.res contains only events of the first wake up
                         * and all others are to be lost. Redo vfs_poll() to get
                         * up to date state.
                         */
                        if ((v & IO_POLL_REF_MASK) != 1)
                                req->cqe.res = 0;

                        if (v & IO_POLL_RETRY_FLAG) {
                                req->cqe.res = 0;
                                /*
                                 * We won't find new events that came in between
                                 * vfs_poll and the ref put unless we clear the
                                 * flag in advance.
                                 */
                                atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
                                v &= ~IO_POLL_RETRY_FLAG;
                        }
                }

                /* the mask was stashed in __io_poll_execute */
                if (!req->cqe.res) {
                        struct poll_table_struct pt = { ._key = req->apoll_events };
                        req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
                        /*
                         * We got woken with a mask, but someone else got to
                         * it first. The above vfs_poll() doesn't add us back
                         * to the waitqueue, so if we get nothing back, we
                         * should be safe and attempt a reissue.
                         */
                        if (unlikely(!req->cqe.res)) {
                                /* Multishot armed need not reissue */
                                if (!(req->apoll_events & EPOLLONESHOT))
                                        continue;
                                return IOU_POLL_REISSUE;
                        }
                }
                if (req->apoll_events & EPOLLONESHOT)
                        return IOU_POLL_DONE;

                /* multishot, just fill a CQE and proceed */
                if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                        __poll_t mask = mangle_poll(req->cqe.res &
                                                    req->apoll_events);

                        if (!io_req_post_cqe(req, mask, IORING_CQE_F_MORE)) {
                                io_req_set_res(req, mask, 0);
                                return IOU_POLL_REMOVE_POLL_USE_RES;
                        }
                } else {
                        int ret = io_poll_issue(req, ts);

                        if (ret == IOU_STOP_MULTISHOT)
                                return IOU_POLL_REMOVE_POLL_USE_RES;
                        else if (ret == IOU_REQUEUE)
                                return IOU_POLL_REQUEUE;
                        if (ret < 0)
                                return ret;
                }

                /* force the next iteration to vfs_poll() */
                req->cqe.res = 0;

                /*
                 * Release all references, retry if someone tried to restart
                 * task_work while we were executing it.
                 */
                v &= IO_POLL_REF_MASK;
        } while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);

        io_napi_add(req);
        return IOU_POLL_NO_ACTION;
}
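
/*
 * task_work entry point for poll requests. Based on what
 * io_poll_check_events() decided, either leave the request armed, re-queue
 * it, complete it (IORING_OP_POLL_ADD), or resubmit the original request
 * (apoll driven retry).
 */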
void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
{
        int ret;

        ret = io_poll_check_events(req, ts);
        if (ret == IOU_POLL_NO_ACTION) {
                io_kbuf_recycle(req, 0);
                return;
        } else if (ret == IOU_POLL_REQUEUE) {
                io_kbuf_recycle(req, 0);
                __io_poll_execute(req, 0);
                return;
        }
        io_poll_remove_entries(req);
        io_poll_tw_hash_eject(req, ts);

        if (req->opcode == IORING_OP_POLL_ADD) {
                if (ret == IOU_POLL_DONE) {
                        struct io_poll *poll;

                        poll = io_kiocb_to_cmd(req, struct io_poll);
                        req->cqe.res = mangle_poll(req->cqe.res & poll->events);
                } else if (ret == IOU_POLL_REISSUE) {
                        io_req_task_submit(req, ts);
                        return;
                } else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
                        req->cqe.res = ret;
                        req_set_fail(req);
                }

                io_req_set_res(req, req->cqe.res, 0);
                io_req_task_complete(req, ts);
        } else {
                io_tw_lock(req->ctx, ts);

                if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
                        io_req_task_complete(req, ts);
                else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
                        io_req_task_submit(req, ts);
                else
                        io_req_defer_failed(req, ret);
        }
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
        io_poll_mark_cancelled(req);
        /* kick tw, which should complete the request */
        io_poll_execute(req, 0);
}

#define IO_ASYNC_POLL_COMMON	(EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
        io_poll_mark_cancelled(req);
        /* we have to kick tw in case it's not already */
        io_poll_execute(req, 0);

        /*
         * If the waitqueue is being freed early but someone already holds
         * ownership over it, we have to tear down the request as best we
         * can. That means immediately removing the request from its
         * waitqueue and preventing all further accesses to the waitqueue
         * via the request.
         */
        list_del_init(&poll->wait.entry);

        /*
         * Careful: this *must* be the last step, since as soon as poll->head
         * is NULL'ed out, the request can be completed and freed, since the
         * poll completion path will no longer need to take the waitqueue
         * lock.
         */
        smp_store_release(&poll->head, NULL);
        return 1;
}
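
/*
 * Waitqueue callback, invoked under the waitqueue head's lock from the
 * wake-up path. It must not sleep; all real work is deferred to task_work
 * via __io_poll_execute() once ownership is grabbed.
 */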
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                        void *key)
{
        struct io_kiocb *req = wqe_to_req(wait);
        struct io_poll *poll = container_of(wait, struct io_poll, wait);
        __poll_t mask = key_to_poll(key);

        if (unlikely(mask & POLLFREE))
                return io_pollfree_wake(req, poll);

        /* for instances that support it check for an event match first */
        if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
                return 0;

        if (io_poll_get_ownership(req)) {
                /*
                 * If we trigger a multishot poll off our own wakeup path,
                 * disable multishot as there is a circular dependency between
                 * CQ posting and triggering the event.
                 */
                if (mask & EPOLL_URING_WAKE)
                        poll->events |= EPOLLONESHOT;

                /* optional, saves extra locking for removal in tw handler */
                if (mask && poll->events & EPOLLONESHOT) {
                        list_del_init(&poll->wait.entry);
                        poll->head = NULL;
                        if (wqe_is_double(wait))
                                req->flags &= ~REQ_F_DOUBLE_POLL;
                        else
                                req->flags &= ~REQ_F_SINGLE_POLL;
                }
                __io_poll_execute(req, mask);
        }
        return 1;
}

/* fails only when the polling is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
        struct wait_queue_head *head;
        struct io_poll *poll = io_poll_get_single(req);

        /* head is RCU protected, see io_poll_remove_entries() comments */
        rcu_read_lock();
        head = smp_load_acquire(&poll->head);
        /*
         * poll arm might not hold ownership and so race for req->flags with
         * io_poll_wake(). There is only one poll entry queued, serialise with
         * it by taking its head lock. As we're still arming, the tw handler
         * is not going to be run, so there are no races with it.
         */
        if (head) {
                spin_lock_irq(&head->lock);
                req->flags |= REQ_F_DOUBLE_POLL;
                if (req->opcode == IORING_OP_POLL_ADD)
                        req->flags |= REQ_F_ASYNC_DATA;
                spin_unlock_irq(&head->lock);
        }
        rcu_read_unlock();
        return !!head;
}
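
/*
 * poll_table queueing callback, called by vfs_poll() for each waitqueue the
 * file wants us to wait on. The first head is attached to the primary
 * io_poll; a second head allocates the "double" entry, and any further
 * heads fail the arming with -EINVAL.
 */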
static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
                            struct wait_queue_head *head,
                            struct io_poll **poll_ptr)
{
        struct io_kiocb *req = pt->req;
        unsigned long wqe_private = (unsigned long) req;

        /*
         * The file being polled uses multiple waitqueues for poll handling
         * (e.g. one for read, one for write). Setup a separate io_poll
         * if this happens.
         */
        if (unlikely(pt->nr_entries)) {
                struct io_poll *first = poll;

                /* double add on the same waitqueue head, ignore */
                if (first->head == head)
                        return;
                /* already have a 2nd entry, fail a third attempt */
                if (*poll_ptr) {
                        if ((*poll_ptr)->head == head)
                                return;
                        pt->error = -EINVAL;
                        return;
                }

                poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
                if (!poll) {
                        pt->error = -ENOMEM;
                        return;
                }

                /* mark as double wq entry */
                wqe_private |= IO_WQE_F_DOUBLE;
                io_init_poll_iocb(poll, first->events);
                if (!io_poll_double_prepare(req)) {
                        /* the request is completing, just back off */
                        kfree(poll);
                        return;
                }
                *poll_ptr = poll;
        } else {
                /* fine to modify, there is no poll queued to race with us */
                req->flags |= REQ_F_SINGLE_POLL;
        }

        pt->nr_entries++;
        poll->head = head;
        poll->wait.private = (void *) wqe_private;

        if (poll->events & EPOLLEXCLUSIVE)
                add_wait_queue_exclusive(head, &poll->wait);
        else
                add_wait_queue(head, &poll->wait);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
                               struct poll_table_struct *p)
{
        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
        struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

        __io_queue_proc(poll, pt, head,
                        (struct io_poll **) &pt->req->async_data);
}

static bool io_poll_can_finish_inline(struct io_kiocb *req,
                                      struct io_poll_table *pt)
{
        return pt->owning || io_poll_get_ownership(req);
}

static void io_poll_add_hash(struct io_kiocb *req)
{
        if (req->flags & REQ_F_HASH_LOCKED)
                io_poll_req_insert_locked(req);
        else
                io_poll_req_insert(req);
}

/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
                                 struct io_poll *poll,
                                 struct io_poll_table *ipt, __poll_t mask,
                                 unsigned issue_flags)
{
        INIT_HLIST_NODE(&req->hash_node);
        io_init_poll_iocb(poll, mask);
        poll->file = req->file;
        req->apoll_events = poll->events;

        ipt->pt._key = mask;
        ipt->req = req;
        ipt->error = 0;
        ipt->nr_entries = 0;
        /*
         * Polling is either completed here or via task_work, so if we're in the
         * task context we're naturally serialised with tw by merit of running
         * the same task. When it's io-wq, take the ownership to prevent tw
         * from running. However, when we're in the task context, skip taking
         * it as an optimisation.
         *
         * Note: even though the request won't be completed/freed, without
         * ownership we still can race with io_poll_wake().
         * io_poll_can_finish_inline() tries to deal with that.
         */
        ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
        atomic_set(&req->poll_refs, (int)ipt->owning);

        /* io-wq doesn't hold uring_lock */
        if (issue_flags & IO_URING_F_UNLOCKED)
                req->flags &= ~REQ_F_HASH_LOCKED;

        /*
         * Exclusive waits may only wake a limited amount of entries
         * rather than all of them, this may interfere with lazy
         * wake if someone does wait(events > 1). Ensure we don't do
         * lazy wake for those, as we need to process each one as they
         * come in.
         */
        if (poll->events & EPOLLEXCLUSIVE)
                req->flags |= REQ_F_POLL_NO_LAZY;

        mask = vfs_poll(req->file, &ipt->pt) & poll->events;

        if (unlikely(ipt->error || !ipt->nr_entries)) {
                io_poll_remove_entries(req);

                if (!io_poll_can_finish_inline(req, ipt)) {
                        io_poll_mark_cancelled(req);
                        return 0;
                } else if (mask && (poll->events & EPOLLET)) {
                        ipt->result_mask = mask;
                        return 1;
                }
                return ipt->error ?: -EINVAL;
        }

        if (mask &&
           ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
                if (!io_poll_can_finish_inline(req, ipt)) {
                        io_poll_add_hash(req);
                        return 0;
                }
                io_poll_remove_entries(req);
                ipt->result_mask = mask;
                /* no one else has access to the req, forget about the ref */
                return 1;
        }

        io_poll_add_hash(req);

        if (mask && (poll->events & EPOLLET) &&
            io_poll_can_finish_inline(req, ipt)) {
                __io_poll_execute(req, mask);
                return 0;
        }
        io_napi_add(req);

        if (ipt->owning) {
                /*
                 * Try to release ownership. If we see a change of state, e.g.
                 * the poll was woken up, queue up a tw; it'll deal with it.
                 */
                if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
                        __io_poll_execute(req, 0);
        }
        return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
                                struct poll_table_struct *p)
{
        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
        struct async_poll *apoll = pt->req->apoll;

        __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

/*
 * We can't reliably detect loops in repeated poll triggers where the issue
 * subsequently keeps failing. Rather than fail these immediately, allow a
 * certain amount of retries before we give up. Given that this condition
 * should _rarely_ trigger even once, we should be fine with a larger value.
 */
#define APOLL_MAX_RETRY		128
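
/*
 * Get an async_poll container for the request: reuse the one from a
 * previous arming, pull one from the ctx cache when the ring lock is held,
 * or fall back to a plain allocation. Returns NULL on allocation failure or
 * once the retry budget is exhausted.
 */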
static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
                                             unsigned issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct async_poll *apoll;

        if (req->flags & REQ_F_POLLED) {
                apoll = req->apoll;
                kfree(apoll->double_poll);
        } else if (!(issue_flags & IO_URING_F_UNLOCKED)) {
                apoll = io_alloc_cache_get(&ctx->apoll_cache);
                if (!apoll)
                        goto alloc_apoll;
                apoll->poll.retries = APOLL_MAX_RETRY;
        } else {
alloc_apoll:
                apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
                if (unlikely(!apoll))
                        return NULL;
                apoll->poll.retries = APOLL_MAX_RETRY;
        }
        apoll->double_poll = NULL;
        req->apoll = apoll;
        if (unlikely(!--apoll->poll.retries))
                return NULL;
        return apoll;
}
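
/*
 * Arm async poll ("apoll") for a request that can't make progress right
 * now, instead of punting it to io-wq. The poll mask is derived from the
 * opcode's pollin/pollout definition. Returns IO_APOLL_OK if armed,
 * IO_APOLL_READY if the file is already ready and the request should be
 * retried, or IO_APOLL_ABORTED if polling can't be used.
 */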
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
        const struct io_issue_def *def = &io_issue_defs[req->opcode];
        struct async_poll *apoll;
        struct io_poll_table ipt;
        __poll_t mask = POLLPRI | POLLERR | EPOLLET;
        int ret;

        /*
         * apoll requests already grab the mutex to complete in the tw handler,
         * so removal from the mutex-backed hash is free, use it by default.
         */
        req->flags |= REQ_F_HASH_LOCKED;

        if (!def->pollin && !def->pollout)
                return IO_APOLL_ABORTED;
        if (!io_file_can_poll(req))
                return IO_APOLL_ABORTED;
        if (!(req->flags & REQ_F_APOLL_MULTISHOT))
                mask |= EPOLLONESHOT;

        if (def->pollin) {
                mask |= EPOLLIN | EPOLLRDNORM;

                /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
                if (req->flags & REQ_F_CLEAR_POLLIN)
                        mask &= ~EPOLLIN;
        } else {
                mask |= EPOLLOUT | EPOLLWRNORM;
        }
        if (def->poll_exclusive)
                mask |= EPOLLEXCLUSIVE;

        apoll = io_req_alloc_apoll(req, issue_flags);
        if (!apoll)
                return IO_APOLL_ABORTED;
        req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
        req->flags |= REQ_F_POLLED;
        ipt.pt._qproc = io_async_queue_proc;

        io_kbuf_recycle(req, issue_flags);

        ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
        if (ret)
                return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
        trace_io_uring_poll_arm(req, mask, apoll->poll.events);
        return IO_APOLL_OK;
}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
                                            struct io_hash_table *table,
                                            bool cancel_all)
{
        unsigned nr_buckets = 1U << table->hash_bits;
        struct hlist_node *tmp;
        struct io_kiocb *req;
        bool found = false;
        int i;

        for (i = 0; i < nr_buckets; i++) {
                struct io_hash_bucket *hb = &table->hbs[i];

                spin_lock(&hb->lock);
                hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
                        if (io_match_task_safe(req, tsk, cancel_all)) {
                                hlist_del_init(&req->hash_node);
                                io_poll_cancel_req(req);
                                found = true;
                        }
                }
                spin_unlock(&hb->lock);
        }
        return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
                               bool cancel_all)
        __must_hold(&ctx->uring_lock)
{
        bool ret;

        ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
        ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
        return ret;
}
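
/*
 * Look up a hashed poll request by user_data. On success the matching
 * bucket is returned via @out_bucket with its lock still held; the caller
 * is responsible for unlocking it.
 */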
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
                                     struct io_cancel_data *cd,
                                     struct io_hash_table *table,
                                     struct io_hash_bucket **out_bucket)
{
        struct io_kiocb *req;
        u32 index = hash_long(cd->data, table->hash_bits);
        struct io_hash_bucket *hb = &table->hbs[index];

        *out_bucket = NULL;

        spin_lock(&hb->lock);
        hlist_for_each_entry(req, &hb->list, hash_node) {
                if (cd->data != req->cqe.user_data)
                        continue;
                if (poll_only && req->opcode != IORING_OP_POLL_ADD)
                        continue;
                if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
                        if (io_cancel_match_sequence(req, cd->seq))
                                continue;
                }
                *out_bucket = hb;
                return req;
        }
        spin_unlock(&hb->lock);
        return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
                                          struct io_cancel_data *cd,
                                          struct io_hash_table *table,
                                          struct io_hash_bucket **out_bucket)
{
        unsigned nr_buckets = 1U << table->hash_bits;
        struct io_kiocb *req;
        int i;

        *out_bucket = NULL;

        for (i = 0; i < nr_buckets; i++) {
                struct io_hash_bucket *hb = &table->hbs[i];

                spin_lock(&hb->lock);
                hlist_for_each_entry(req, &hb->list, hash_node) {
                        if (io_cancel_req_match(req, cd)) {
                                *out_bucket = hb;
                                return req;
                        }
                }
                spin_unlock(&hb->lock);
        }
        return NULL;
}

static int io_poll_disarm(struct io_kiocb *req)
{
        if (!req)
                return -ENOENT;
        if (!io_poll_get_ownership(req))
                return -EALREADY;
        io_poll_remove_entries(req);
        hash_del(&req->hash_node);
        return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
                            struct io_hash_table *table)
{
        struct io_hash_bucket *bucket;
        struct io_kiocb *req;

        if (cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP |
                         IORING_ASYNC_CANCEL_ANY))
                req = io_poll_file_find(ctx, cd, table, &bucket);
        else
                req = io_poll_find(ctx, false, cd, table, &bucket);

        if (req)
                io_poll_cancel_req(req);
        if (bucket)
                spin_unlock(&bucket->lock);
        return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
                   unsigned issue_flags)
{
        int ret;

        ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
        if (ret != -ENOENT)
                return ret;

        io_ring_submit_lock(ctx, issue_flags);
        ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
        io_ring_submit_unlock(ctx, issue_flags);
        return ret;
}
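
/*
 * Convert the sqe's poll32_events into the kernel's internal event mask,
 * fixing up endianness and applying the oneshot/edge-triggered defaults
 * unless IORING_POLL_ADD_MULTI / IORING_POLL_ADD_LEVEL were requested.
 */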
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
                                     unsigned int flags)
{
        u32 events;

        events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
        events = swahw32(events);
#endif
        if (!(flags & IORING_POLL_ADD_MULTI))
                events |= EPOLLONESHOT;
        if (!(flags & IORING_POLL_ADD_LEVEL))
                events |= EPOLLET;
        return demangle_poll(events) |
               (events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
        u32 flags;

        if (sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
                      IORING_POLL_ADD_MULTI))
                return -EINVAL;
        /* meaningless without update */
        if (flags == IORING_POLL_ADD_MULTI)
                return -EINVAL;

        upd->old_user_data = READ_ONCE(sqe->addr);
        upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
        upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

        upd->new_user_data = READ_ONCE(sqe->off);
        if (!upd->update_user_data && upd->new_user_data)
                return -EINVAL;
        if (upd->update_events)
                upd->events = io_poll_parse_events(sqe, flags);
        else if (sqe->poll32_events)
                return -EINVAL;

        return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
        u32 flags;

        if (sqe->buf_index || sqe->off || sqe->addr)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~IORING_POLL_ADD_MULTI)
                return -EINVAL;
        if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
                return -EINVAL;

        poll->events = io_poll_parse_events(sqe, flags);
        return 0;
}

int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
        struct io_poll_table ipt;
        int ret;

        ipt.pt._qproc = io_poll_queue_proc;

        /*
         * If sqpoll or single issuer, there is no contention for ->uring_lock
         * and we'll end up holding it in tw handlers anyway.
         */
        if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
                req->flags |= REQ_F_HASH_LOCKED;

        ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
        if (ret > 0) {
                io_req_set_res(req, ipt.result_mask, 0);
                return IOU_OK;
        }
        return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}
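
/*
 * IORING_OP_POLL_REMOVE: find a previously armed IORING_OP_POLL_ADD request
 * by its user_data and either cancel it or, if requested, update its event
 * mask and/or user_data and re-arm it.
 */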
int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
        struct io_ring_ctx *ctx = req->ctx;
        struct io_cancel_data cd = { .ctx = ctx, .data = poll_update->old_user_data, };
        struct io_hash_bucket *bucket;
        struct io_kiocb *preq;
        int ret2, ret = 0;

        io_ring_submit_lock(ctx, issue_flags);
        preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
        ret2 = io_poll_disarm(preq);
        if (bucket)
                spin_unlock(&bucket->lock);
        if (!ret2)
                goto found;
        if (ret2 != -ENOENT) {
                ret = ret2;
                goto out;
        }

        preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
        ret2 = io_poll_disarm(preq);
        if (bucket)
                spin_unlock(&bucket->lock);
        if (ret2) {
                ret = ret2;
                goto out;
        }

found:
        if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
                ret = -EFAULT;
                goto out;
        }

        if (poll_update->update_events || poll_update->update_user_data) {
                /* only replace the event mask, keep the behaviour flags */
                if (poll_update->update_events) {
                        struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

                        poll->events &= ~0xffff;
                        poll->events |= poll_update->events & 0xffff;
                        poll->events |= IO_POLL_UNMASK;
                }
                if (poll_update->update_user_data)
                        preq->cqe.user_data = poll_update->new_user_data;

                ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
                /* successfully updated, don't complete poll request */
                if (!ret2 || ret2 == -EIOCBQUEUED)
                        goto out;
        }

        req_set_fail(preq);
        io_req_set_res(preq, -ECANCELED, 0);
        preq->io_task_work.func = io_req_task_complete;
        io_req_task_work_add(preq);
out:
        io_ring_submit_unlock(ctx, issue_flags);
        if (ret < 0) {
                req_set_fail(req);
                return ret;
        }
        /* complete update request, we're done with it */
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}