use alloc::sync::{Arc, Weak};
use core::cell::UnsafeCell;
use core::sync::atomic::Ordering::{self, Relaxed, SeqCst};
use core::sync::atomic::{AtomicBool, AtomicPtr};

use super::abort::abort;
use super::ReadyToRunQueue;
use crate::task::{waker_ref, ArcWake, WakerRef};

pub(super) struct Task<Fut> {
    // The future
    pub(super) future: UnsafeCell<Option<Fut>>,

    // Next pointer for linked list tracking all active tasks (use
    // `spin_next_all` to read when access is shared across threads)
    pub(super) next_all: AtomicPtr<Task<Fut>>,

    // Previous task in linked list tracking all active tasks
    pub(super) prev_all: UnsafeCell<*const Task<Fut>>,

    // Length of the linked list tracking all active tasks when this node was
    // inserted (use `spin_next_all` to synchronize before reading when access
    // is shared across threads)
    pub(super) len_all: UnsafeCell<usize>,

    // Next pointer in ready to run queue
    pub(super) next_ready_to_run: AtomicPtr<Task<Fut>>,

    // Queue that we'll be enqueued to when woken
    pub(super) ready_to_run_queue: Weak<ReadyToRunQueue<Fut>>,

    // Whether or not this task is currently in the ready to run queue
    pub(super) queued: AtomicBool,

    // Whether the future was awoken during polling
    // It is possible for this flag to be set to `true` after polling has
    // finished; such a late wake is simply ignored.
    pub(super) woken: AtomicBool,
}

// `Task` can be sent across threads safely because it ensures that
// the underlying `Fut` type isn't touched from any of its methods.
//
// The parent (`super`) module is trusted not to access `future`
// across different threads.
unsafe impl<Fut> Send for Task<Fut> {}
unsafe impl<Fut> Sync for Task<Fut> {}

impl<Fut> ArcWake for Task<Fut> {
    fn wake_by_ref(arc_self: &Arc<Self>) {
        let inner = match arc_self.ready_to_run_queue.upgrade() {
            Some(inner) => inner,
            None => return,
        };

        arc_self.woken.store(true, Relaxed);

        // It's our job to enqueue this task into the ready to run queue. To
        // do this we set the `queued` flag, and if successful we then do the
        // actual queueing operation, ensuring that we're only queued once.
        //
        // Once the task is inserted, call `wake` to notify the parent task,
        // as it'll want to come along and run our task later.
        //
        // Note that we don't change the reference count of the task here;
        // we merely enqueue the raw pointer. The `FuturesUnordered`
        // implementation guarantees that if we set the `queued` flag, a
        // reference count is still held by the main `FuturesUnordered`
        // queue.
        let prev = arc_self.queued.swap(true, SeqCst);
        if !prev {
            inner.enqueue(Arc::as_ptr(arc_self));
            inner.waker.wake();
        }
    }
}
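
// A minimal sketch (not part of the original file) of the single-enqueue
// idiom in `wake_by_ref` above: `swap(true, SeqCst)` returns the previous
// value of the flag, so exactly one of any number of racing wakers sees
// `false` and performs the enqueue. The module name and the use of `std`
// threads are our assumptions for test builds, not part of this crate's API.
#[cfg(test)]
mod queued_flag_sketch {
    use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering::SeqCst};
    use std::sync::Arc;

    #[test]
    fn only_one_waker_enqueues() {
        let queued = Arc::new(AtomicBool::new(false));
        let enqueues = Arc::new(AtomicUsize::new(0));
        let handles: Vec<_> = (0..8)
            .map(|_| {
                let (queued, enqueues) = (queued.clone(), enqueues.clone());
                std::thread::spawn(move || {
                    // Mirrors `arc_self.queued.swap(true, SeqCst)`: only the
                    // thread that flips the flag from `false` may enqueue.
                    if !queued.swap(true, SeqCst) {
                        enqueues.fetch_add(1, SeqCst);
                    }
                })
            })
            .collect();
        for h in handles {
            h.join().unwrap();
        }
        // However many wakes race, the "task" is enqueued exactly once.
        assert_eq!(enqueues.load(SeqCst), 1);
    }
}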

impl<Fut> Task<Fut> {
    /// Returns a waker reference for this task without cloning the Arc.
    pub(super) fn waker_ref(this: &Arc<Self>) -> WakerRef<'_> {
        waker_ref(this)
    }

    /// Spins until `next_all` is no longer set to `pending_next_all`.
    ///
    /// The temporary `pending_next_all` value is typically overwritten fairly
    /// quickly after a node is inserted into the list of all futures, so this
    /// should rarely spin much.
    ///
    /// On return, the loaded value is the correct `next_all` pointer.
    ///
    /// `Relaxed` or `Acquire` ordering can be used. `Acquire` ordering must be
    /// used before `len_all` can be safely read.
    #[inline]
    pub(super) fn spin_next_all(
        &self,
        pending_next_all: *mut Self,
        ordering: Ordering,
    ) -> *const Self {
        loop {
            let next = self.next_all.load(ordering);
            if next != pending_next_all {
                return next;
            }
        }
    }
}
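
// A minimal sketch (not part of the original file) of the sentinel-spin
// pattern behind `spin_next_all`: an inserter first publishes a known
// sentinel, then replaces it with the real value; readers spin until the
// sentinel is gone. `usize` values stand in for the raw pointers, and the
// use of `std` scoped threads is an assumption for test builds.
#[cfg(test)]
mod spin_next_all_sketch {
    use std::sync::atomic::{
        AtomicUsize,
        Ordering::{Acquire, Release},
    };

    // Stand-ins for `pending_next_all` and the final `next_all` pointer.
    const PENDING: usize = usize::MAX;
    const REAL: usize = 0xA11CE;

    #[test]
    fn reader_spins_past_the_sentinel() {
        let next_all = AtomicUsize::new(PENDING);
        std::thread::scope(|s| {
            // Writer: overwrite the sentinel shortly after "insertion".
            s.spawn(|| next_all.store(REAL, Release));
            // Reader: the same loop shape as `spin_next_all`.
            let got = loop {
                let next = next_all.load(Acquire);
                if next != PENDING {
                    break next;
                }
            };
            assert_eq!(got, REAL);
        });
    }
}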

impl<Fut> Drop for Task<Fut> {
    fn drop(&mut self) {
        // Since `Task<Fut>` is sent across all threads for any lifetime,
        // regardless of `Fut`, to guarantee memory safety we can't actually
        // touch `Fut` at any time except when we have a reference to the
        // `FuturesUnordered` itself.
        //
        // Consequently, it *should* be the case that we always drop futures
        // from the `FuturesUnordered` instance. This is a bomb, just in case
        // there's a bug in that logic.
        unsafe {
            if (*self.future.get()).is_some() {
                abort("future still here when dropping");
            }
        }
    }
}
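
// A minimal sketch (not part of the original file) of the "drop bomb"
// pattern used in `Task::drop` above: a guard whose destructor loudly
// flags a broken invariant instead of letting it pass silently. The
// `DropBomb` name is hypothetical, and the sketch panics where the real
// code aborts, since `abort` is internal to this crate.
#[cfg(test)]
mod drop_bomb_sketch {
    struct DropBomb<T>(Option<T>);

    impl<T> DropBomb<T> {
        // Defuse the bomb by taking the payload out before the drop.
        fn take(&mut self) -> Option<T> {
            self.0.take()
        }
    }

    impl<T> Drop for DropBomb<T> {
        fn drop(&mut self) {
            // Mirrors the `is_some()` check in `Task::drop`.
            if self.0.is_some() {
                panic!("payload still here when dropping");
            }
        }
    }

    #[test]
    fn defused_bomb_drops_quietly() {
        let mut bomb = DropBomb(Some(42));
        assert_eq!(bomb.take(), Some(42));
        // `bomb` now drops without firing.
    }
}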