//! Timer state structures.
//!
//! This module contains the heart of the intrusive timer implementation, and as
//! such the structures inside are full of tricky concurrency and unsafe code.
//!
//! # Ground rules
//!
//! The heart of the timer implementation here is the [`TimerShared`] structure,
//! shared between the [`TimerEntry`] and the driver. Generally, we permit access
//! to [`TimerShared`] ONLY via either 1) a mutable reference to [`TimerEntry`] or
//! 2) a held driver lock.
//!
//! It follows from this that any changes made while holding BOTH 1 and 2 will
//! be reliably visible, regardless of ordering. This is because of the acq/rel
//! fences on the driver lock ensuring ordering with 2, and Rust mutable
//! reference rules for 1 (a mutable reference to an object can't be passed
//! between threads without an acq/rel barrier, and same-thread we have local
//! happens-before ordering).
//!
//! # State field
//!
//! Each timer has a state field associated with it. This field contains either
//! the current scheduled time, or a special flag value indicating its state.
//! This state can either indicate that the timer is on the 'pending' queue (and
//! thus will be fired with an `Ok(())` result soon) or that it has already been
//! fired/deregistered.
//!
//! This single state field allows for code that is firing the timer to
//! synchronize with any racing `reset` calls reliably.
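//!
//! Concretely, with the constants defined below (a sketch of the encoding;
//! `STATE_MIN_VALUE` marks the first reserved value):
//!
//! * `0 ..= STATE_MIN_VALUE - 1` - the scheduled expiration tick ("true when")
//! * `STATE_PENDING_FIRE` (`u64::MAX - 1`) - queued on the pending list, about
//!   to be fired with `Ok(())`
//! * `STATE_DEREGISTERED` (`u64::MAX`) - already fired or deregistered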
//!
//! # Cached vs true timeouts
//!
//! To make the common case of a timeout that is periodically reset before
//! expiration as lightweight as possible, we support optimistically lock-free
//! timer resets, in the case where a timer is rescheduled to a later point
//! than it was originally scheduled for.
//!
//! This is accomplished by lazily rescheduling timers. That is, we update the
//! state field with the true expiration of the timer from the holder of
//! the [`TimerEntry`]. When the driver services timers (i.e., whenever it's
//! walking lists of timers), it checks this "true when" value, and reschedules
//! based on it.
//!
//! We do, however, also need to track what the expiration time was when we
//! originally registered the timer; this is used to locate the right linked
//! list when the timer is being cancelled. This is referred to as the "cached
//! when" internally.
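//!
//! For example (tick values are illustrative): a timer registered at tick 100
//! has both its state field and its "cached when" set to 100. A lock-free
//! `reset` to tick 150 updates only the state field, so "cached when" stays
//! at 100. When the driver later walks the slot for tick 100, it observes the
//! true expiration of 150, declines to fire, and re-files the entry under
//! tick 150 (updating "cached when" to match).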
//!
//! There is of course a race condition between timer reset and timer
//! expiration. If the driver fails to observe the updated expiration time, it
//! could trigger expiration of the timer too early. However, because
//! [`mark_pending`][mark_pending] performs a compare-and-swap, it will
//! identify this race and refuse to mark the timer as pending.
//!
//! [mark_pending]: TimerHandle::mark_pending

use crate::loom::cell::UnsafeCell;
use crate::loom::sync::atomic::AtomicU64;
use crate::loom::sync::atomic::Ordering;

use crate::runtime::scheduler;
use crate::sync::AtomicWaker;
use crate::time::Instant;
use crate::util::linked_list;

use std::cell::UnsafeCell as StdUnsafeCell;
use std::task::{Context, Poll, Waker};
use std::{marker::PhantomPinned, pin::Pin, ptr::NonNull};

type TimerResult = Result<(), crate::time::error::Error>;

const STATE_DEREGISTERED: u64 = u64::MAX;
const STATE_PENDING_FIRE: u64 = STATE_DEREGISTERED - 1;
const STATE_MIN_VALUE: u64 = STATE_PENDING_FIRE;

/// This structure holds the current shared state of the timer - its scheduled
/// time (if registered), or otherwise the result of the timer completing, as
/// well as the registered waker.
///
/// Generally, the `StateCell` is only permitted to be accessed from two
/// contexts: either a thread holding the corresponding `&mut TimerEntry`, or
/// a thread holding the timer driver lock. The write actions on the
/// `StateCell` amount to passing "ownership" of the `StateCell` between these
/// contexts; moving a timer from the `TimerEntry` to the driver requires
/// _both_ holding the `&mut TimerEntry` and the driver lock, while moving it
/// back (firing the timer) requires only the driver lock.
pub(super) struct StateCell {
    /// Holds either the scheduled expiration time for this timer, or (if the
    /// timer has been fired and is unregistered), `u64::MAX`.
    state: AtomicU64,
    /// If the timer is fired (an Acquire-ordered read on state shows
    /// `u64::MAX`), holds the result that should be returned from
    /// polling the timer. Otherwise, the contents are unspecified and reading
    /// without holding the driver lock is undefined behavior.
    result: UnsafeCell<TimerResult>,
    /// The currently-registered waker.
    waker: CachePadded<AtomicWaker>,
}

impl Default for StateCell {
    fn default() -> Self {
        Self::new()
    }
}

impl std::fmt::Debug for StateCell {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "StateCell({:?})", self.read_state())
    }
}

impl StateCell {
    fn new() -> Self {
        Self {
            state: AtomicU64::new(STATE_DEREGISTERED),
            result: UnsafeCell::new(Ok(())),
            waker: CachePadded(AtomicWaker::new()),
        }
    }

    fn is_pending(&self) -> bool {
        self.state.load(Ordering::Relaxed) == STATE_PENDING_FIRE
    }

    /// Returns the current expiration time, or `None` if not currently scheduled.
    fn when(&self) -> Option<u64> {
        let cur_state = self.state.load(Ordering::Relaxed);

        if cur_state == u64::MAX {
            None
        } else {
            Some(cur_state)
        }
    }

    /// If the timer is completed, returns the result of the timer. Otherwise,
    /// returns `None` and registers the waker.
    fn poll(&self, waker: &Waker) -> Poll<TimerResult> {
        // We must register first. This ensures that either `fire` will
        // observe the new waker, or we will observe a racing fire to have set
        // the state, or both.
        self.waker.0.register_by_ref(waker);

        self.read_state()
    }

    fn read_state(&self) -> Poll<TimerResult> {
        let cur_state = self.state.load(Ordering::Acquire);

        if cur_state == STATE_DEREGISTERED {
            // SAFETY: The driver has fired this timer; this involves writing
            // the result, and then writing (with release ordering) the state
            // field.
            Poll::Ready(unsafe { self.result.with(|p| *p) })
        } else {
            Poll::Pending
        }
    }

    /// Marks this timer as being moved to the pending list, if its scheduled
    /// time is not after `not_after`.
    ///
    /// If the timer is scheduled for a time after `not_after`, returns an
    /// `Err` containing the current scheduled time.
    ///
    /// SAFETY: Must hold the driver lock.
    unsafe fn mark_pending(&self, not_after: u64) -> Result<(), u64> {
        // Quick initial debug check to see if the timer is already fired. Since
        // firing the timer can only happen with the driver lock held, we know
        // we shouldn't be able to "miss" a transition to a fired state, even
        // with relaxed ordering.
        let mut cur_state = self.state.load(Ordering::Relaxed);

        loop {
            // improve the error message for things like
            // https://github.com/tokio-rs/tokio/issues/3675
            assert!(
                cur_state < STATE_MIN_VALUE,
                "mark_pending called when the timer entry is in an invalid state"
            );

            if cur_state > not_after {
                break Err(cur_state);
            }

            match self.state.compare_exchange(
                cur_state,
                STATE_PENDING_FIRE,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => {
                    break Ok(());
                }
                Err(actual_state) => {
                    cur_state = actual_state;
                }
            }
        }
    }

    /// Fires the timer, setting the result to the provided result.
    ///
    /// Returns:
    /// * `Some(waker)` - if fired and a waker needs to be invoked once the
    ///   driver lock is released
    /// * `None` - if fired and a waker does not need to be invoked, or if
    ///   already fired
    ///
    /// SAFETY: The driver lock must be held.
    unsafe fn fire(&self, result: TimerResult) -> Option<Waker> {
        // Quick initial check to see if the timer is already fired. Since
        // firing the timer can only happen with the driver lock held, we know
        // we shouldn't be able to "miss" a transition to a fired state, even
        // with relaxed ordering.
        let cur_state = self.state.load(Ordering::Relaxed);
        if cur_state == STATE_DEREGISTERED {
            return None;
        }

        // SAFETY: We assume the driver lock is held and the timer is not
        // fired, so only the driver is accessing this field.
        //
        // We perform a release-ordered store to state below, to ensure this
        // write is visible before the state update is visible.
        unsafe { self.result.with_mut(|p| *p = result) };

        self.state.store(STATE_DEREGISTERED, Ordering::Release);

        self.waker.0.take_waker()
    }

    /// Marks the timer as registered (`poll` will return `Poll::Pending`) and
    /// sets the expiration time.
    ///
    /// While this function is memory-safe, it should only be called from a
    /// context holding both `&mut TimerEntry` and the driver lock.
    fn set_expiration(&self, timestamp: u64) {
        debug_assert!(timestamp < STATE_MIN_VALUE);

        // We can use relaxed ordering because we hold the driver lock and will
        // fence when we release the lock.
        self.state.store(timestamp, Ordering::Relaxed);
    }

    /// Attempts to adjust the timer to a new timestamp.
    ///
    /// If the timer has already been fired, is pending firing, or the new
    /// timestamp is earlier than the old timestamp, this returns `Err` without
    /// changing the timer's state (it may also fail spuriously). In this case,
    /// the timer must be deregistered and re-registered.
    fn extend_expiration(&self, new_timestamp: u64) -> Result<(), ()> {
        let mut prior = self.state.load(Ordering::Relaxed);
        loop {
            if new_timestamp < prior || prior >= STATE_MIN_VALUE {
                return Err(());
            }

            match self.state.compare_exchange_weak(
                prior,
                new_timestamp,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => {
                    return Ok(());
                }
                Err(true_prior) => {
                    prior = true_prior;
                }
            }
        }
    }

    /// Returns true if the state of this timer indicates that the timer might
    /// be registered with the driver. This check is performed with relaxed
    /// ordering, but is conservative - if it returns false, the timer is
    /// definitely _not_ registered.
    pub(super) fn might_be_registered(&self) -> bool {
        self.state.load(Ordering::Relaxed) != u64::MAX
    }
}
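
// Summary of the access contract sketched in the module docs (who may call
// what; derived from the SAFETY comments on the methods above):
//
//     poll / read_state       - holder of the `&mut TimerEntry`
//     extend_expiration       - holder of the `&mut TimerEntry` (lock-free fast path)
//     set_expiration          - both `&mut TimerEntry` and the driver lock
//     mark_pending / fire     - the driver lock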

/// A timer entry.
///
/// This is the handle to a timer that is controlled by the requester of the
/// timer. As this participates in intrusive data structures, it must be pinned
/// before polling.
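///
/// A minimal usage sketch (illustrative only; `handle` stands in for a
/// `scheduler::Handle`, and `cx` for a task context):
///
/// ```ignore
/// let entry = TimerEntry::new(&handle, Instant::now() + Duration::from_secs(1));
/// tokio::pin!(entry);
/// // The first poll registers the timer with the driver; `Poll::Ready` is
/// // returned once the deadline elapses (or the timer errors).
/// let _ = entry.as_mut().poll_elapsed(cx);
/// ```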
#[derive(Debug)]
pub(crate) struct TimerEntry {
    /// Arc reference to the runtime handle. We can only free the driver after
    /// deregistering everything from their respective timer wheels.
    driver: scheduler::Handle,
    /// Shared inner structure; this is part of an intrusive linked list, and
    /// therefore other references can exist to it while mutable references to
    /// Entry exist.
    ///
    /// This is manipulated only under the inner mutex. TODO: Can we use loom
    /// cells for this?
    inner: StdUnsafeCell<TimerShared>,
    /// Initial deadline for the timer. This is used to register on the first
    /// poll, as we can't register prior to being pinned.
    initial_deadline: Option<Instant>,
    /// Ensure the type is `!Unpin`.
    _m: std::marker::PhantomPinned,
}

unsafe impl Send for TimerEntry {}
unsafe impl Sync for TimerEntry {}

/// A `TimerHandle` is the (non-enforced) "unique" pointer from the driver to
/// the timer entry. Generally, at most one `TimerHandle` exists for a timer at
/// a time (enforced by the timer state machine).
///
/// SAFETY: A `TimerHandle` is essentially a raw pointer, and the usual caveats
/// of pointer safety apply. In particular, a `TimerHandle` does not itself
/// enforce that the timer still exists; however, normally a `TimerHandle` is
/// created immediately before registering the timer, and is consumed when
/// firing the timer, to help minimize mistakes. Still, because `TimerHandle`
/// cannot enforce memory safety, all operations are unsafe.
#[derive(Debug)]
pub(crate) struct TimerHandle {
    inner: NonNull<TimerShared>,
}

pub(super) type EntryList = crate::util::linked_list::LinkedList<TimerShared, TimerShared>;

/// The shared state structure of a timer. This structure is shared between the
/// frontend (`Entry`) and driver backend.
///
/// Note that this structure is located inside the `TimerEntry` structure.
#[derive(Debug)]
#[repr(C)]
pub(crate) struct TimerShared {
    /// Data manipulated by the driver thread itself, only.
    driver_state: CachePadded<TimerSharedPadded>,

    /// Current state. This records whether the timer entry is currently under
    /// the ownership of the driver, and if not, its current state (not
    /// complete, fired, error, etc).
    state: StateCell,

    _p: PhantomPinned,
}

generate_addr_of_methods! {
    impl<> TimerShared {
        unsafe fn addr_of_pointers(self: NonNull<Self>) -> NonNull<linked_list::Pointers<TimerShared>> {
            &self.driver_state.0.pointers
        }
    }
}

impl TimerShared {
    pub(super) fn new() -> Self {
        Self {
            state: StateCell::default(),
            driver_state: CachePadded(TimerSharedPadded::new()),
            _p: PhantomPinned,
        }
    }

    /// Gets the cached time-of-expiration value.
    pub(super) fn cached_when(&self) -> u64 {
        // Cached-when is only accessed under the driver lock, so we can use
        // relaxed ordering.
        self.driver_state.0.cached_when.load(Ordering::Relaxed)
    }

    /// Gets the true time-of-expiration value, and copies it into the cached
    /// time-of-expiration value.
    ///
    /// SAFETY: Must be called with the driver lock held, and when this entry is
    /// not in any timer wheel lists.
    pub(super) unsafe fn sync_when(&self) -> u64 {
        let true_when = self.true_when();

        self.driver_state
            .0
            .cached_when
            .store(true_when, Ordering::Relaxed);

        true_when
    }

    /// Sets the cached time-of-expiration value.
    ///
    /// SAFETY: Must be called with the driver lock held, and when this entry is
    /// not in any timer wheel lists.
    unsafe fn set_cached_when(&self, when: u64) {
        self.driver_state
            .0
            .cached_when
            .store(when, Ordering::Relaxed);
    }

    /// Returns the true time-of-expiration value, with relaxed memory ordering.
    pub(super) fn true_when(&self) -> u64 {
        self.state.when().expect("Timer already fired")
    }

    /// Sets the true time-of-expiration value, even if it is less than the
    /// current expiration or the timer is deregistered.
    ///
    /// SAFETY: Must only be called with the driver lock held and the entry not
    /// in the timer wheel.
    pub(super) unsafe fn set_expiration(&self, t: u64) {
        self.state.set_expiration(t);
        self.driver_state.0.cached_when.store(t, Ordering::Relaxed);
    }

    /// Sets the true time-of-expiration only if it is after the current one.
    pub(super) fn extend_expiration(&self, t: u64) -> Result<(), ()> {
        self.state.extend_expiration(t)
    }

    /// Returns a `TimerHandle` for this timer.
    pub(super) fn handle(&self) -> TimerHandle {
        TimerHandle {
            inner: NonNull::from(self),
        }
    }

    /// Returns true if the state of this timer indicates that the timer might
    /// be registered with the driver. This check is performed with relaxed
    /// ordering, but is conservative - if it returns false, the timer is
    /// definitely _not_ registered.
    pub(super) fn might_be_registered(&self) -> bool {
        self.state.might_be_registered()
    }
}

/// Additional shared state between the driver and the timer which is cache
/// padded. This contains the information that the driver thread accesses most
/// frequently to minimize contention. In particular, we move it away from the
/// waker, as the waker is updated on every poll.
struct TimerSharedPadded {
    /// A link within the doubly-linked list of timers on a particular level
    /// and slot. Valid only while the timer is registered.
    ///
    /// Only accessed under the entry lock.
    pointers: linked_list::Pointers<TimerShared>,

    /// The expiration time for which this entry is currently registered.
    /// Generally owned by the driver, but is accessed by the entry when not
    /// registered.
    cached_when: AtomicU64,

    /// The true expiration time. Set by the timer future, read by the driver.
    true_when: AtomicU64,
}

impl std::fmt::Debug for TimerSharedPadded {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("TimerSharedPadded")
            .field("when", &self.true_when.load(Ordering::Relaxed))
            .field("cached_when", &self.cached_when.load(Ordering::Relaxed))
            .finish()
    }
}

impl TimerSharedPadded {
    fn new() -> Self {
        Self {
            cached_when: AtomicU64::new(0),
            true_when: AtomicU64::new(0),
            pointers: linked_list::Pointers::new(),
        }
    }
}

unsafe impl Send for TimerShared {}
unsafe impl Sync for TimerShared {}

unsafe impl linked_list::Link for TimerShared {
    type Handle = TimerHandle;

    type Target = TimerShared;

    fn as_raw(handle: &Self::Handle) -> NonNull<Self::Target> {
        handle.inner
    }

    unsafe fn from_raw(ptr: NonNull<Self::Target>) -> Self::Handle {
        TimerHandle { inner: ptr }
    }

    unsafe fn pointers(
        target: NonNull<Self::Target>,
    ) -> NonNull<linked_list::Pointers<Self::Target>> {
        TimerShared::addr_of_pointers(target)
    }
}

// ===== impl Entry =====

impl TimerEntry {
    #[track_caller]
    pub(crate) fn new(handle: &scheduler::Handle, deadline: Instant) -> Self {
        // Panic if the time driver is not enabled.
        let _ = handle.driver().time();

        let driver = handle.clone();

        Self {
            driver,
            inner: StdUnsafeCell::new(TimerShared::new()),
            initial_deadline: Some(deadline),
            _m: std::marker::PhantomPinned,
        }
    }

    fn inner(&self) -> &TimerShared {
        unsafe { &*self.inner.get() }
    }

    pub(crate) fn is_elapsed(&self) -> bool {
        !self.inner().state.might_be_registered() && self.initial_deadline.is_none()
    }

    /// Cancels and deregisters the timer. This operation is irreversible.
    pub(crate) fn cancel(self: Pin<&mut Self>) {
        // We need to perform an acq/rel fence with the driver thread, and the
        // simplest way to do so is to grab the driver lock.
        //
        // Why is this necessary? We're about to release this timer's memory for
        // some other non-timer use. However, we've been doing a bunch of
        // relaxed (or even non-atomic) writes from the driver thread, and we'll
        // be doing more from _this thread_ (as this memory is interpreted as
        // something else).
        //
        // It is critical to ensure that, from the point of view of the driver,
        // those future non-timer writes happen-after the timer is fully fired,
        // and from the perspective of this thread, the driver's writes all
        // happen-before we drop the timer. This in turn requires us to perform
        // an acquire-release barrier in _both_ directions between the driver
        // and dropping thread.
        //
        // The lock acquisition in clear_entry serves this purpose. All of the
        // driver manipulations happen with the lock held, so we can just take
        // the lock and be sure that this drop happens-after everything the
        // driver did so far and happens-before everything the driver does in
        // the future. While we have the lock held, we also go ahead and
        // deregister the entry if necessary.
        unsafe { self.driver().clear_entry(NonNull::from(self.inner())) };
    }

    pub(crate) fn reset(mut self: Pin<&mut Self>, new_time: Instant) {
        // The initial deadline is consumed here; registration now happens
        // through this reset.
        unsafe { self.as_mut().get_unchecked_mut() }.initial_deadline = None;

        let tick = self.driver().time_source().deadline_to_tick(new_time);

        // Fast path: if the timer is being moved later, try the lock-free
        // extension described in the module docs ("Cached vs true timeouts").
        if self.inner().extend_expiration(tick).is_ok() {
            return;
        }

        // Slow path: take the driver lock and re-register the entry.
        unsafe {
            self.driver()
                .reregister(&self.driver.driver().io, tick, self.inner().into());
        }
    }

    pub(crate) fn poll_elapsed(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), super::Error>> {
        if self.driver().is_shutdown() {
            panic!("{}", crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR);
        }

        if let Some(deadline) = self.initial_deadline {
            self.as_mut().reset(deadline);
        }

        let this = unsafe { self.get_unchecked_mut() };

        this.inner().state.poll(cx.waker())
    }

    pub(crate) fn driver(&self) -> &super::Handle {
        self.driver.driver().time()
    }
}

impl TimerHandle {
    pub(super) unsafe fn cached_when(&self) -> u64 {
        unsafe { self.inner.as_ref().cached_when() }
    }

    pub(super) unsafe fn sync_when(&self) -> u64 {
        unsafe { self.inner.as_ref().sync_when() }
    }

    pub(super) unsafe fn is_pending(&self) -> bool {
        unsafe { self.inner.as_ref().state.is_pending() }
    }

    /// Forcibly sets the true and cached expiration times to the given tick.
    ///
    /// SAFETY: The caller must ensure that the handle remains valid, the driver
    /// lock is held, and that the timer is not in any wheel linked lists.
    pub(super) unsafe fn set_expiration(&self, tick: u64) {
        self.inner.as_ref().set_expiration(tick);
    }

    /// Attempts to mark this entry as pending. If the expiration time is after
    /// `not_after`, however, returns an `Err` with the current expiration time.
    ///
    /// If an `Err` is returned, the `cached_when` value will be updated to this
    /// new expiration time.
    ///
    /// SAFETY: The caller must ensure that the handle remains valid, the driver
    /// lock is held, and that the timer is not in any wheel linked lists.
    /// After returning `Ok`, the entry must be added to the pending list.
    pub(super) unsafe fn mark_pending(&self, not_after: u64) -> Result<(), u64> {
        match self.inner.as_ref().state.mark_pending(not_after) {
            Ok(()) => {
                // Mark this as being on the pending queue in `cached_when`.
                self.inner.as_ref().set_cached_when(u64::MAX);
                Ok(())
            }
            Err(tick) => {
                self.inner.as_ref().set_cached_when(tick);
                Err(tick)
            }
        }
    }

    /// Attempts to transition to a terminal state. If the state is already a
    /// terminal state, does nothing.
    ///
    /// Because the entry might be dropped after the state is moved to a
    /// terminal state, this function consumes the handle to ensure we don't
    /// access the entry afterwards.
    ///
    /// Returns the last-registered waker, if any.
    ///
    /// SAFETY: The driver lock must be held while invoking this function, and
    /// the entry must not be in any wheel linked lists.
    pub(super) unsafe fn fire(self, completed_state: TimerResult) -> Option<Waker> {
        self.inner.as_ref().state.fire(completed_state)
    }
}
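
// A sketch of how the driver side uses a `TimerHandle` (illustrative only;
// per the SAFETY comments above, the driver lock is held throughout, and
// `shared`/`now_tick` are stand-ins for driver state):
//
//     let handle = shared.handle();
//     if unsafe { handle.mark_pending(now_tick) }.is_ok() {
//         // The entry is placed on the pending list; later, still under the
//         // lock, the timer is fired:
//         if let Some(waker) = unsafe { handle.fire(Ok(())) } {
//             // Invoke the waker only after the driver lock is released.
//             waker.wake();
//         }
//     }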

impl Drop for TimerEntry {
    fn drop(&mut self) {
        unsafe { Pin::new_unchecked(self) }.as_mut().cancel()
    }
}

// Copied from [crossbeam/cache_padded](https://github.com/crossbeam-rs/crossbeam/blob/fa35346b7c789bba045ad789e894c68c466d1779/crossbeam-utils/src/cache_padded.rs#L62-L127)
//
// Starting from Intel's Sandy Bridge, the spatial prefetcher pulls pairs of
// 64-byte cache lines at a time, so we have to align to 128 bytes rather than 64.
//
// Sources:
// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
//
// ARM's big.LITTLE architecture has asymmetric cores and "big" cores have 128-byte cache line size.
//
// Sources:
// - https://www.mono-project.com/news/2016/09/12/arm64-icache/
//
// powerpc64 has 128-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_ppc64x.go#L9
#[cfg_attr(
    any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "powerpc64",
    ),
    repr(align(128))
)]
// arm, mips, mips64, and riscv64 have 32-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_arm.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mipsle.go#L7
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_mips64x.go#L9
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_riscv64.go#L7
#[cfg_attr(
    any(
        target_arch = "arm",
        target_arch = "mips",
        target_arch = "mips64",
        target_arch = "riscv64",
    ),
    repr(align(32))
)]
// s390x has 256-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_s390x.go#L7
#[cfg_attr(target_arch = "s390x", repr(align(256)))]
// x86 and wasm have 64-byte cache line size.
//
// Sources:
// - https://github.com/golang/go/blob/dda2991c2ea0c5914714469c4defc2562a907230/src/internal/cpu/cpu_x86.go#L9
// - https://github.com/golang/go/blob/3dd58676054223962cd915bb0934d1f9f489d4d2/src/internal/cpu/cpu_wasm.go#L7
//
// All others are assumed to have 64-byte cache line size.
#[cfg_attr(
    not(any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "powerpc64",
        target_arch = "arm",
        target_arch = "mips",
        target_arch = "mips64",
        target_arch = "riscv64",
        target_arch = "s390x",
    )),
    repr(align(64))
)]
#[derive(Debug, Default)]
struct CachePadded<T>(T);
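// A minimal sanity sketch of the padding's effect (illustrative only,
// assuming an x86_64 target, where the alignment chosen above is 128 bytes;
// `u64` stands in for any small field):
//
//     const _: () = assert!(std::mem::align_of::<CachePadded<u64>>() == 128);
//     const _: () = assert!(std::mem::size_of::<CachePadded<u64>>() == 128);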