//! Timer state structures.
//!
//! This module contains the heart of the intrusive timer implementation, and as
//! such the structures inside are full of tricky concurrency and unsafe code.
//!
//! # Ground rules
//!
//! The heart of the timer implementation here is the `TimerShared` structure,
//! shared between the `TimerEntry` and the driver. Generally, we permit access
//! to `TimerShared` ONLY via either 1) a mutable reference to `TimerEntry` or
//! 2) a held driver lock.
//!
//! It follows from this that any changes made while holding BOTH 1 and 2 will
//! be reliably visible, regardless of ordering. This is because of the acq/rel
//! fences on the driver lock ensuring ordering with 2, and Rust mutable
//! reference rules for 1 (a mutable reference to an object can't be passed
//! between threads without an acq/rel barrier, and same-thread we have local
//! happens-before ordering).
//!
//! # State field
//!
//! Each timer has a state field associated with it. This field contains either
//! the current scheduled time, or a special flag value indicating its state.
//! This state can either indicate that the timer is on the 'pending' queue (and
//! thus will be fired with an `Ok(())` result soon) or that it has already been
//! fired/deregistered.
//!
//! This single state field allows for code that is firing the timer to
//! synchronize with any racing `reset` calls reliably.
//!
//! # Cached vs true timeouts
//!
//! To allow for the use case of a timeout that is periodically reset before
//! expiration to be as lightweight as possible, we support optimistically
//! lock-free timer resets, in the case where a timer is rescheduled to a later
//! point than it was originally scheduled for.
//!
//! This is accomplished by lazily rescheduling timers. That is, we update the
//! state field with the true expiration of the timer from the holder of
//! the [`TimerEntry`]. When the driver services timers (i.e., whenever it's
//! walking lists of timers), it checks this "true when" value, and reschedules
//! based on it.
//!
//! We do, however, also need to track what the expiration time was when we
//! originally registered the timer; this is used to locate the right linked
//! list when the timer is being cancelled. This is referred to as the "cached
//! when" internally.
//!
//! There is of course a race condition between timer reset and timer
//! expiration. If the driver fails to observe the updated expiration time, it
//! could trigger expiration of the timer too early. However, because
//! `mark_pending` performs a compare-and-swap, it will identify this race and
//! refuse to mark the timer as pending.
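//!
//! # State encoding sketch
//!
//! As a rough illustration only (`decode` below is a hypothetical helper, not
//! part of this module), the `state` word is interpreted as follows; see the
//! `STATE_*` constants defined below:
//!
//! ```ignore
//! fn decode(state: u64) -> &'static str {
//!     match state {
//!         u64::MAX => "fired/deregistered (STATE_DEREGISTERED)",
//!         s if s == u64::MAX - 1 => "on the pending queue (STATE_PENDING_FIRE)",
//!         // Anything below STATE_MIN_VALUE is a plain expiration tick.
//!         _ => "scheduled; the value is the expiration tick",
//!     }
//! }
//! ```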

use crate::loom::cell::UnsafeCell;
use crate::loom::sync::atomic::AtomicU64;
use crate::loom::sync::atomic::Ordering;

use crate::sync::AtomicWaker;
use crate::time::Instant;
use crate::util::linked_list;

use super::Handle;

use std::cell::UnsafeCell as StdUnsafeCell;
use std::task::{Context, Poll, Waker};
use std::{marker::PhantomPinned, pin::Pin, ptr::NonNull};

type TimerResult = Result<(), crate::time::error::Error>;

const STATE_DEREGISTERED: u64 = u64::max_value();
const STATE_PENDING_FIRE: u64 = STATE_DEREGISTERED - 1;
const STATE_MIN_VALUE: u64 = STATE_PENDING_FIRE;

/// This structure holds the current shared state of the timer - its scheduled
/// time (if registered), or otherwise the result of the timer completing, as
/// well as the registered waker.
///
/// Generally, the StateCell is only permitted to be accessed from two contexts:
/// Either a thread holding the corresponding &mut TimerEntry, or a thread
/// holding the timer driver lock. The write actions on the StateCell amount to
/// passing "ownership" of the StateCell between these contexts; moving a timer
/// from the TimerEntry to the driver requires _both_ holding the &mut
/// TimerEntry and the driver lock, while moving it back (firing the timer)
/// requires only the driver lock.
pub(super) struct StateCell {
    /// Holds either the scheduled expiration time for this timer, or (if the
    /// timer has been fired and is unregistered), [`u64::max_value()`].
    state: AtomicU64,
    /// If the timer is fired (an Acquire order read on state shows
    /// `u64::max_value()`), holds the result that should be returned from
    /// polling the timer. Otherwise, the contents are unspecified and reading
    /// without holding the driver lock is undefined behavior.
    result: UnsafeCell<TimerResult>,
    /// The currently-registered waker.
    waker: CachePadded<AtomicWaker>,
}

impl Default for StateCell {
    fn default() -> Self {
        Self::new()
    }
}

impl std::fmt::Debug for StateCell {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "StateCell({:?})", self.read_state())
    }
}

impl StateCell {
    fn new() -> Self {
        Self {
            state: AtomicU64::new(STATE_DEREGISTERED),
            result: UnsafeCell::new(Ok(())),
            waker: CachePadded(AtomicWaker::new()),
        }
    }

    fn is_pending(&self) -> bool {
        self.state.load(Ordering::Relaxed) == STATE_PENDING_FIRE
    }

    /// Returns the current expiration time, or None if not currently scheduled.
    fn when(&self) -> Option<u64> {
        let cur_state = self.state.load(Ordering::Relaxed);

        if cur_state == u64::max_value() {
            None
        } else {
            Some(cur_state)
        }
    }

    /// If the timer is completed, returns the result of the timer. Otherwise,
    /// returns `Poll::Pending` and registers the waker.
    fn poll(&self, waker: &Waker) -> Poll<TimerResult> {
        // We must register first. This ensures that either `fire` will
        // observe the new waker, or we will observe a racing fire to have set
        // the state, or both.
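        //
        // An illustrative interleaving of this register-then-check pattern
        // (a sketch only; `fire` runs on the driver thread, lock held):
        //
        //   this thread (`poll`):        driver thread (`fire`):
        //   register_by_ref(waker)       write `result`
        //   read_state()                 store `state` (Release)
        //                                take_waker()
        //
        // However the race resolves, either `take_waker` sees our waker (and
        // we are woken), or our Acquire load in `read_state` sees the fired
        // state (and we return Ready immediately), or both.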
        self.waker.0.register_by_ref(waker);

        self.read_state()
    }

    fn read_state(&self) -> Poll<TimerResult> {
        let cur_state = self.state.load(Ordering::Acquire);

        if cur_state == STATE_DEREGISTERED {
            // SAFETY: The driver has fired this timer; this involves writing
            // the result, and then writing (with release ordering) the state
            // field.
            Poll::Ready(unsafe { self.result.with(|p| *p) })
        } else {
            Poll::Pending
        }
    }

    /// Marks this timer as being moved to the pending list, if its scheduled
    /// time is not after `not_after`.
    ///
    /// If the timer is scheduled for a time after `not_after`, returns an Err
    /// containing the current scheduled time.
    ///
    /// SAFETY: Must hold the driver lock.
    unsafe fn mark_pending(&self, not_after: u64) -> Result<(), u64> {
        // Quick initial debug check to see if the timer is already fired. Since
        // firing the timer can only happen with the driver lock held, we know
        // we shouldn't be able to "miss" a transition to a fired state, even
        // with relaxed ordering.
        let mut cur_state = self.state.load(Ordering::Relaxed);

        loop {
            debug_assert!(cur_state < STATE_MIN_VALUE);

            if cur_state > not_after {
                break Err(cur_state);
            }

            match self.state.compare_exchange(
                cur_state,
                STATE_PENDING_FIRE,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => {
                    break Ok(());
                }
                Err(actual_state) => {
                    cur_state = actual_state;
                }
            }
        }
    }

    /// Fires the timer, setting the result to the provided result.
    ///
    /// Returns:
    /// * `Some(waker)` - if fired and a waker needs to be invoked once the
    ///   driver lock is released
    /// * `None` - if fired and a waker does not need to be invoked, or if
    ///   already fired
    ///
    /// SAFETY: The driver lock must be held.
    unsafe fn fire(&self, result: TimerResult) -> Option<Waker> {
        // Quick initial check to see if the timer is already fired. Since
        // firing the timer can only happen with the driver lock held, we know
        // we shouldn't be able to "miss" a transition to a fired state, even
        // with relaxed ordering.
        let cur_state = self.state.load(Ordering::Relaxed);
        if cur_state == STATE_DEREGISTERED {
            return None;
        }

        // SAFETY: We assume the driver lock is held and the timer is not
        // fired, so only the driver is accessing this field.
        //
        // We perform a release-ordered store to state below, to ensure this
        // write is visible before the state update is visible.
        unsafe { self.result.with_mut(|p| *p = result) };

        self.state.store(STATE_DEREGISTERED, Ordering::Release);

        self.waker.0.take_waker()
    }

    /// Marks the timer as registered (poll will return `Poll::Pending`) and
    /// sets the expiration time.
    ///
    /// While this function is memory-safe, it should only be called from a
    /// context holding both `&mut TimerEntry` and the driver lock.
    fn set_expiration(&self, timestamp: u64) {
        debug_assert!(timestamp < STATE_MIN_VALUE);

        // We can use relaxed ordering because we hold the driver lock and will
        // fence when we release the lock.
        self.state.store(timestamp, Ordering::Relaxed);
    }
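
    // An illustrative sketch of the reset/expiration race that
    // `extend_expiration` (below) and `mark_pending` (above) resolve between
    // them; the interleaving shown is descriptive, not code:
    //
    //   holder thread (`reset`):        driver thread (servicing):
    //   extend_expiration(new_tick)     mark_pending(not_after)
    //     CAS: old_tick -> new_tick       load state -> new_tick
    //                                     new_tick > not_after, so return
    //                                     Err(new_tick): reschedule, not fire
    //
    // If instead the driver's CAS to STATE_PENDING_FIRE wins, the holder's
    // `extend_expiration` observes a state >= STATE_MIN_VALUE and returns
    // Err, and `TimerEntry::reset` falls back to a locked re-registration.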

    /// Attempts to adjust the timer to a new timestamp.
    ///
    /// If the timer has already been fired, is pending firing, or the new
    /// timestamp is earlier than the old timestamp, this returns Err (possibly
    /// spuriously) without changing the timer's state. In this case, the timer
    /// must be deregistered and re-registered.
    fn extend_expiration(&self, new_timestamp: u64) -> Result<(), ()> {
        let mut prior = self.state.load(Ordering::Relaxed);
        loop {
            if new_timestamp < prior || prior >= STATE_MIN_VALUE {
                return Err(());
            }

            match self.state.compare_exchange_weak(
                prior,
                new_timestamp,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => {
                    return Ok(());
                }
                Err(true_prior) => {
                    prior = true_prior;
                }
            }
        }
    }

    /// Returns true if the state of this timer indicates that the timer might
    /// be registered with the driver. This check is performed with relaxed
    /// ordering, but is conservative - if it returns false, the timer is
    /// definitely _not_ registered.
    pub(super) fn might_be_registered(&self) -> bool {
        self.state.load(Ordering::Relaxed) != u64::max_value()
    }
}

/// A timer entry.
///
/// This is the handle to a timer that is controlled by the requester of the
/// timer. As this participates in intrusive data structures, it must be pinned
/// before polling.
#[derive(Debug)]
pub(super) struct TimerEntry {
    /// Arc reference to the driver. We can only free the driver after
    /// deregistering everything from their respective timer wheels.
    driver: Handle,
    /// Shared inner structure; this is part of an intrusive linked list, and
    /// therefore other references can exist to it while mutable references to
    /// Entry exist.
    ///
    /// This is manipulated only under the inner mutex. TODO: Can we use loom
    /// cells for this?
    inner: StdUnsafeCell<TimerShared>,
    /// Initial deadline for the timer. This is used to register on the first
    /// poll, as we can't register prior to being pinned.
    initial_deadline: Option<Instant>,
    /// Ensure the type is !Unpin
    _m: std::marker::PhantomPinned,
}

unsafe impl Send for TimerEntry {}
unsafe impl Sync for TimerEntry {}

/// A TimerHandle is the (non-enforced) "unique" pointer from the driver to the
/// timer entry. Generally, at most one TimerHandle exists for a timer at a time
/// (enforced by the timer state machine).
///
/// SAFETY: A TimerHandle is essentially a raw pointer, and the usual caveats
/// of pointer safety apply. In particular, TimerHandle does not itself enforce
/// that the timer does still exist; however, normally a TimerHandle is created
/// immediately before registering the timer, and is consumed when firing the
/// timer, to help minimize mistakes. Still, because TimerHandle cannot enforce
/// memory safety, all operations are unsafe.
#[derive(Debug)]
pub(crate) struct TimerHandle {
    inner: NonNull<TimerShared>,
}

pub(super) type EntryList = crate::util::linked_list::LinkedList<TimerShared, TimerShared>;

/// The shared state structure of a timer. This structure is shared between the
/// frontend (`Entry`) and driver backend.
///
/// Note that this structure is located inside the `TimerEntry` structure.
#[derive(Debug)]
pub(crate) struct TimerShared {
    /// Current state. This records whether the timer entry is currently under
    /// the ownership of the driver, and if not, its current state (not
    /// complete, fired, error, etc).
    state: StateCell,

    /// Data manipulated by the driver thread itself, only.
    driver_state: CachePadded<TimerSharedPadded>,

    _p: PhantomPinned,
}

impl TimerShared {
    pub(super) fn new() -> Self {
        Self {
            state: StateCell::default(),
            driver_state: CachePadded(TimerSharedPadded::new()),
            _p: PhantomPinned,
        }
    }

    /// Gets the cached time-of-expiration value.
    pub(super) fn cached_when(&self) -> u64 {
        // Cached-when is only accessed under the driver lock, so we can use relaxed
        self.driver_state.0.cached_when.load(Ordering::Relaxed)
    }

    /// Gets the true time-of-expiration value, and copies it into the cached
    /// time-of-expiration value.
    ///
    /// SAFETY: Must be called with the driver lock held, and when this entry is
    /// not in any timer wheel lists.
    pub(super) unsafe fn sync_when(&self) -> u64 {
        let true_when = self.true_when();

        self.driver_state
            .0
            .cached_when
            .store(true_when, Ordering::Relaxed);

        true_when
    }

    /// Sets the cached time-of-expiration value.
    ///
    /// SAFETY: Must be called with the driver lock held, and when this entry is
    /// not in any timer wheel lists.
    unsafe fn set_cached_when(&self, when: u64) {
        self.driver_state
            .0
            .cached_when
            .store(when, Ordering::Relaxed);
    }

    /// Returns the true time-of-expiration value, with relaxed memory ordering.
    pub(super) fn true_when(&self) -> u64 {
        self.state.when().expect("Timer already fired")
    }

    /// Sets the true time-of-expiration value, even if it is less than the
    /// current expiration or the timer is deregistered.
    ///
    /// SAFETY: Must only be called with the driver lock held and the entry not
    /// in the timer wheel.
    pub(super) unsafe fn set_expiration(&self, t: u64) {
        self.state.set_expiration(t);
        self.driver_state.0.cached_when.store(t, Ordering::Relaxed);
    }

    /// Sets the true time-of-expiration only if it is after the current.
    pub(super) fn extend_expiration(&self, t: u64) -> Result<(), ()> {
        self.state.extend_expiration(t)
    }

    /// Returns a TimerHandle for this timer.
    pub(super) fn handle(&self) -> TimerHandle {
        TimerHandle {
            inner: NonNull::from(self),
        }
    }

    /// Returns true if the state of this timer indicates that the timer might
    /// be registered with the driver. This check is performed with relaxed
    /// ordering, but is conservative - if it returns false, the timer is
    /// definitely _not_ registered.
    pub(super) fn might_be_registered(&self) -> bool {
        self.state.might_be_registered()
    }
}
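
// Illustrative driver-side flow (a sketch only; the real servicing logic
// lives in the wheel/driver code, and `reinsert` is a hypothetical helper
// named here purely for illustration): when the driver walks a wheel slot
// and finds an entry whose true expiration has been lazily moved later than
// the slot it is linked into, it refreshes `cached_when` and re-links the
// entry rather than firing it:
//
//   let when = unsafe { handle.sync_when() }; // copy true when -> cached when
//   reinsert(&mut wheel, handle, when);       // hypothetical re-link step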

/// Additional shared state between the driver and the timer which is cache
/// padded. This contains the information that the driver thread accesses most
/// frequently to minimize contention. In particular, we move it away from the
/// waker, as the waker is updated on every poll.
struct TimerSharedPadded {
    /// The expiration time for which this entry is currently registered.
    /// Generally owned by the driver, but is accessed by the entry when not
    /// registered.
    cached_when: AtomicU64,

    /// The true expiration time. Set by the timer future, read by the driver.
    true_when: AtomicU64,

    /// A link within the doubly-linked list of timers on a particular level and
    /// slot. Valid only if state is equal to Registered.
    ///
    /// Only accessed under the entry lock.
    pointers: StdUnsafeCell<linked_list::Pointers<TimerShared>>,
}

impl std::fmt::Debug for TimerSharedPadded {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("TimerSharedPadded")
            .field("when", &self.true_when.load(Ordering::Relaxed))
            .field("cached_when", &self.cached_when.load(Ordering::Relaxed))
            .finish()
    }
}

impl TimerSharedPadded {
    fn new() -> Self {
        Self {
            cached_when: AtomicU64::new(0),
            true_when: AtomicU64::new(0),
            pointers: StdUnsafeCell::new(linked_list::Pointers::new()),
        }
    }
}

unsafe impl Send for TimerShared {}
unsafe impl Sync for TimerShared {}

unsafe impl linked_list::Link for TimerShared {
    type Handle = TimerHandle;

    type Target = TimerShared;

    fn as_raw(handle: &Self::Handle) -> NonNull<Self::Target> {
        handle.inner
    }

    unsafe fn from_raw(ptr: NonNull<Self::Target>) -> Self::Handle {
        TimerHandle { inner: ptr }
    }

    unsafe fn pointers(
        target: NonNull<Self::Target>,
    ) -> NonNull<linked_list::Pointers<Self::Target>> {
        unsafe { NonNull::new(target.as_ref().driver_state.0.pointers.get()).unwrap() }
    }
}

// ===== impl Entry =====

impl TimerEntry {
    pub(crate) fn new(handle: &Handle, deadline: Instant) -> Self {
        let driver = handle.clone();

        Self {
            driver,
            inner: StdUnsafeCell::new(TimerShared::new()),
            initial_deadline: Some(deadline),
            _m: std::marker::PhantomPinned,
        }
    }

    fn inner(&self) -> &TimerShared {
        unsafe { &*self.inner.get() }
    }

    pub(crate) fn is_elapsed(&self) -> bool {
        !self.inner().state.might_be_registered() && self.initial_deadline.is_none()
    }

    /// Cancels and deregisters the timer. This operation is irreversible.
    pub(crate) fn cancel(self: Pin<&mut Self>) {
        // We need to perform an acq/rel fence with the driver thread, and the
        // simplest way to do so is to grab the driver lock.
        //
        // Why is this necessary? We're about to release this timer's memory for
        // some other non-timer use. However, we've been doing a bunch of
        // relaxed (or even non-atomic) writes from the driver thread, and we'll
        // be doing more from _this thread_ (as this memory is interpreted as
        // something else).
        //
        // It is critical to ensure that, from the point of view of the driver,
        // those future non-timer writes happen-after the timer is fully fired,
        // and from the perspective of this thread, the driver's writes all
        // happen-before we drop the timer.
        // This in turn requires us to perform an acquire-release barrier in
        // _both_ directions between the driver and dropping thread.
        //
        // The lock acquisition in clear_entry serves this purpose. All of the
        // driver manipulations happen with the lock held, so we can just take
        // the lock and be sure that this drop happens-after everything the
        // driver did so far and happens-before everything the driver does in
        // the future. While we have the lock held, we also go ahead and
        // deregister the entry if necessary.
        unsafe { self.driver.clear_entry(NonNull::from(self.inner())) };
    }

    pub(crate) fn reset(mut self: Pin<&mut Self>, new_time: Instant) {
        unsafe { self.as_mut().get_unchecked_mut() }.initial_deadline = None;

        let tick = self.driver.time_source().deadline_to_tick(new_time);

        if self.inner().extend_expiration(tick).is_ok() {
            return;
        }

        unsafe {
            self.driver.reregister(tick, self.inner().into());
        }
    }

    pub(crate) fn poll_elapsed(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), super::Error>> {
        if self.driver.is_shutdown() {
            panic!("{}", crate::util::error::RUNTIME_SHUTTING_DOWN_ERROR);
        }

        if let Some(deadline) = self.initial_deadline {
            self.as_mut().reset(deadline);
        }

        let this = unsafe { self.get_unchecked_mut() };

        this.inner().state.poll(cx.waker())
    }
}

impl TimerHandle {
    pub(super) unsafe fn cached_when(&self) -> u64 {
        unsafe { self.inner.as_ref().cached_when() }
    }

    pub(super) unsafe fn sync_when(&self) -> u64 {
        unsafe { self.inner.as_ref().sync_when() }
    }

    pub(super) unsafe fn is_pending(&self) -> bool {
        unsafe { self.inner.as_ref().state.is_pending() }
    }

    /// Forcibly sets the true and cached expiration times to the given tick.
    ///
    /// SAFETY: The caller must ensure that the handle remains valid, the driver
    /// lock is held, and that the timer is not in any wheel linked lists.
    pub(super) unsafe fn set_expiration(&self, tick: u64) {
        self.inner.as_ref().set_expiration(tick);
    }

    /// Attempts to mark this entry as pending. If the expiration time is after
    /// `not_after`, however, returns an Err with the current expiration time.
    ///
    /// If an `Err` is returned, the `cached_when` value will be updated to this
    /// new expiration time.
    ///
    /// SAFETY: The caller must ensure that the handle remains valid, the driver
    /// lock is held, and that the timer is not in any wheel linked lists.
    /// After returning Ok, the entry must be added to the pending list.
    pub(super) unsafe fn mark_pending(&self, not_after: u64) -> Result<(), u64> {
        match self.inner.as_ref().state.mark_pending(not_after) {
            Ok(()) => {
                // Mark this as being on the pending queue in cached_when.
                self.inner.as_ref().set_cached_when(u64::max_value());
                Ok(())
            }
            Err(tick) => {
                self.inner.as_ref().set_cached_when(tick);
                Err(tick)
            }
        }
    }

    /// Attempts to transition to a terminal state. If the state is already a
    /// terminal state, does nothing.
    ///
    /// Because the entry might be dropped after the state is moved to a
    /// terminal state, this function consumes the handle to ensure we don't
    /// access the entry afterwards.
    ///
    /// Returns the last-registered waker, if any.
    ///
    /// SAFETY: The driver lock must be held while invoking this function, and
    /// the entry must not be in any wheel linked lists.
    pub(super) unsafe fn fire(self, completed_state: TimerResult) -> Option<Waker> {
        self.inner.as_ref().state.fire(completed_state)
    }
}

impl Drop for TimerEntry {
    fn drop(&mut self) {
        unsafe { Pin::new_unchecked(self) }.as_mut().cancel()
    }
}

#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
#[derive(Debug, Default)]
struct CachePadded<T>(T);
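
#[cfg(all(test, not(loom)))]
mod state_cell_sketch {
    // A minimal, illustrative smoke test of the `StateCell` state machine
    // (a sketch, not part of the original file). It runs single-threaded,
    // which stands in for the "driver lock held" requirement on the unsafe
    // methods exercised below.
    use super::*;

    #[test]
    fn lifecycle() {
        let cell = StateCell::new();

        // Starts deregistered: no expiration time, definitely not registered.
        assert_eq!(cell.when(), None);
        assert!(!cell.might_be_registered());

        // Register an expiration tick.
        cell.set_expiration(100);
        assert_eq!(cell.when(), Some(100));

        // Lock-free extension to a later tick succeeds...
        assert!(cell.extend_expiration(200).is_ok());
        // ...but moving earlier fails, forcing a locked re-registration.
        assert!(cell.extend_expiration(50).is_err());

        // The driver refuses to fire a timer scheduled after `not_after`...
        assert_eq!(unsafe { cell.mark_pending(150) }, Err(200));
        // ...and accepts one scheduled at or before it.
        assert!(unsafe { cell.mark_pending(200) }.is_ok());
        assert!(cell.is_pending());

        // Firing stores the result and reaches the terminal state.
        assert!(unsafe { cell.fire(Ok(())) }.is_none()); // no waker registered
        assert!(matches!(cell.read_state(), Poll::Ready(Ok(()))));
    }
}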