• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //! Synchronization primitives for one-time evaluation.
2 
3 use crate::{
4     atomic::{AtomicU8, Ordering},
5     RelaxStrategy, Spin,
6 };
7 use core::{cell::UnsafeCell, fmt, marker::PhantomData, mem::MaybeUninit};
8 
9 /// A primitive that provides lazy one-time initialization.
10 ///
11 /// Unlike its `std::sync` equivalent, this is generalized such that the closure returns a
12 /// value to be stored by the [`Once`] (`std::sync::Once` can be trivially emulated with
13 /// `Once`).
14 ///
15 /// Because [`Once::new`] is `const`, this primitive may be used to safely initialize statics.
16 ///
17 /// # Examples
18 ///
19 /// ```
20 /// use spin;
21 ///
22 /// static START: spin::Once = spin::Once::new();
23 ///
24 /// START.call_once(|| {
25 ///     // run initialization here
26 /// });
27 /// ```
pub struct Once<T = (), R = Spin> {
    // Zero-sized marker tying the relax strategy `R` to the type; stores nothing.
    phantom: PhantomData<R>,
    // State machine for the cell; see `status::Status` for the four possible states.
    status: AtomicStatus,
    // The lazily-initialized value. Only valid to read once `status` is `Complete`.
    data: UnsafeCell<MaybeUninit<T>>,
}
33 
34 impl<T, R> Default for Once<T, R> {
default() -> Self35     fn default() -> Self {
36         Self::new()
37     }
38 }
39 
40 impl<T: fmt::Debug, R> fmt::Debug for Once<T, R> {
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result41     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
42         match self.get() {
43             Some(s) => write!(f, "Once {{ data: ")
44                 .and_then(|()| s.fmt(f))
45                 .and_then(|()| write!(f, "}}")),
46             None => write!(f, "Once {{ <uninitialized> }}"),
47         }
48     }
49 }
50 
// Same unsafe impls as `std::sync::RwLock`, because this also allows for
// concurrent reads.
// SAFETY: A shared `&Once<T>` can hand out `&T` to many threads at once, so
// sharing requires `T: Send + Sync`, mirroring `RwLock`'s bounds.
unsafe impl<T: Send + Sync, R> Sync for Once<T, R> {}
// SAFETY: Moving the `Once` moves the owned `T` with it, so `T: Send` suffices.
unsafe impl<T: Send, R> Send for Once<T, R> {}
55 
mod status {
    use super::*;

    // Private helper module: the only way to touch the inner atomic from the
    // outside is through `Status`-typed operations, which upholds the invariant
    // described below.

    // SAFETY: This structure has an invariant, namely that the inner atomic u8 must *always* have
    // a value for which there exists a valid Status. This means that users of this API must only
    // be allowed to load and store `Status`es.
    #[repr(transparent)]
    pub struct AtomicStatus(AtomicU8);

    // Four states that a Once can be in, encoded into the lower bits of `status` in
    // the Once structure.
    #[repr(u8)]
    #[derive(Clone, Copy, Debug, PartialEq)]
    pub enum Status {
        Incomplete = 0x00,
        Running = 0x01,
        Complete = 0x02,
        Panicked = 0x03,
    }
    impl Status {
        // Construct a status from an inner u8 integer.
        //
        // # Safety
        //
        // For this to be safe, the inner number must have a valid corresponding enum variant.
        unsafe fn new_unchecked(inner: u8) -> Self {
            // `Status` is `#[repr(u8)]`, so for an in-range discriminant this
            // transmute exactly reverses a `Status as u8` cast.
            core::mem::transmute(inner)
        }
    }

    impl AtomicStatus {
        #[inline(always)]
        pub const fn new(status: Status) -> Self {
            // SAFETY: We got the value directly from status, so transmuting back is fine.
            Self(AtomicU8::new(status as u8))
        }
        #[inline(always)]
        pub fn load(&self, ordering: Ordering) -> Status {
            // SAFETY: We know that the inner integer must have been constructed from a Status in
            // the first place.
            unsafe { Status::new_unchecked(self.0.load(ordering)) }
        }
        #[inline(always)]
        pub fn store(&self, status: Status, ordering: Ordering) {
            // SAFETY: While not directly unsafe, this is safe because the value was retrieved from
            // a status, thus making transmutation safe.
            self.0.store(status as u8, ordering);
        }
        #[inline(always)]
        pub fn compare_exchange(
            &self,
            old: Status,
            new: Status,
            success: Ordering,
            failure: Ordering,
        ) -> Result<Status, Status> {
            match self
                .0
                .compare_exchange(old as u8, new as u8, success, failure)
            {
                // SAFETY: A compare exchange will always return a value that was later stored into
                // the atomic u8, but due to the invariant that it must be a valid Status, we know
                // that both Ok(_) and Err(_) will be safely transmutable.
                Ok(ok) => Ok(unsafe { Status::new_unchecked(ok) }),
                Err(err) => Err(unsafe { Status::new_unchecked(err) }),
            }
        }
        #[inline(always)]
        pub fn get_mut(&mut self) -> &mut Status {
            // SAFETY: Since we know that the u8 inside must be a valid Status, we can safely cast
            // it to a &mut Status.
            unsafe { &mut *((self.0.get_mut() as *mut u8).cast::<Status>()) }
        }
    }
}
131 use self::status::{AtomicStatus, Status};
132 
133 use core::hint::unreachable_unchecked as unreachable;
134 
135 impl<T, R: RelaxStrategy> Once<T, R> {
136     /// Performs an initialization routine once and only once. The given closure
137     /// will be executed if this is the first time `call_once` has been called,
138     /// and otherwise the routine will *not* be invoked.
139     ///
140     /// This method will block the calling thread if another initialization
141     /// routine is currently running.
142     ///
143     /// When this function returns, it is guaranteed that some initialization
144     /// has run and completed (it may not be the closure specified). The
145     /// returned pointer will point to the result from the closure that was
146     /// run.
147     ///
148     /// # Panics
149     ///
150     /// This function will panic if the [`Once`] previously panicked while attempting
151     /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
152     /// primitives.
153     ///
154     /// # Examples
155     ///
156     /// ```
157     /// use spin;
158     ///
159     /// static INIT: spin::Once<usize> = spin::Once::new();
160     ///
161     /// fn get_cached_val() -> usize {
162     ///     *INIT.call_once(expensive_computation)
163     /// }
164     ///
165     /// fn expensive_computation() -> usize {
166     ///     // ...
167     /// # 2
168     /// }
169     /// ```
call_once<F: FnOnce() -> T>(&self, f: F) -> &T170     pub fn call_once<F: FnOnce() -> T>(&self, f: F) -> &T {
171         match self.try_call_once(|| Ok::<T, core::convert::Infallible>(f())) {
172             Ok(x) => x,
173             Err(void) => match void {},
174         }
175     }
176 
    /// This method is similar to `call_once`, but allows the given closure to
    /// fail, and leaves the `Once` in an uninitialized state if it does.
    ///
    /// This method will block the calling thread if another initialization
    /// routine is currently running.
    ///
    /// When this function returns without error, it is guaranteed that some
    /// initialization has run and completed (it may not be the closure
    /// specified). The returned reference will point to the result from the
    /// closure that was run.
    ///
    /// # Panics
    ///
    /// This function will panic if the [`Once`] previously panicked while attempting
    /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
    /// primitives.
    ///
    /// # Examples
    ///
    /// ```
    /// use spin;
    ///
    /// static INIT: spin::Once<usize> = spin::Once::new();
    ///
    /// fn get_cached_val() -> Result<usize, String> {
    ///     INIT.try_call_once(expensive_fallible_computation).map(|x| *x)
    /// }
    ///
    /// fn expensive_fallible_computation() -> Result<usize, String> {
    ///     // ...
    /// # Ok(2)
    /// }
    /// ```
    pub fn try_call_once<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
        // SAFETY: We perform an Acquire load because if this were to return COMPLETE, then we need
        // the preceding stores done while initializing, to become visible after this load.
        let mut status = self.status.load(Ordering::Acquire);

        if status == Status::Incomplete {
            match self.status.compare_exchange(
                Status::Incomplete,
                Status::Running,
                // SAFETY: Success ordering: We do not have to synchronize any data at all, as the
                // value is at this point uninitialized, so Relaxed is technically sufficient. We
                // will however have to do a Release store later. However, the success ordering
                // must always be at least as strong as the failure ordering, so we choose Acquire
                // here anyway.
                Ordering::Acquire,
                // SAFETY: Failure ordering: While we have already loaded the status initially, we
                // know that if some other thread would have fully initialized this in between,
                // then there will be new not-yet-synchronized accesses done during that
                // initialization that would not have been synchronized by the earlier load. Thus
                // we use Acquire to ensure when we later call force_get() in the last match
                // statement, if the status was changed to COMPLETE, that those accesses will become
                // visible to us.
                Ordering::Acquire,
            ) {
                Ok(_must_be_state_incomplete) => {
                    // The compare-exchange succeeded, so we shall initialize it.

                    // We use a guard (Finish) to catch panics caused by builder
                    let finish = Finish {
                        status: &self.status,
                    };
                    let val = match f() {
                        Ok(val) => val,
                        Err(err) => {
                            // If an error occurs, clean up everything and leave.
                            // (Disarm the panic guard first: an `Err` is not a panic, so the
                            // status rolls back to INCOMPLETE rather than being poisoned.)
                            core::mem::forget(finish);
                            self.status.store(Status::Incomplete, Ordering::Release);
                            return Err(err);
                        }
                    };
                    unsafe {
                        // SAFETY:
                        // `UnsafeCell`/deref: currently the only accessor, mutably
                        // and immutably by cas exclusion.
                        // `write`: pointer comes from `MaybeUninit`.
                        (*self.data.get()).as_mut_ptr().write(val);
                    };
                    // If there were to be a panic with unwind enabled, the code would
                    // short-circuit and never reach the point where it writes the inner data.
                    // The destructor for Finish will run, and poison the Once to ensure that other
                    // threads accessing it do not exhibit unwanted behavior, if there were to be
                    // any inconsistency in data structures caused by the panicking thread.
                    //
                    // However, f() is expected in the general case not to panic. In that case, we
                    // simply forget the guard, bypassing its destructor. We could theoretically
                    // clear a flag instead, but this eliminates the call to the destructor at
                    // compile time, and unconditionally poisons during an eventual panic, if
                    // unwinding is enabled.
                    core::mem::forget(finish);

                    // SAFETY: Release is required here, so that all memory accesses done in the
                    // closure when initializing, become visible to other threads that perform Acquire
                    // loads.
                    //
                    // And, we also know that the changes this thread has done will not magically
                    // disappear from our cache, so it does not need to be AcqRel.
                    self.status.store(Status::Complete, Ordering::Release);

                    // This next line is mainly an optimization.
                    return unsafe { Ok(self.force_get()) };
                }
                // The compare-exchange failed, so we know for a fact that the status cannot be
                // INCOMPLETE, or it would have succeeded.
                Err(other_status) => status = other_status,
            }
        }

        Ok(match status {
            // SAFETY: We have either checked with an Acquire load, that the status is COMPLETE, or
            // initialized it ourselves, in which case no additional synchronization is needed.
            Status::Complete => unsafe { self.force_get() },
            Status::Panicked => panic!("Once panicked"),
            Status::Running => self.poll().unwrap_or_else(|| {
                if cfg!(debug_assertions) {
                    unreachable!("Encountered INCOMPLETE when polling Once")
                } else {
                    // SAFETY: This poll is guaranteed never to fail because the API of poll
                    // promises spinning if initialization is in progress. We've already
                    // checked that initialisation is in progress, and initialisation is
                    // monotonic: once done, it cannot be undone. We also fetched the status
                    // with Acquire semantics, thereby guaranteeing that the later-executed
                    // poll will also agree with us that initialization is in progress. Ergo,
                    // this poll cannot fail.
                    unsafe {
                        unreachable();
                    }
                }
            }),

            // SAFETY: The only invariant possible in addition to the aforementioned ones at the
            // moment, is INCOMPLETE. However, the only way for this match statement to be
            // reached, is if we lost the CAS (otherwise we would have returned early), in
            // which case we know for a fact that the state cannot be changed back to INCOMPLETE as
            // `Once`s are monotonic.
            Status::Incomplete => unsafe { unreachable() },
        })
    }
317 
318     /// Spins until the [`Once`] contains a value.
319     ///
320     /// Note that in releases prior to `0.7`, this function had the behaviour of [`Once::poll`].
321     ///
322     /// # Panics
323     ///
324     /// This function will panic if the [`Once`] previously panicked while attempting
325     /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
326     /// primitives.
wait(&self) -> &T327     pub fn wait(&self) -> &T {
328         loop {
329             match self.poll() {
330                 Some(x) => break x,
331                 None => R::relax(),
332             }
333         }
334     }
335 
336     /// Like [`Once::get`], but will spin if the [`Once`] is in the process of being
337     /// initialized. If initialization has not even begun, `None` will be returned.
338     ///
339     /// Note that in releases prior to `0.7`, this function was named `wait`.
340     ///
341     /// # Panics
342     ///
343     /// This function will panic if the [`Once`] previously panicked while attempting
344     /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
345     /// primitives.
poll(&self) -> Option<&T>346     pub fn poll(&self) -> Option<&T> {
347         loop {
348             // SAFETY: Acquire is safe here, because if the status is COMPLETE, then we want to make
349             // sure that all memory accessed done while initializing that value, are visible when
350             // we return a reference to the inner data after this load.
351             match self.status.load(Ordering::Acquire) {
352                 Status::Incomplete => return None,
353                 Status::Running => R::relax(), // We spin
354                 Status::Complete => return Some(unsafe { self.force_get() }),
355                 Status::Panicked => panic!("Once previously poisoned by a panicked"),
356             }
357         }
358     }
359 }
360 
361 impl<T, R> Once<T, R> {
    /// Initialization constant of [`Once`].
    // Allowed: interior mutability is the entire point of this constant; it is
    // intended to be copied into a `static` place before use.
    #[allow(clippy::declare_interior_mutable_const)]
    pub const INIT: Self = Self {
        phantom: PhantomData,
        status: AtomicStatus::new(Status::Incomplete),
        data: UnsafeCell::new(MaybeUninit::uninit()),
    };
369 
370     /// Creates a new [`Once`].
new() -> Self371     pub const fn new() -> Self {
372         Self::INIT
373     }
374 
375     /// Creates a new initialized [`Once`].
initialized(data: T) -> Self376     pub const fn initialized(data: T) -> Self {
377         Self {
378             phantom: PhantomData,
379             status: AtomicStatus::new(Status::Complete),
380             data: UnsafeCell::new(MaybeUninit::new(data)),
381         }
382     }
383 
    /// Retrieve a pointer to the inner data.
    ///
    /// While this method itself is safe, accessing the pointer before the [`Once`] has been
    /// initialized is UB, unless the data has already been written through a pointer coming
    /// from this method.
    pub fn as_mut_ptr(&self) -> *mut T {
        // SAFETY:
        // * MaybeUninit<T> always has exactly the same layout as T
        self.data.get().cast::<T>()
    }
394 
    /// Get a reference to the initialized instance. Must only be called once COMPLETE.
    unsafe fn force_get(&self) -> &T {
        // SAFETY:
        // * `UnsafeCell`/inner deref: data never changes again
        // * `MaybeUninit`/outer deref: data was initialized
        &*(*self.data.get()).as_ptr()
    }
402 
    /// Get a mutable reference to the initialized instance. Must only be called once COMPLETE.
    // `&mut self` guarantees exclusive access, so no atomic synchronization is needed here.
    unsafe fn force_get_mut(&mut self) -> &mut T {
        // SAFETY:
        // * `UnsafeCell`/inner deref: data never changes again
        // * `MaybeUninit`/outer deref: data was initialized
        &mut *(*self.data.get()).as_mut_ptr()
    }
410 
    /// Take ownership of the initialized instance. Must only be called once COMPLETE.
    // Consumes `self`; the `read` moves the value out and the container's `Drop`
    // will not run on the moved-from `Once` (it was consumed by value).
    unsafe fn force_into_inner(self) -> T {
        // SAFETY:
        // * `UnsafeCell`/inner deref: data never changes again
        // * `MaybeUninit`/outer deref: data was initialized
        (*self.data.get()).as_ptr().read()
    }
418 
419     /// Returns a reference to the inner value if the [`Once`] has been initialized.
get(&self) -> Option<&T>420     pub fn get(&self) -> Option<&T> {
421         // SAFETY: Just as with `poll`, Acquire is safe here because we want to be able to see the
422         // nonatomic stores done when initializing, once we have loaded and checked the status.
423         match self.status.load(Ordering::Acquire) {
424             Status::Complete => Some(unsafe { self.force_get() }),
425             _ => None,
426         }
427     }
428 
    /// Returns a reference to the inner value on the unchecked assumption that the [`Once`] has been initialized.
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
    /// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused).
    /// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically
    /// checking initialization is unacceptable and the `Once` has already been initialized.
    pub unsafe fn get_unchecked(&self) -> &T {
        // In debug builds, catch misuse loudly instead of silently invoking UB.
        debug_assert_eq!(
            self.status.load(Ordering::SeqCst),
            Status::Complete,
            "Attempted to access an uninitialized Once. If this was run without debug checks, this would be undefined behaviour. This is a serious bug and you must fix it.",
        );
        self.force_get()
    }
445 
446     /// Returns a mutable reference to the inner value if the [`Once`] has been initialized.
447     ///
448     /// Because this method requires a mutable reference to the [`Once`], no synchronization
449     /// overhead is required to access the inner value. In effect, it is zero-cost.
get_mut(&mut self) -> Option<&mut T>450     pub fn get_mut(&mut self) -> Option<&mut T> {
451         match *self.status.get_mut() {
452             Status::Complete => Some(unsafe { self.force_get_mut() }),
453             _ => None,
454         }
455     }
456 
457     /// Returns a mutable reference to the inner value
458     ///
459     /// # Safety
460     ///
461     /// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
462     /// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused).
463     /// However, this can be useful in some instances for exposing the `Once` to FFI or when the overhead of atomically
464     /// checking initialization is unacceptable and the `Once` has already been initialized.
get_mut_unchecked(&mut self) -> &mut T465     pub unsafe fn get_mut_unchecked(&mut self) -> &mut T {
466         debug_assert_eq!(
467             self.status.load(Ordering::SeqCst),
468             Status::Complete,
469             "Attempted to access an unintialized Once.  If this was to run without debug checks, this would be undefined behavior.  This is a serious bug and you must fix it.",
470         );
471         self.force_get_mut()
472     }
473 
474     /// Returns a the inner value if the [`Once`] has been initialized.
475     ///
476     /// Because this method requires ownership of the [`Once`], no synchronization overhead
477     /// is required to access the inner value. In effect, it is zero-cost.
try_into_inner(mut self) -> Option<T>478     pub fn try_into_inner(mut self) -> Option<T> {
479         match *self.status.get_mut() {
480             Status::Complete => Some(unsafe { self.force_into_inner() }),
481             _ => None,
482         }
483     }
484 
485     /// Returns a the inner value if the [`Once`] has been initialized.
486     /// # Safety
487     ///
488     /// This is *extremely* unsafe if the `Once` has not already been initialized because a reference to uninitialized
489     /// memory will be returned, immediately triggering undefined behaviour (even if the reference goes unused)
490     /// This can be useful, if `Once` has already been initialized, and you want to bypass an
491     /// option check.
into_inner_unchecked(self) -> T492     pub unsafe fn into_inner_unchecked(self) -> T {
493         debug_assert_eq!(
494             self.status.load(Ordering::SeqCst),
495             Status::Complete,
496             "Attempted to access an unintialized Once.  If this was to run without debug checks, this would be undefined behavior.  This is a serious bug and you must fix it.",
497         );
498         self.force_into_inner()
499     }
500 
501     /// Checks whether the value has been initialized.
502     ///
503     /// This is done using [`Acquire`](core::sync::atomic::Ordering::Acquire) ordering, and
504     /// therefore it is safe to access the value directly via
505     /// [`get_unchecked`](Self::get_unchecked) if this returns true.
is_completed(&self) -> bool506     pub fn is_completed(&self) -> bool {
507         // TODO: Add a similar variant for Relaxed?
508         self.status.load(Ordering::Acquire) == Status::Complete
509     }
510 }
511 
512 impl<T, R> From<T> for Once<T, R> {
from(data: T) -> Self513     fn from(data: T) -> Self {
514         Self::initialized(data)
515     }
516 }
517 
impl<T, R> Drop for Once<T, R> {
    fn drop(&mut self) {
        // No need to do any atomic access here, we have &mut!
        if *self.status.get_mut() == Status::Complete {
            unsafe {
                //TODO: Use MaybeUninit::assume_init_drop once stabilised
                // SAFETY: status is COMPLETE, so `data` holds a fully
                // initialized `T`, dropped exactly once here.
                core::ptr::drop_in_place((*self.data.get()).as_mut_ptr());
            }
        }
    }
}
529 
// Panic guard used by `try_call_once`: constructed before running the user's
// initializer and `forget`-ten on success (or on `Err`); if the closure panics,
// the guard's `Drop` runs during unwinding and poisons the `Once`.
struct Finish<'a> {
    status: &'a AtomicStatus,
}
533 
impl<'a> Drop for Finish<'a> {
    // Only reached when the initializer panicked: the success and `Err` paths
    // both `forget` the guard before it can be dropped.
    fn drop(&mut self) {
        // While using Relaxed here would most likely not be an issue, we use SeqCst anyway.
        // This is mainly because panics are not meant to be fast at all, but also because if
        // there were to be a compiler bug which reorders accesses within the same thread,
        // where it should not, we want to be sure that the panic really is handled, and does
        // not cause additional problems. SeqCst will therefore help guarding against such
        // bugs.
        self.status.store(Status::Panicked, Ordering::SeqCst);
    }
}
545 
546 #[cfg(test)]
547 mod tests {
548     use std::prelude::v1::*;
549 
550     use std::sync::mpsc::channel;
551     use std::thread;
552 
553     use super::*;
554 
    #[test]
    fn smoke_once() {
        static O: Once = Once::new();
        let mut a = 0;
        // Only the first closure handed to `call_once` may ever run.
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
    }
564 
    #[test]
    fn smoke_once_value() {
        static O: Once<usize> = Once::new();
        let a = O.call_once(|| 1);
        assert_eq!(*a, 1);
        // The second closure is ignored; the stored value stays 1.
        let b = O.call_once(|| 2);
        assert_eq!(*b, 1);
    }
573 
    #[test]
    fn stampede_once() {
        static O: Once = Once::new();
        static mut RUN: bool = false;

        // Ten racing threads plus the main thread all call `call_once`; the
        // closure must run exactly once (RUN may flip false -> true only once).
        let (tx, rx) = channel();
        let mut ts = Vec::new();
        for _ in 0..10 {
            let tx = tx.clone();
            ts.push(thread::spawn(move || {
                // Give the other threads a chance to start before racing.
                for _ in 0..4 {
                    thread::yield_now()
                }
                unsafe {
                    O.call_once(|| {
                        assert!(!RUN);
                        RUN = true;
                    });
                    assert!(RUN);
                }
                tx.send(()).unwrap();
            }));
        }

        unsafe {
            O.call_once(|| {
                assert!(!RUN);
                RUN = true;
            });
            assert!(RUN);
        }

        for _ in 0..10 {
            rx.recv().unwrap();
        }

        for t in ts {
            t.join().unwrap();
        }
    }
614 
    #[test]
    fn get() {
        static INIT: Once<usize> = Once::new();

        // `get` returns None before initialization and Some afterwards.
        assert!(INIT.get().is_none());
        INIT.call_once(|| 2);
        assert_eq!(INIT.get().map(|r| *r), Some(2));
    }
623 
    #[test]
    fn get_no_wait() {
        static INIT: Once<usize> = Once::new();

        assert!(INIT.get().is_none());
        let t = thread::spawn(move || {
            INIT.call_once(|| {
                thread::sleep(std::time::Duration::from_secs(3));
                42
            });
        });
        // `get` must not block while another thread is mid-initialization.
        assert!(INIT.get().is_none());

        t.join().unwrap();
    }
639 
    #[test]
    fn poll() {
        static INIT: Once<usize> = Once::new();

        // `poll` returns None before initialization and Some afterwards.
        assert!(INIT.poll().is_none());
        INIT.call_once(|| 3);
        assert_eq!(INIT.poll().map(|r| *r), Some(3));
    }
648 
    #[test]
    fn wait() {
        static INIT: Once<usize> = Once::new();

        let t = std::thread::spawn(|| {
            // Blocks until the main thread runs `call_once` below.
            assert_eq!(*INIT.wait(), 3);
            assert!(INIT.is_completed());
        });

        // Give the waiter a chance to start spinning first.
        for _ in 0..4 {
            thread::yield_now()
        }

        assert!(INIT.poll().is_none());
        INIT.call_once(|| 3);

        t.join().unwrap();
    }
667 
    #[test]
    #[ignore = "Android uses panic_abort"]
    fn panic() {
        use std::panic;

        static INIT: Once = Once::new();

        // poison the once
        let t = panic::catch_unwind(|| {
            INIT.call_once(|| panic!());
        });
        assert!(t.is_err());

        // poisoning propagates
        // (a poisoned Once panics on every subsequent `call_once`)
        let t = panic::catch_unwind(|| {
            INIT.call_once(|| {});
        });
        assert!(t.is_err());
    }
687 
    #[test]
    fn init_constant() {
        // `Once::INIT` must behave exactly like `Once::new()`.
        static O: Once = Once::INIT;
        let mut a = 0;
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
        O.call_once(|| a += 1);
        assert_eq!(a, 1);
    }
697 
    // Flag flipped by `DropTest::drop`, letting the tests observe whether the
    // destructor ran.
    static mut CALLED: bool = false;

    struct DropTest {}

    impl Drop for DropTest {
        fn drop(&mut self) {
            unsafe {
                CALLED = true;
            }
        }
    }
709 
    // This is sort of two test cases, but if we write them as separate test methods
    // they can be executed concurrently and then fail some small fraction of the
    // time.
    #[test]
    fn drop_occurs_and_skip_uninit_drop() {
        unsafe {
            CALLED = false;
        }

        {
            // An initialized Once must drop its value when it goes out of scope.
            let once = Once::<_>::new();
            once.call_once(|| DropTest {});
        }

        assert!(unsafe { CALLED });
        // Now test that we skip drops for the uninitialized case.
        unsafe {
            CALLED = false;
        }

        let once = Once::<DropTest>::new();
        drop(once);

        assert!(unsafe { !CALLED });
    }
735 
    #[test]
    fn call_once_test() {
        // Repeat to give rare interleavings a chance to manifest.
        for _ in 0..20 {
            use std::sync::atomic::AtomicUsize;
            use std::sync::Arc;
            use std::time::Duration;
            let share = Arc::new(AtomicUsize::new(0));
            let once = Arc::new(Once::<_, Spin>::new());
            let mut hs = Vec::new();
            for _ in 0..8 {
                let h = thread::spawn({
                    let share = share.clone();
                    let once = once.clone();
                    move || {
                        thread::sleep(Duration::from_millis(10));
                        once.call_once(|| {
                            share.fetch_add(1, Ordering::SeqCst);
                        });
                    }
                });
                hs.push(h);
            }
            for h in hs {
                h.join().unwrap();
            }
            // Exactly one of the eight racing threads may run the closure.
            assert_eq!(1, share.load(Ordering::SeqCst));
        }
    }
764 }
765