// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use core::{
    ptr,
    sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
};
use std::time::Instant;

mod bindings;
mod keyed_event;
mod waitaddress;

enum Backend {
    KeyedEvent(keyed_event::KeyedEvent),
    WaitAddress(waitaddress::WaitAddress),
}

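// Lazily-initialized global backend. The first thread that needs it allocates
// a `Backend` on the heap and publishes the pointer here; the allocation is
// intentionally never freed so the `&'static Backend` references handed out
// by `Backend::get` stay valid for the lifetime of the process.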
static BACKEND: AtomicPtr<Backend> = AtomicPtr::new(ptr::null_mut());

impl Backend {
    #[inline]
    fn get() -> &'static Backend {
        // Fast path: use the existing object
        let backend_ptr = BACKEND.load(Ordering::Acquire);
        if !backend_ptr.is_null() {
            return unsafe { &*backend_ptr };
        };

        Backend::create()
    }

    #[cold]
    fn create() -> &'static Backend {
        // Try to create a new Backend
        let backend;
        if let Some(waitaddress) = waitaddress::WaitAddress::create() {
            backend = Backend::WaitAddress(waitaddress);
        } else if let Some(keyed_event) = keyed_event::KeyedEvent::create() {
            backend = Backend::KeyedEvent(keyed_event);
        } else {
            panic!(
                "parking_lot requires either NT Keyed Events (WinXP+) or \
                 WaitOnAddress/WakeByAddress (Win8+)"
            );
        }

        // Try to set our new Backend as the global one
        let backend_ptr = Box::into_raw(Box::new(backend));
        match BACKEND.compare_exchange(
            ptr::null_mut(),
            backend_ptr,
            Ordering::Release,
            Ordering::Relaxed,
        ) {
            Ok(_) => unsafe { &*backend_ptr },
            Err(global_backend_ptr) => {
                unsafe {
                    // We lost the race, free our object and return the global one
                    let _ = Box::from_raw(backend_ptr);
                    &*global_backend_ptr
                }
            }
        }
    }
}
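
// A small sanity-check sketch (not part of the original module): repeated
// calls to `Backend::get` should hand back references to the same leaked
// allocation, since the slow path can only win the compare_exchange once and
// every loser returns the already-published pointer.
#[cfg(test)]
mod backend_tests {
    use super::Backend;

    #[test]
    fn get_is_memoized() {
        // Both calls must resolve to the same heap object published in BACKEND.
        let first = Backend::get() as *const Backend;
        let second = Backend::get() as *const Backend;
        assert_eq!(first, second);
    }
}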

// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
    key: AtomicUsize,
    backend: &'static Backend,
}

impl super::ThreadParkerT for ThreadParker {
    type UnparkHandle = UnparkHandle;

    const IS_CHEAP_TO_CONSTRUCT: bool = true;

    #[inline]
    fn new() -> ThreadParker {
        // Initialize the backend here to ensure we don't get any panics
        // later on, which could leave synchronization primitives in a broken
        // state.
        ThreadParker {
            key: AtomicUsize::new(0),
            backend: Backend::get(),
        }
    }

    // Prepares the parker. This should be called before adding it to the queue.
    #[inline]
    unsafe fn prepare_park(&self) {
        match *self.backend {
            Backend::KeyedEvent(ref x) => x.prepare_park(&self.key),
            Backend::WaitAddress(ref x) => x.prepare_park(&self.key),
        }
    }

    // Checks if the park timed out. This should be called while holding the
    // queue lock after park_until has returned false.
    #[inline]
    unsafe fn timed_out(&self) -> bool {
        match *self.backend {
            Backend::KeyedEvent(ref x) => x.timed_out(&self.key),
            Backend::WaitAddress(ref x) => x.timed_out(&self.key),
        }
    }

    // Parks the thread until it is unparked. This should be called after it has
    // been added to the queue, after unlocking the queue.
    #[inline]
    unsafe fn park(&self) {
        match *self.backend {
            Backend::KeyedEvent(ref x) => x.park(&self.key),
            Backend::WaitAddress(ref x) => x.park(&self.key),
        }
    }

    // Parks the thread until it is unparked or the timeout is reached. This
    // should be called after it has been added to the queue, after unlocking
    // the queue. Returns true if we were unparked and false if we timed out.
    #[inline]
    unsafe fn park_until(&self, timeout: Instant) -> bool {
        match *self.backend {
            Backend::KeyedEvent(ref x) => x.park_until(&self.key, timeout),
            Backend::WaitAddress(ref x) => x.park_until(&self.key, timeout),
        }
    }

    // Locks the parker to prevent the target thread from exiting. This is
    // necessary to ensure that thread-local ThreadData objects remain valid.
    // This should be called while holding the queue lock.
    #[inline]
    unsafe fn unpark_lock(&self) -> UnparkHandle {
        match *self.backend {
            Backend::KeyedEvent(ref x) => UnparkHandle::KeyedEvent(x.unpark_lock(&self.key)),
            Backend::WaitAddress(ref x) => UnparkHandle::WaitAddress(x.unpark_lock(&self.key)),
        }
    }
}

// Handle for a thread that is about to be unparked. We need to mark the thread
// as unparked while holding the queue lock, but we delay the actual unparking
// until after the queue lock is released.
pub enum UnparkHandle {
    KeyedEvent(keyed_event::UnparkHandle),
    WaitAddress(waitaddress::UnparkHandle),
}

impl super::UnparkHandleT for UnparkHandle {
    // Wakes up the parked thread. This should be called after the queue lock is
    // released to avoid blocking the queue for too long.
    #[inline]
    unsafe fn unpark(self) {
        match self {
            UnparkHandle::KeyedEvent(x) => x.unpark(),
            UnparkHandle::WaitAddress(x) => x.unpark(),
        }
    }
}

// Yields the rest of the current timeslice to the OS
#[inline]
pub fn thread_yield() {
    unsafe {
        // We don't use SwitchToThread here because it doesn't consider all
        // threads in the system and the thread we are waiting for may not get
        // selected.
        bindings::Sleep(0);
    }
}
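
// A usage sketch (not part of the original module) exercising the park/unpark
// handshake described in the comments above, outside of the real wait queue.
// parking_lot itself shares the parker between threads through raw pointers in
// its queue, so this sketch does the same; the `SendPtr` wrapper is a
// test-only convenience, not part of the library's API. The trait paths assume
// the usual crate layout where `ThreadParkerT`/`UnparkHandleT` live one module
// up, which is what the `impl super::ThreadParkerT` above implies.
#[cfg(test)]
mod parker_tests {
    use super::super::{ThreadParkerT, UnparkHandleT};
    use super::ThreadParker;

    struct SendPtr(*const ThreadParker);
    unsafe impl Send for SendPtr {}

    #[test]
    fn park_then_unpark() {
        // Leak the parker so both threads can treat it as 'static, mirroring
        // the long-lived ThreadData storage used by the real queue.
        let parker: &'static ThreadParker = Box::leak(Box::new(ThreadParker::new()));

        // Prepare before any unparker can observe us, as the queue would do
        // while holding its lock.
        unsafe { parker.prepare_park() };

        let ptr = SendPtr(parker as *const ThreadParker);
        let waker = std::thread::spawn(move || unsafe {
            let parker = &*ptr.0;
            // In the real queue these two calls are split around releasing the
            // queue lock; here they simply run back to back.
            let handle = parker.unpark_lock();
            handle.unpark();
        });

        // Blocks until the helper thread wakes us, or returns immediately if
        // the wakeup already happened.
        unsafe { parker.park() };
        waker.join().unwrap();
    }
}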