// Copyright 2020 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::cell::UnsafeCell;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{spin_loop_hint, AtomicBool, Ordering};

const UNLOCKED: bool = false;
const LOCKED: bool = true;

/// A primitive that provides safe, mutable access to a shared resource.
///
/// Unlike `Mutex`, a `SpinLock` will not voluntarily yield its CPU time until the resource is
/// available and will instead keep spinning until the resource is acquired. For the vast majority
/// of cases, `Mutex` is a better choice than `SpinLock`. If a `SpinLock` must be used then users
/// should try to do as little work as possible while holding the `SpinLock` and avoid any sort of
/// blocking at all costs as it can severely penalize performance.
///
/// # Poisoning
///
/// This `SpinLock` does not implement lock poisoning so it is possible for threads to access
/// poisoned data if a thread panics while holding the lock. If lock poisoning is needed, it can be
/// implemented by wrapping the `SpinLock` in a new type that implements poisoning. See the
/// implementation of `std::sync::Mutex` for an example of how to do this.
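///
/// A minimal sketch of one possible poisoning wrapper (illustrative only; not part of this
/// file). It records the poisoned state when a guard is dropped while the thread is panicking:
///
/// ```ignore
/// use std::sync::atomic::{AtomicBool, Ordering};
/// use std::thread;
///
/// struct PoisonLock<T> {
///     lock: SpinLock<T>,
///     poisoned: AtomicBool,
/// }
///
/// struct PoisonGuard<'a, T> {
///     guard: SpinLockGuard<'a, T>,
///     poisoned: &'a AtomicBool,
/// }
///
/// impl<'a, T> Drop for PoisonGuard<'a, T> {
///     fn drop(&mut self) {
///         // Mark the data poisoned if this guard is dropped while unwinding from a panic.
///         if thread::panicking() {
///             self.poisoned.store(true, Ordering::Release);
///         }
///     }
/// }
///
/// impl<T> PoisonLock<T> {
///     fn lock(&self) -> Result<PoisonGuard<'_, T>, ()> {
///         let guard = self.lock.lock();
///         if self.poisoned.load(Ordering::Acquire) {
///             return Err(()); // The inner guard drops here, releasing the lock.
///         }
///         Ok(PoisonGuard {
///             guard,
///             poisoned: &self.poisoned,
///         })
///     }
/// }
/// ```
///
/// # Examples
///
/// A minimal usage sketch (illustrative only; not part of the original file), assuming
/// `SpinLock` is in scope:
///
/// ```ignore
/// let lock = SpinLock::new(0u32);
/// {
///     let mut guard = lock.lock();
///     *guard += 1;
/// } // The lock is released here when the guard is dropped.
/// assert_eq!(*lock.lock(), 1);
/// ```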
pub struct SpinLock<T: ?Sized> {
    lock: AtomicBool,
    value: UnsafeCell<T>,
}

impl<T> SpinLock<T> {
    /// Creates a new, unlocked `SpinLock` that's ready for use.
    pub fn new(value: T) -> SpinLock<T> {
        SpinLock {
            lock: AtomicBool::new(UNLOCKED),
            value: UnsafeCell::new(value),
        }
    }

    /// Consumes the `SpinLock` and returns the value guarded by it. This method doesn't perform any
    /// locking as the compiler guarantees that there are no references to `self`.
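    ///
    /// # Examples
    ///
    /// A minimal sketch (illustrative only; not part of the original file):
    ///
    /// ```ignore
    /// let lock = SpinLock::new(vec![1, 2, 3]);
    /// // Consuming the lock hands back the inner value without touching the atomic flag.
    /// assert_eq!(lock.into_inner(), vec![1, 2, 3]);
    /// ```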
    pub fn into_inner(self) -> T {
        // No need to take the lock because the compiler can statically guarantee
        // that there are no references to the SpinLock.
        self.value.into_inner()
    }
}

impl<T: ?Sized> SpinLock<T> {
    /// Acquires exclusive, mutable access to the resource protected by the `SpinLock`, blocking the
    /// current thread until it is able to do so. Upon returning, the current thread will be the
    /// only thread with access to the resource. The `SpinLock` will be released when the returned
    /// `SpinLockGuard` is dropped. Attempting to call `lock` while already holding the `SpinLock`
    /// will cause a deadlock.
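    ///
    /// # Examples
    ///
    /// A minimal sketch of scoped locking (illustrative only; not part of the original file):
    ///
    /// ```ignore
    /// let lock = SpinLock::new(String::from("hello"));
    /// {
    ///     let mut guard = lock.lock();
    ///     guard.push_str(", world");
    /// } // Dropping the guard releases the lock here.
    /// assert_eq!(*lock.lock(), "hello, world");
    /// ```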
    pub fn lock(&self) -> SpinLockGuard<T> {
        while self
            .lock
            .compare_exchange_weak(UNLOCKED, LOCKED, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            spin_loop_hint();
        }

        SpinLockGuard {
            lock: self,
            // Safe because the spin loop above guarantees that this thread now holds the lock, so
            // no other thread can create a reference to the value until the guard is dropped.
            value: unsafe { &mut *self.value.get() },
        }
    }

    fn unlock(&self) {
        // Don't need to compare and swap because we exclusively hold the lock.
        self.lock.store(UNLOCKED, Ordering::Release);
    }

    /// Returns a mutable reference to the contained value. This method doesn't perform any locking
    /// as the compiler will statically guarantee that there are no other references to `self`.
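    ///
    /// # Examples
    ///
    /// A minimal sketch (illustrative only; not part of the original file):
    ///
    /// ```ignore
    /// let mut lock = SpinLock::new(7);
    /// // No locking is needed: `&mut self` proves this is the only reference.
    /// *lock.get_mut() = 8;
    /// assert_eq!(lock.into_inner(), 8);
    /// ```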
    pub fn get_mut(&mut self) -> &mut T {
        // Safe because the compiler can statically guarantee that there are no other references to
        // `self`. This is also why we don't need to acquire the lock.
        unsafe { &mut *self.value.get() }
    }
}

unsafe impl<T: ?Sized + Send> Send for SpinLock<T> {}
unsafe impl<T: ?Sized + Send> Sync for SpinLock<T> {}

impl<T: ?Sized + Default> Default for SpinLock<T> {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

impl<T> From<T> for SpinLock<T> {
    fn from(source: T) -> Self {
        Self::new(source)
    }
}

/// An RAII implementation of a "scoped lock" for a `SpinLock`. When this structure is dropped, the
/// lock will be released. The resource protected by the `SpinLock` can be accessed via the `Deref`
/// and `DerefMut` implementations of this structure.
pub struct SpinLockGuard<'a, T: 'a + ?Sized> {
    lock: &'a SpinLock<T>,
    value: &'a mut T,
}

impl<'a, T: ?Sized> Deref for SpinLockGuard<'a, T> {
    type Target = T;
    fn deref(&self) -> &T {
        self.value
    }
}

impl<'a, T: ?Sized> DerefMut for SpinLockGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut T {
        self.value
    }
}

impl<'a, T: ?Sized> Drop for SpinLockGuard<'a, T> {
    fn drop(&mut self) {
        self.lock.unlock();
    }
}

#[cfg(test)]
mod test {
    use super::*;

    use std::mem;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;
    use std::thread;

    #[derive(PartialEq, Eq, Debug)]
    struct NonCopy(u32);

    #[test]
    fn it_works() {
        let sl = SpinLock::new(NonCopy(13));

        assert_eq!(*sl.lock(), NonCopy(13));
    }

    #[test]
    fn smoke() {
        let sl = SpinLock::new(NonCopy(7));

        mem::drop(sl.lock());
        mem::drop(sl.lock());
    }

    #[test]
    fn send() {
        let sl = SpinLock::new(NonCopy(19));

        thread::spawn(move || {
            let value = sl.lock();
            assert_eq!(*value, NonCopy(19));
        })
        .join()
        .unwrap();
    }

    #[test]
    fn high_contention() {
        const THREADS: usize = 23;
        const ITERATIONS: usize = 101;

        let mut threads = Vec::with_capacity(THREADS);

        let sl = Arc::new(SpinLock::new(0usize));
        for _ in 0..THREADS {
            let sl2 = sl.clone();
            threads.push(thread::spawn(move || {
                for _ in 0..ITERATIONS {
                    *sl2.lock() += 1;
                }
            }));
        }

        for t in threads.into_iter() {
            t.join().unwrap();
        }

        assert_eq!(*sl.lock(), THREADS * ITERATIONS);
    }

    #[test]
    fn get_mut() {
        let mut sl = SpinLock::new(NonCopy(13));
        *sl.get_mut() = NonCopy(17);

        assert_eq!(sl.into_inner(), NonCopy(17));
    }

    #[test]
    fn into_inner() {
        let sl = SpinLock::new(NonCopy(29));
        assert_eq!(sl.into_inner(), NonCopy(29));
    }

    #[test]
    fn into_inner_drop() {
        struct NeedsDrop(Arc<AtomicUsize>);
        impl Drop for NeedsDrop {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::AcqRel);
            }
        }

        let value = Arc::new(AtomicUsize::new(0));
        let needs_drop = SpinLock::new(NeedsDrop(value.clone()));
        assert_eq!(value.load(Ordering::Acquire), 0);

        {
            let inner = needs_drop.into_inner();
            assert_eq!(inner.0.load(Ordering::Acquire), 0);
        }

        assert_eq!(value.load(Ordering::Acquire), 1);
    }

    #[test]
    fn arc_nested() {
        // Tests nested spin locks and access to the underlying data.
        let sl = SpinLock::new(1);
        let arc = Arc::new(SpinLock::new(sl));
        thread::spawn(move || {
            let nested = arc.lock();
            let lock2 = nested.lock();
            assert_eq!(*lock2, 1);
        })
        .join()
        .unwrap();
    }

    #[test]
    fn arc_access_in_unwind() {
        let arc = Arc::new(SpinLock::new(1));
        let arc2 = arc.clone();
        thread::spawn(move || {
            struct Unwinder {
                i: Arc<SpinLock<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join()
        .expect_err("thread did not panic");
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn unsized_value() {
        let sl: &SpinLock<[i32]> = &SpinLock::new([1, 2, 3]);
        {
            let b = &mut *sl.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let expected: &[i32] = &[4, 2, 5];
        assert_eq!(&*sl.lock(), expected);
    }
}