// Copyright 2020 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::{
    cell::UnsafeCell,
    hint,
    ops::{Deref, DerefMut},
    sync::atomic::{AtomicBool, Ordering},
};

const UNLOCKED: bool = false;
const LOCKED: bool = true;

/// A primitive that provides safe, mutable access to a shared resource.
///
/// Unlike `Mutex`, a `SpinLock` will not voluntarily yield its CPU time until the resource is
/// available and will instead keep spinning until the resource is acquired. For the vast majority
/// of cases, `Mutex` is a better choice than `SpinLock`. If a `SpinLock` must be used then users
/// should try to do as little work as possible while holding the `SpinLock` and avoid any sort of
/// blocking at all costs as it can severely penalize performance.
///
/// # Poisoning
///
/// This `SpinLock` does not implement lock poisoning so it is possible for threads to access
/// poisoned data if a thread panics while holding the lock. If lock poisoning is needed, it can be
/// implemented by wrapping the `SpinLock` in a new type that implements poisoning. See the
/// implementation of `std::sync::Mutex` for an example of how to do this.
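///
/// A poisoning wrapper could look roughly like the sketch below (illustrative only; the names
/// are hypothetical):
///
/// ```ignore
/// struct PoisoningSpinLock<T> {
///     lock: SpinLock<T>,
///     poisoned: AtomicBool,
/// }
/// // The wrapper's guard would set `poisoned` from its `Drop` impl when
/// // `std::thread::panicking()` returns true, and `lock()` would report the flag as an error,
/// // mirroring `std::sync::Mutex`.
/// ```
///
/// # Example
///
/// Basic usage (an illustrative sketch; assumes `SpinLock` is in scope from this crate):
///
/// ```ignore
/// let lock = SpinLock::new(0u32);
/// {
///     let mut guard = lock.lock();
///     *guard += 1;
/// } // The guard is dropped here, releasing the lock.
/// assert_eq!(*lock.lock(), 1);
/// ```
// Align to 128 bytes, presumably so the lock sits on its own cache line(s) and avoids false
// sharing with adjacent data.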
#[repr(align(128))]
pub struct SpinLock<T: ?Sized> {
    lock: AtomicBool,
    value: UnsafeCell<T>,
}

impl<T> SpinLock<T> {
    /// Creates a new, unlocked `SpinLock` that's ready for use.
    pub fn new(value: T) -> SpinLock<T> {
        SpinLock {
            lock: AtomicBool::new(UNLOCKED),
            value: UnsafeCell::new(value),
        }
    }

    /// Consumes the `SpinLock` and returns the value guarded by it. This method doesn't perform any
    /// locking as the compiler guarantees that there are no references to `self`.
    pub fn into_inner(self) -> T {
        // No need to take the lock because the compiler can statically guarantee
        // that there are no references to the SpinLock.
        self.value.into_inner()
    }
}

impl<T: ?Sized> SpinLock<T> {
    /// Acquires exclusive, mutable access to the resource protected by the `SpinLock`, blocking the
    /// current thread until it is able to do so. Upon returning, the current thread will be the
    /// only thread with access to the resource. The `SpinLock` will be released when the returned
    /// `SpinLockGuard` is dropped. Attempting to call `lock` while already holding the `SpinLock`
    /// will cause a deadlock.
    pub fn lock(&self) -> SpinLockGuard<T> {
        loop {
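            // Test-and-test-and-set: do a cheap relaxed read first and only attempt the
            // compare-exchange once the lock looks free, so the cache line is not repeatedly
            // taken exclusive while another thread holds the lock. The `Acquire` ordering on a
            // successful exchange pairs with the `Release` store in `unlock`, making the
            // previous holder's writes visible once the lock is acquired. A spurious failure of
            // `compare_exchange_weak` is harmless because we simply spin and retry.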
            let state = self.lock.load(Ordering::Relaxed);
            if state == UNLOCKED
                && self
                    .lock
                    .compare_exchange_weak(UNLOCKED, LOCKED, Ordering::Acquire, Ordering::Relaxed)
                    .is_ok()
            {
                break;
            }
            hint::spin_loop();
        }

        SpinLockGuard {
            lock: self,
            value: unsafe { &mut *self.value.get() },
        }
    }

    fn unlock(&self) {
        // Don't need to compare and swap because we exclusively hold the lock.
        self.lock.store(UNLOCKED, Ordering::Release);
    }

    /// Returns a mutable reference to the contained value. This method doesn't perform any locking
    /// as the compiler will statically guarantee that there are no other references to `self`.
    pub fn get_mut(&mut self) -> &mut T {
        // Safe because the compiler can statically guarantee that there are no other references to
        // `self`. This is also why we don't need to acquire the lock.
        unsafe { &mut *self.value.get() }
    }
}

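// SAFETY: the `SpinLock` guarantees that only one thread has access to the protected value at a
// time, so it is safe to send a `SpinLock<T>` to another thread (`Send`) and to share it between
// threads (`Sync`) whenever `T: Send`. `T` does not need to be `Sync` because the lock never
// hands out access to more than one thread at once.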
unsafe impl<T: ?Sized + Send> Send for SpinLock<T> {}
unsafe impl<T: ?Sized + Send> Sync for SpinLock<T> {}

impl<T: ?Sized + Default> Default for SpinLock<T> {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

impl<T> From<T> for SpinLock<T> {
    fn from(source: T) -> Self {
        Self::new(source)
    }
}

/// An RAII implementation of a "scoped lock" for a `SpinLock`. When this structure is dropped, the
/// lock will be released. The resource protected by the `SpinLock` can be accessed via the `Deref`
/// and `DerefMut` implementations of this structure.
pub struct SpinLockGuard<'a, T: 'a + ?Sized> {
    lock: &'a SpinLock<T>,
    value: &'a mut T,
}

impl<'a, T: ?Sized> Deref for SpinLockGuard<'a, T> {
    type Target = T;
    fn deref(&self) -> &T {
        self.value
    }
}

impl<'a, T: ?Sized> DerefMut for SpinLockGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut T {
        self.value
    }
}

impl<'a, T: ?Sized> Drop for SpinLockGuard<'a, T> {
    fn drop(&mut self) {
        self.lock.unlock();
    }
}

#[cfg(test)]
mod test {
    use super::*;

    use std::{
        mem,
        sync::{
            atomic::{AtomicUsize, Ordering},
            Arc,
        },
        thread,
    };

    #[derive(PartialEq, Eq, Debug)]
    struct NonCopy(u32);

    #[test]
    fn it_works() {
        let sl = SpinLock::new(NonCopy(13));

        assert_eq!(*sl.lock(), NonCopy(13));
    }

    #[test]
    fn smoke() {
        let sl = SpinLock::new(NonCopy(7));

        mem::drop(sl.lock());
        mem::drop(sl.lock());
    }

    #[test]
    fn send() {
        let sl = SpinLock::new(NonCopy(19));

        thread::spawn(move || {
            let value = sl.lock();
            assert_eq!(*value, NonCopy(19));
        })
        .join()
        .unwrap();
    }

    #[test]
    fn high_contention() {
        const THREADS: usize = 23;
        const ITERATIONS: usize = 101;

        let mut threads = Vec::with_capacity(THREADS);

        let sl = Arc::new(SpinLock::new(0usize));
        for _ in 0..THREADS {
            let sl2 = sl.clone();
            threads.push(thread::spawn(move || {
                for _ in 0..ITERATIONS {
                    *sl2.lock() += 1;
                }
            }));
        }

        for t in threads.into_iter() {
            t.join().unwrap();
        }

        assert_eq!(*sl.lock(), THREADS * ITERATIONS);
    }

    #[test]
    fn get_mut() {
        let mut sl = SpinLock::new(NonCopy(13));
        *sl.get_mut() = NonCopy(17);

        assert_eq!(sl.into_inner(), NonCopy(17));
    }

    #[test]
    fn into_inner() {
        let sl = SpinLock::new(NonCopy(29));
        assert_eq!(sl.into_inner(), NonCopy(29));
    }

    #[test]
    fn into_inner_drop() {
        struct NeedsDrop(Arc<AtomicUsize>);
        impl Drop for NeedsDrop {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::AcqRel);
            }
        }

        let value = Arc::new(AtomicUsize::new(0));
        let needs_drop = SpinLock::new(NeedsDrop(value.clone()));
        assert_eq!(value.load(Ordering::Acquire), 0);

        {
            let inner = needs_drop.into_inner();
            assert_eq!(inner.0.load(Ordering::Acquire), 0);
        }

        assert_eq!(value.load(Ordering::Acquire), 1);
    }

    #[test]
    fn arc_nested() {
        // Tests nested spin locks and access to the underlying data.
        let sl = SpinLock::new(1);
        let arc = Arc::new(SpinLock::new(sl));
        thread::spawn(move || {
            let nested = arc.lock();
            let lock2 = nested.lock();
            assert_eq!(*lock2, 1);
        })
        .join()
        .unwrap();
    }

    #[test]
    fn arc_access_in_unwind() {
        let arc = Arc::new(SpinLock::new(1));
        let arc2 = arc.clone();
        thread::spawn(move || {
            struct Unwinder {
                i: Arc<SpinLock<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join()
        .expect_err("thread did not panic");
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn unsized_value() {
        let sl: &SpinLock<[i32]> = &SpinLock::new([1, 2, 3]);
        {
            let b = &mut *sl.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let expected: &[i32] = &[4, 2, 5];
        assert_eq!(&*sl.lock(), expected);
    }
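
    // Illustrative check of the `Default` and `From` impls.
    #[test]
    fn default_and_from() {
        // `Default` delegates to the inner type's default value.
        let sl: SpinLock<u32> = Default::default();
        assert_eq!(*sl.lock(), 0);

        // `From<T>` simply wraps the value in a new, unlocked `SpinLock`.
        let sl = SpinLock::from(NonCopy(23));
        assert_eq!(*sl.lock(), NonCopy(23));
    }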
}