//! A naïve spinning mutex.
//!
//! Waiting threads hammer an atomic variable until it becomes available. Best-case latency is low, but worst-case
//! latency is theoretically infinite.

use core::{
    cell::UnsafeCell,
    fmt,
    ops::{Deref, DerefMut},
    sync::atomic::{AtomicBool, Ordering},
    marker::PhantomData,
};
use crate::{RelaxStrategy, Spin};

/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data.
///
/// # Example
///
/// ```
/// use spin;
///
/// let lock = spin::mutex::SpinMutex::<_>::new(0);
///
/// // Modify the data
/// *lock.lock() = 2;
///
/// // Read the data
/// let answer = *lock.lock();
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let thread_count = 1000;
/// let spin_mutex = Arc::new(spin::mutex::SpinMutex::<_>::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(thread_count + 1));
///
/// for _ in 0..thread_count {
///     let my_barrier = barrier.clone();
///     let my_lock = spin_mutex.clone();
///     std::thread::spawn(move || {
///         let mut guard = my_lock.lock();
///         *guard += 1;
///
///         // Release the lock to prevent a deadlock
///         drop(guard);
///         my_barrier.wait();
///     });
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, thread_count);
/// ```
pub struct SpinMutex<T: ?Sized, R = Spin> {
    phantom: PhantomData<R>,
    pub(crate) lock: AtomicBool,
    data: UnsafeCell<T>,
}

/// A guard that provides mutable data access.
///
/// When the guard falls out of scope it will release the lock.
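///
/// # Example
///
/// A brief sketch of guard-scoped unlocking:
///
/// ```
/// let lock = spin::mutex::SpinMutex::<_>::new(1);
/// {
///     let guard = lock.lock();
///     assert_eq!(*guard, 1);
/// } // `guard` is dropped here, releasing the lock
/// assert!(lock.try_lock().is_some());
/// ```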
pub struct SpinMutexGuard<'a, T: ?Sized + 'a> {
    lock: &'a AtomicBool,
    data: &'a mut T,
}

// Same unsafe impls as `std::sync::Mutex`
unsafe impl<T: ?Sized + Send, R> Sync for SpinMutex<T, R> {}
unsafe impl<T: ?Sized + Send, R> Send for SpinMutex<T, R> {}

impl<T, R> SpinMutex<T, R> {
    /// Creates a new [`SpinMutex`] wrapping the supplied data.
    ///
    /// # Example
    ///
    /// ```
    /// use spin::mutex::SpinMutex;
    ///
    /// static MUTEX: SpinMutex<()> = SpinMutex::<_>::new(());
    ///
    /// fn demo() {
    ///     let lock = MUTEX.lock();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    #[inline(always)]
    pub const fn new(data: T) -> Self {
        SpinMutex {
            lock: AtomicBool::new(false),
            data: UnsafeCell::new(data),
            phantom: PhantomData,
        }
    }

    /// Consumes this [`SpinMutex`] and unwraps the underlying data.
    ///
    /// # Example
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    /// assert_eq!(42, lock.into_inner());
    /// ```
    #[inline(always)]
    pub fn into_inner(self) -> T {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock.
        let SpinMutex { data, .. } = self;
        data.into_inner()
    }

    /// Returns a mutable pointer to the underlying data.
    ///
    /// This is mostly meant to be used for applications which require manual unlocking, but where
    /// storing both the lock and the pointer to the inner data gets inefficient.
    ///
    /// # Example
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    ///
    /// unsafe {
    ///     core::mem::forget(lock.lock());
    ///
    ///     assert_eq!(lock.as_mut_ptr().read(), 42);
    ///     lock.as_mut_ptr().write(58);
    ///
    ///     lock.force_unlock();
    /// }
    ///
    /// assert_eq!(*lock.lock(), 58);
    ///
    /// ```
    #[inline(always)]
    pub fn as_mut_ptr(&self) -> *mut T {
        self.data.get()
    }
}

impl<T: ?Sized, R: RelaxStrategy> SpinMutex<T, R> {
    /// Locks the [`SpinMutex`] and returns a guard that permits access to the inner data.
    ///
    /// The returned value may be dereferenced for data access
    /// and the lock will be dropped when the guard falls out of scope.
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(0);
    /// {
    ///     let mut data = lock.lock();
    ///     // The lock is now locked and the data can be accessed
    ///     *data += 1;
    ///     // The lock is implicitly dropped at the end of the scope
    /// }
    /// ```
    #[inline(always)]
    pub fn lock(&self) -> SpinMutexGuard<T> {
        // Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
        // when called in a loop.
        while self.lock.compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
            // Wait until the lock looks unlocked before retrying
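            // (spinning on a plain load rather than retrying the CAS keeps the cache line in a
            // shared state between waiters; this is the test-and-test-and-set pattern)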
            while self.is_locked() {
                R::relax();
            }
        }

        SpinMutexGuard {
            lock: &self.lock,
            data: unsafe { &mut *self.data.get() },
        }
    }
}

impl<T: ?Sized, R> SpinMutex<T, R> {
    /// Returns `true` if the lock is currently held.
    ///
    /// # Safety
    ///
    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
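    ///
    /// # Example
    ///
    /// A minimal sketch of using this as a heuristic check:
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    ///
    /// assert!(!lock.is_locked());
    ///
    /// let guard = lock.lock();
    /// assert!(lock.is_locked());
    ///
    /// drop(guard);
    /// assert!(!lock.is_locked());
    /// ```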
    #[inline(always)]
    pub fn is_locked(&self) -> bool {
        self.lock.load(Ordering::Relaxed)
    }

    /// Force unlock this [`SpinMutex`].
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if the lock is not held by the current
    /// thread. However, this can be useful in some instances for exposing the
    /// lock to FFI that doesn't know how to deal with RAII.
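    ///
    /// # Example
    ///
    /// A minimal sketch of manual unlocking, mirroring the `test_mutex_force_lock` test below:
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(());
    ///
    /// // Acquire the lock, then forget the guard so it never unlocks on drop.
    /// core::mem::forget(lock.lock());
    /// assert!(lock.is_locked());
    ///
    /// // SAFETY: the lock is known to be held (by the forgotten guard above) and no other
    /// // thread is using it, so releasing it manually is sound.
    /// unsafe { lock.force_unlock(); }
    /// assert!(lock.try_lock().is_some());
    /// ```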
    #[inline(always)]
    pub unsafe fn force_unlock(&self) {
        self.lock.store(false, Ordering::Release);
    }

    /// Try to lock this [`SpinMutex`], returning a lock guard if successful.
    ///
    /// # Example
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    ///
    /// let maybe_guard = lock.try_lock();
    /// assert!(maybe_guard.is_some());
    ///
    /// // `maybe_guard` is still held, so the second call fails
    /// let maybe_guard2 = lock.try_lock();
    /// assert!(maybe_guard2.is_none());
    /// ```
    #[inline(always)]
    pub fn try_lock(&self) -> Option<SpinMutexGuard<T>> {
        // The reason for using a strong compare_exchange is explained here:
        // https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107
        if self.lock.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_ok() {
            Some(SpinMutexGuard {
                lock: &self.lock,
                data: unsafe { &mut *self.data.get() },
            })
        } else {
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the [`SpinMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
    /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
    /// such, this is a 'zero-cost' operation.
    ///
    /// # Example
    ///
    /// ```
    /// let mut lock = spin::mutex::SpinMutex::<_>::new(0);
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.lock(), 10);
    /// ```
    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        // We know statically that there are no other references to `self`, so
        // there's no need to lock the inner mutex.
        unsafe { &mut *self.data.get() }
    }
}

impl<T: ?Sized + fmt::Debug, R> fmt::Debug for SpinMutex<T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.try_lock() {
            Some(guard) => write!(f, "Mutex {{ data: ")
                .and_then(|()| (&*guard).fmt(f))
                .and_then(|()| write!(f, "}}")),
            None => write!(f, "Mutex {{ <locked> }}"),
        }
    }
}

impl<T: ?Sized + Default, R> Default for SpinMutex<T, R> {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

impl<T, R> From<T> for SpinMutex<T, R> {
    fn from(data: T) -> Self {
        Self::new(data)
    }
}

impl<'a, T: ?Sized> SpinMutexGuard<'a, T> {
    /// Leak the lock guard, yielding a mutable reference to the underlying data.
    ///
    /// Note that this function will permanently lock the original [`SpinMutex`].
    ///
    /// ```
    /// let mylock = spin::mutex::SpinMutex::<_>::new(0);
    ///
    /// let data: &mut i32 = spin::mutex::SpinMutexGuard::leak(mylock.lock());
    ///
    /// *data = 1;
    /// assert_eq!(*data, 1);
    /// ```
    #[inline(always)]
    pub fn leak(this: Self) -> &'a mut T {
        let data = this.data as *mut _; // Keep it in pointer form temporarily to avoid double-aliasing
        core::mem::forget(this);
        unsafe { &mut *data }
    }
}

impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for SpinMutexGuard<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, T: ?Sized + fmt::Display> fmt::Display for SpinMutexGuard<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<'a, T: ?Sized> Deref for SpinMutexGuard<'a, T> {
    type Target = T;
    fn deref(&self) -> &T {
        self.data
    }
}

impl<'a, T: ?Sized> DerefMut for SpinMutexGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut T {
        self.data
    }
}

impl<'a, T: ?Sized> Drop for SpinMutexGuard<'a, T> {
    /// The dropping of the MutexGuard will release the lock it was created from.
    fn drop(&mut self) {
        self.lock.store(false, Ordering::Release);
    }
}

#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for SpinMutex<(), R> {
    type GuardMarker = lock_api_crate::GuardSend;

    const INIT: Self = Self::new(());

    fn lock(&self) {
        // Prevent guard destructor running
        core::mem::forget(Self::lock(self));
    }

    fn try_lock(&self) -> bool {
        // Prevent guard destructor running
        Self::try_lock(self).map(core::mem::forget).is_some()
    }

    unsafe fn unlock(&self) {
        self.force_unlock();
    }

    fn is_locked(&self) -> bool {
        Self::is_locked(self)
    }
}

#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    type SpinMutex<T> = super::SpinMutex<T>;

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let m = SpinMutex::<_>::new(());
        drop(m.lock());
        drop(m.lock());
    }

    #[test]
    fn lots_and_lots() {
        static M: SpinMutex<()> = SpinMutex::<_>::new(());
        static mut CNT: u32 = 0;
        const J: u32 = 1000;
        const K: u32 = 3;

        fn inc() {
            for _ in 0..J {
                unsafe {
                    let _g = M.lock();
                    CNT += 1;
                }
            }
        }

        let (tx, rx) = channel();
        for _ in 0..K {
            let tx2 = tx.clone();
            thread::spawn(move || {
                inc();
                tx2.send(()).unwrap();
            });
            let tx2 = tx.clone();
            thread::spawn(move || {
                inc();
                tx2.send(()).unwrap();
            });
        }

        drop(tx);
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(unsafe { CNT }, J * K * 2);
    }

    #[test]
    fn try_lock() {
        let mutex = SpinMutex::<_>::new(42);

        // First lock succeeds
        let a = mutex.try_lock();
        assert_eq!(a.as_ref().map(|r| **r), Some(42));

        // Additional lock fails
        let b = mutex.try_lock();
        assert!(b.is_none());

        // After dropping lock, it succeeds again
        ::core::mem::drop(a);
        let c = mutex.try_lock();
        assert_eq!(c.as_ref().map(|r| **r), Some(42));
    }

    #[test]
    fn test_into_inner() {
        let m = SpinMutex::<_>::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = SpinMutex::<_>::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested mutexes and access
        // to underlying data.
        let arc = Arc::new(SpinMutex::<_>::new(1));
        let arc2 = Arc::new(SpinMutex::<_>::new(arc));
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
    }

    #[test]
    #[ignore = "Android uses panic_abort"]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(SpinMutex::<_>::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<SpinMutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_mutex_unsized() {
        let mutex: &SpinMutex<[i32]> = &SpinMutex::<_>::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    #[test]
    fn test_mutex_force_lock() {
        let lock = SpinMutex::<_>::new(());
        ::std::mem::forget(lock.lock());
        unsafe {
            lock.force_unlock();
        }
        assert!(lock.try_lock().is_some());
    }
}