// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use crate::raw_mutex::RawMutex;

/// A mutual exclusion primitive useful for protecting shared data
///
/// This mutex will block threads waiting for the lock to become available. The
/// mutex can be statically initialized or created by the `new`
/// constructor. Each mutex has a type parameter which represents the data that
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from `lock` and `try_lock`, which guarantees that the data is only
/// ever accessed when the mutex is locked.
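///
/// For example, `try_lock` returns an `Option` and never blocks; a brief
/// sketch of checking the lock without waiting:
///
/// ```
/// use parking_lot::Mutex;
///
/// let mutex = Mutex::new(5);
/// if let Some(guard) = mutex.try_lock() {
///     // We acquired the lock without blocking.
///     assert_eq!(*guard, 5);
/// } else {
///     // Another thread currently holds the lock.
/// }
/// ```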
///
/// # Fairness
///
/// A typical unfair lock can often end up in a situation where a single thread
/// quickly acquires and releases the same mutex in succession, starving the
/// other threads waiting to acquire it. This improves throughput because it
/// avoids a context switch when a thread tries to re-acquire a mutex it has
/// just released, but the waiting threads may be starved indefinitely.
///
/// This mutex uses [eventual fairness](https://trac.webkit.org/changeset/203350)
/// to ensure that the lock will be fair on average without sacrificing
/// throughput. This is done by forcing a fair unlock on average every 0.5ms,
/// which will force the lock to go to the next thread waiting for the mutex.
///
/// Additionally, any critical section longer than 1ms will always use a fair
/// unlock, which has a negligible impact on throughput considering the length
/// of the critical section.
///
/// You can also force a fair unlock by calling `MutexGuard::unlock_fair` when
/// unlocking a mutex instead of simply dropping the `MutexGuard`.
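///
/// A brief sketch of an explicit fair unlock:
///
/// ```
/// use parking_lot::{Mutex, MutexGuard};
///
/// let mutex = Mutex::new(0);
/// let mut guard = mutex.lock();
/// *guard += 1;
/// // Hand the lock to the next waiting thread (if any) rather than letting
/// // this thread potentially re-acquire it immediately.
/// MutexGuard::unlock_fair(guard);
/// ```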
///
/// # Differences from the standard library `Mutex`
///
/// - No poisoning; the lock is released normally on panic.
/// - Only requires 1 byte of space, whereas the standard library boxes the
///   `Mutex` due to platform limitations.
/// - Can be statically constructed.
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
/// - Efficient handling of micro-contention using adaptive spinning.
/// - Allows raw locking & unlocking without a guard.
/// - Supports eventual fairness so that the mutex is fair on average.
/// - Optionally allows making the mutex fair by calling `MutexGuard::unlock_fair`.
///
/// # Examples
///
/// ```
/// use parking_lot::Mutex;
/// use std::sync::{Arc, mpsc::channel};
/// use std::thread;
///
/// const N: usize = 10;
///
/// // Spawn a few threads to increment a shared variable (non-atomically), and
/// // let the main thread know once all increments are done.
/// //
/// // Here we're using an Arc to share memory among threads, and the data inside
/// // the Arc is protected with a mutex.
/// let data = Arc::new(Mutex::new(0));
///
/// let (tx, rx) = channel();
/// for _ in 0..N {
///     let (data, tx) = (Arc::clone(&data), tx.clone());
///     thread::spawn(move || {
///         // The shared state can only be accessed once the lock is held.
///         // Our non-atomic increment is safe because we're the only thread
///         // which can access the shared state when the lock is held.
///         let mut data = data.lock();
///         *data += 1;
///         if *data == N {
///             tx.send(()).unwrap();
///         }
///         // the lock is unlocked here when `data` goes out of scope.
///     });
/// }
///
/// rx.recv().unwrap();
/// ```
pub type Mutex<T> = lock_api::Mutex<RawMutex, T>;

/// Creates a new mutex in an unlocked state ready for use.
///
/// This allows creating a mutex in a constant context on stable Rust.
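///
/// For example, a brief sketch of a statically-initialized mutex (the name
/// `COUNTER` is just illustrative):
///
/// ```
/// use parking_lot::{const_mutex, Mutex};
///
/// static COUNTER: Mutex<u32> = const_mutex(0);
///
/// *COUNTER.lock() += 1;
/// assert_eq!(*COUNTER.lock(), 1);
/// ```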
pub const fn const_mutex<T>(val: T) -> Mutex<T> {
    Mutex::const_new(<RawMutex as lock_api::RawMutex>::INIT, val)
}

/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` and `DerefMut` implementations.
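///
/// A brief sketch of scoping a guard so the mutex is released at a known
/// point:
///
/// ```
/// use parking_lot::Mutex;
///
/// let mutex = Mutex::new(String::from("hello"));
/// {
///     let mut guard = mutex.lock();
///     // `DerefMut` gives mutable access to the protected `String`.
///     guard.push_str(" world");
/// } // the guard is dropped here, unlocking the mutex
/// assert_eq!(*mutex.lock(), "hello world");
/// ```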
pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>;

/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedMutexGuard` and `MutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
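///
/// A brief sketch of narrowing a guard down to one field with
/// `MutexGuard::map` (the `Point` struct is just illustrative):
///
/// ```
/// use parking_lot::{Mutex, MutexGuard};
///
/// struct Point { x: i32, y: i32 }
///
/// let mutex = Mutex::new(Point { x: 1, y: 2 });
/// let guard = mutex.lock();
/// // The mapped guard only grants access to the `x` field.
/// let mut x = MutexGuard::map(guard, |p| &mut p.x);
/// *x += 1;
/// drop(x);
/// assert_eq!(mutex.lock().x, 2);
/// ```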
pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>;

#[cfg(test)]
mod tests {
    use crate::{Condvar, Mutex};
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    #[cfg(feature = "serde")]
    use bincode::{deserialize, serialize};

    struct Packet<T>(Arc<(Mutex<T>, Condvar)>);

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    unsafe impl<T: Send> Send for Packet<T> {}
    unsafe impl<T> Sync for Packet<T> {}

    #[test]
    fn smoke() {
        let m = Mutex::new(());
        drop(m.lock());
        drop(m.lock());
    }

    #[test]
    fn lots_and_lots() {
        const J: u32 = 1000;
        const K: u32 = 3;

        let m = Arc::new(Mutex::new(0));

        fn inc(m: &Mutex<u32>) {
            for _ in 0..J {
                *m.lock() += 1;
            }
        }

        let (tx, rx) = channel();
        for _ in 0..K {
            let tx2 = tx.clone();
            let m2 = m.clone();
            thread::spawn(move || {
                inc(&m2);
                tx2.send(()).unwrap();
            });
            let tx2 = tx.clone();
            let m2 = m.clone();
            thread::spawn(move || {
                inc(&m2);
                tx2.send(()).unwrap();
            });
        }

        drop(tx);
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(*m.lock(), J * K * 2);
    }

    #[test]
    fn try_lock() {
        let m = Mutex::new(());
        *m.try_lock().unwrap() = ();
    }

    #[test]
    fn test_into_inner() {
        let m = Mutex::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = Mutex::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_get_mut() {
        let mut m = Mutex::new(NonCopy(10));
        *m.get_mut() = NonCopy(20);
        assert_eq!(m.into_inner(), NonCopy(20));
    }

    #[test]
    fn test_mutex_arc_condvar() {
        let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
        let packet2 = Packet(packet.0.clone());
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            // wait until parent gets in
            rx.recv().unwrap();
            let (lock, cvar) = &*packet2.0;
            let mut lock = lock.lock();
            *lock = true;
            cvar.notify_one();
        });

        let (lock, cvar) = &*packet.0;
        let mut lock = lock.lock();
        tx.send(()).unwrap();
        assert!(!*lock);
        while !*lock {
            cvar.wait(&mut lock);
        }
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested mutexes and access
        // to underlying data.
        let arc = Arc::new(Mutex::new(1));
        let arc2 = Arc::new(Mutex::new(arc));
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
    }

    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(Mutex::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || {
            struct Unwinder {
                i: Arc<Mutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_mutex_unsized() {
        let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    #[test]
    fn test_mutexguard_sync() {
        fn sync<T: Sync>(_: T) {}

        let mutex = Mutex::new(());
        sync(mutex.lock());
    }

    #[test]
    fn test_mutex_debug() {
        let mutex = Mutex::new(vec![0u8, 10]);

        assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }");
        let _lock = mutex.lock();
        assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }");
    }

    #[cfg(feature = "serde")]
    #[test]
    fn test_serde() {
        let contents: Vec<u8> = vec![0, 1, 2];
        let mutex = Mutex::new(contents.clone());

        let serialized = serialize(&mutex).unwrap();
        let deserialized: Mutex<Vec<u8>> = deserialize(&serialized).unwrap();

        assert_eq!(*(mutex.lock()), *(deserialized.lock()));
        assert_eq!(contents, *(deserialized.lock()));
    }
}