• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2018 Amanieu d'Antras
2 //
3 // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
4 // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
5 // http://opensource.org/licenses/MIT>, at your option. This file may not be
6 // copied, modified, or distributed except according to those terms.
7 
8 use crate::{
9     mutex::{RawMutex, RawMutexFair, RawMutexTimed},
10     GuardNoSend,
11 };
12 use core::{
13     cell::{Cell, UnsafeCell},
14     fmt,
15     marker::PhantomData,
16     mem,
17     num::NonZeroUsize,
18     ops::Deref,
19     sync::atomic::{AtomicUsize, Ordering},
20 };
21 
22 #[cfg(feature = "arc_lock")]
23 use alloc::sync::Arc;
24 #[cfg(feature = "arc_lock")]
25 use core::mem::ManuallyDrop;
26 #[cfg(feature = "arc_lock")]
27 use core::ptr;
28 
29 #[cfg(feature = "owning_ref")]
30 use owning_ref::StableAddress;
31 
32 #[cfg(feature = "serde")]
33 use serde::{Deserialize, Deserializer, Serialize, Serializer};
34 
/// Helper trait which returns a non-zero thread ID.
///
/// The simplest way to implement this trait is to return the address of a
/// thread-local variable.
///
/// # Safety
///
/// Implementations of this trait must ensure that no two active threads share
/// the same thread ID. However the ID of a thread that has exited can be
/// re-used since that thread is no longer active.
pub unsafe trait GetThreadId {
    /// Initial value.
    // A “non-constant” const item is a legacy way to supply an initialized value to downstream
    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self;

    /// Returns a non-zero thread ID which identifies the current thread of
    /// execution.
    ///
    /// The value must never be zero, hence the `NonZeroUsize` return type:
    /// zero is reserved by `RawReentrantMutex` to mean "no owner".
    fn nonzero_thread_id(&self) -> NonZeroUsize;
}
56 
/// A raw mutex type that wraps another raw mutex to provide reentrancy.
///
/// Although this has the same methods as the [`RawMutex`] trait, it does
/// not implement it, and should not be used in the same way, since this
/// mutex can successfully acquire a lock multiple times in the same thread.
/// Only use this when you know you want a raw mutex that can be locked
/// reentrantly; you probably want [`ReentrantMutex`] instead.
///
/// [`RawMutex`]: trait.RawMutex.html
/// [`ReentrantMutex`]: struct.ReentrantMutex.html
pub struct RawReentrantMutex<R, G> {
    // Thread ID of the owning thread, or 0 when unlocked.
    owner: AtomicUsize,
    // How many times the owning thread has acquired the lock; only written
    // while the inner mutex is held (or being acquired) by that thread.
    lock_count: Cell<usize>,
    // The underlying, non-reentrant mutex.
    mutex: R,
    // Helper used to obtain a non-zero ID for the current thread.
    get_thread_id: G,
}
73 
// SAFETY: `owner` is atomic and `lock_count` is only accessed by the thread
// that holds (or is acquiring) the inner mutex, so the wrapper is Send/Sync
// whenever the inner mutex and thread-ID helper are.
unsafe impl<R: RawMutex + Send, G: GetThreadId + Send> Send for RawReentrantMutex<R, G> {}
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync> Sync for RawReentrantMutex<R, G> {}
76 
77 impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
78     /// Initial value for an unlocked mutex.
79     #[allow(clippy::declare_interior_mutable_const)]
80     pub const INIT: Self = RawReentrantMutex {
81         owner: AtomicUsize::new(0),
82         lock_count: Cell::new(0),
83         mutex: R::INIT,
84         get_thread_id: G::INIT,
85     };
86 
87     #[inline]
lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool88     fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
89         let id = self.get_thread_id.nonzero_thread_id().get();
90         if self.owner.load(Ordering::Relaxed) == id {
91             self.lock_count.set(
92                 self.lock_count
93                     .get()
94                     .checked_add(1)
95                     .expect("ReentrantMutex lock count overflow"),
96             );
97         } else {
98             if !try_lock() {
99                 return false;
100             }
101             self.owner.store(id, Ordering::Relaxed);
102             debug_assert_eq!(self.lock_count.get(), 0);
103             self.lock_count.set(1);
104         }
105         true
106     }
107 
108     /// Acquires this mutex, blocking if it's held by another thread.
109     #[inline]
lock(&self)110     pub fn lock(&self) {
111         self.lock_internal(|| {
112             self.mutex.lock();
113             true
114         });
115     }
116 
117     /// Attempts to acquire this mutex without blocking. Returns `true`
118     /// if the lock was successfully acquired and `false` otherwise.
119     #[inline]
try_lock(&self) -> bool120     pub fn try_lock(&self) -> bool {
121         self.lock_internal(|| self.mutex.try_lock())
122     }
123 
124     /// Unlocks this mutex. The inner mutex may not be unlocked if
125     /// this mutex was acquired previously in the current thread.
126     ///
127     /// # Safety
128     ///
129     /// This method may only be called if the mutex is held by the current thread.
130     #[inline]
unlock(&self)131     pub unsafe fn unlock(&self) {
132         let lock_count = self.lock_count.get() - 1;
133         self.lock_count.set(lock_count);
134         if lock_count == 0 {
135             self.owner.store(0, Ordering::Relaxed);
136             self.mutex.unlock();
137         }
138     }
139 
140     /// Checks whether the mutex is currently locked.
141     #[inline]
is_locked(&self) -> bool142     pub fn is_locked(&self) -> bool {
143         self.mutex.is_locked()
144     }
145 
146     /// Checks whether the mutex is currently held by the current thread.
147     #[inline]
is_owned_by_current_thread(&self) -> bool148     pub fn is_owned_by_current_thread(&self) -> bool {
149         let id = self.get_thread_id.nonzero_thread_id().get();
150         self.owner.load(Ordering::Relaxed) == id
151     }
152 }
153 
154 impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
155     /// Unlocks this mutex using a fair unlock protocol. The inner mutex
156     /// may not be unlocked if this mutex was acquired previously in the
157     /// current thread.
158     ///
159     /// # Safety
160     ///
161     /// This method may only be called if the mutex is held by the current thread.
162     #[inline]
unlock_fair(&self)163     pub unsafe fn unlock_fair(&self) {
164         let lock_count = self.lock_count.get() - 1;
165         self.lock_count.set(lock_count);
166         if lock_count == 0 {
167             self.owner.store(0, Ordering::Relaxed);
168             self.mutex.unlock_fair();
169         }
170     }
171 
172     /// Temporarily yields the mutex to a waiting thread if there is one.
173     ///
174     /// This method is functionally equivalent to calling `unlock_fair` followed
175     /// by `lock`, however it can be much more efficient in the case where there
176     /// are no waiting threads.
177     ///
178     /// # Safety
179     ///
180     /// This method may only be called if the mutex is held by the current thread.
181     #[inline]
bump(&self)182     pub unsafe fn bump(&self) {
183         if self.lock_count.get() == 1 {
184             let id = self.owner.load(Ordering::Relaxed);
185             self.owner.store(0, Ordering::Relaxed);
186             self.mutex.bump();
187             self.owner.store(id, Ordering::Relaxed);
188         }
189     }
190 }
191 
192 impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> {
193     /// Attempts to acquire this lock until a timeout is reached.
194     #[inline]
try_lock_until(&self, timeout: R::Instant) -> bool195     pub fn try_lock_until(&self, timeout: R::Instant) -> bool {
196         self.lock_internal(|| self.mutex.try_lock_until(timeout))
197     }
198 
199     /// Attempts to acquire this lock until a timeout is reached.
200     #[inline]
try_lock_for(&self, timeout: R::Duration) -> bool201     pub fn try_lock_for(&self, timeout: R::Duration) -> bool {
202         self.lock_internal(|| self.mutex.try_lock_for(timeout))
203     }
204 }
205 
/// A mutex which can be recursively locked by a single thread.
///
/// This type is identical to `Mutex` except for the following points:
///
/// - Locking multiple times from the same thread will work correctly instead of
///   deadlocking.
/// - `ReentrantMutexGuard` does not give mutable references to the locked data.
///   Use a `RefCell` if you need this.
///
/// See [`Mutex`](struct.Mutex.html) for more details about the underlying mutex
/// primitive.
pub struct ReentrantMutex<R, G, T: ?Sized> {
    // Reentrancy bookkeeping plus the underlying raw mutex.
    raw: RawReentrantMutex<R, G>,
    // The protected data; must be last so `T: ?Sized` works.
    data: UnsafeCell<T>,
}
221 
// SAFETY: sending the mutex moves the protected `T`, so `T: Send` is required;
// sharing it only ever hands out `&T` through a guard on the locking thread,
// which is why `T: Send` (not `T: Sync`) suffices for `Sync` as well.
unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
    for ReentrantMutex<R, G, T>
{
}
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync
    for ReentrantMutex<R, G, T>
{
}
230 
231 impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
232     /// Creates a new reentrant mutex in an unlocked state ready for use.
233     #[cfg(feature = "nightly")]
234     #[inline]
new(val: T) -> ReentrantMutex<R, G, T>235     pub const fn new(val: T) -> ReentrantMutex<R, G, T> {
236         ReentrantMutex {
237             data: UnsafeCell::new(val),
238             raw: RawReentrantMutex {
239                 owner: AtomicUsize::new(0),
240                 lock_count: Cell::new(0),
241                 mutex: R::INIT,
242                 get_thread_id: G::INIT,
243             },
244         }
245     }
246 
247     /// Creates a new reentrant mutex in an unlocked state ready for use.
248     #[cfg(not(feature = "nightly"))]
249     #[inline]
new(val: T) -> ReentrantMutex<R, G, T>250     pub fn new(val: T) -> ReentrantMutex<R, G, T> {
251         ReentrantMutex {
252             data: UnsafeCell::new(val),
253             raw: RawReentrantMutex {
254                 owner: AtomicUsize::new(0),
255                 lock_count: Cell::new(0),
256                 mutex: R::INIT,
257                 get_thread_id: G::INIT,
258             },
259         }
260     }
261 
262     /// Consumes this mutex, returning the underlying data.
263     #[inline]
into_inner(self) -> T264     pub fn into_inner(self) -> T {
265         self.data.into_inner()
266     }
267 }
268 
impl<R, G, T> ReentrantMutex<R, G, T> {
    /// Creates a new reentrant mutex based on a pre-existing raw mutex and a
    /// helper to get the thread ID.
    ///
    /// This allows creating a reentrant mutex in a constant context on stable
    /// Rust.
    #[inline]
    pub const fn const_new(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            // Start unlocked: no owner, zero recursion depth.
            raw: RawReentrantMutex {
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: raw_mutex,
                get_thread_id,
            },
        }
    }
}
288 
289 impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
290     /// # Safety
291     ///
292     /// The lock must be held when calling this method.
293     #[inline]
guard(&self) -> ReentrantMutexGuard<'_, R, G, T>294     unsafe fn guard(&self) -> ReentrantMutexGuard<'_, R, G, T> {
295         ReentrantMutexGuard {
296             remutex: &self,
297             marker: PhantomData,
298         }
299     }
300 
301     /// Acquires a reentrant mutex, blocking the current thread until it is able
302     /// to do so.
303     ///
304     /// If the mutex is held by another thread then this function will block the
305     /// local thread until it is available to acquire the mutex. If the mutex is
306     /// already held by the current thread then this function will increment the
307     /// lock reference count and return immediately. Upon returning,
308     /// the thread is the only thread with the mutex held. An RAII guard is
309     /// returned to allow scoped unlock of the lock. When the guard goes out of
310     /// scope, the mutex will be unlocked.
311     #[inline]
lock(&self) -> ReentrantMutexGuard<'_, R, G, T>312     pub fn lock(&self) -> ReentrantMutexGuard<'_, R, G, T> {
313         self.raw.lock();
314         // SAFETY: The lock is held, as required.
315         unsafe { self.guard() }
316     }
317 
318     /// Attempts to acquire this lock.
319     ///
320     /// If the lock could not be acquired at this time, then `None` is returned.
321     /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
322     /// guard is dropped.
323     ///
324     /// This function does not block.
325     #[inline]
try_lock(&self) -> Option<ReentrantMutexGuard<'_, R, G, T>>326     pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
327         if self.raw.try_lock() {
328             // SAFETY: The lock is held, as required.
329             Some(unsafe { self.guard() })
330         } else {
331             None
332         }
333     }
334 
335     /// Returns a mutable reference to the underlying data.
336     ///
337     /// Since this call borrows the `ReentrantMutex` mutably, no actual locking needs to
338     /// take place---the mutable borrow statically guarantees no locks exist.
339     #[inline]
get_mut(&mut self) -> &mut T340     pub fn get_mut(&mut self) -> &mut T {
341         unsafe { &mut *self.data.get() }
342     }
343 
344     /// Checks whether the mutex is currently locked.
345     #[inline]
is_locked(&self) -> bool346     pub fn is_locked(&self) -> bool {
347         self.raw.is_locked()
348     }
349 
350     /// Checks whether the mutex is currently held by the current thread.
351     #[inline]
is_owned_by_current_thread(&self) -> bool352     pub fn is_owned_by_current_thread(&self) -> bool {
353         self.raw.is_owned_by_current_thread()
354     }
355 
356     /// Forcibly unlocks the mutex.
357     ///
358     /// This is useful when combined with `mem::forget` to hold a lock without
359     /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
360     /// dealing with FFI.
361     ///
362     /// # Safety
363     ///
364     /// This method must only be called if the current thread logically owns a
365     /// `ReentrantMutexGuard` but that guard has be discarded using `mem::forget`.
366     /// Behavior is undefined if a mutex is unlocked when not locked.
367     #[inline]
force_unlock(&self)368     pub unsafe fn force_unlock(&self) {
369         self.raw.unlock();
370     }
371 
372     /// Returns the underlying raw mutex object.
373     ///
374     /// Note that you will most likely need to import the `RawMutex` trait from
375     /// `lock_api` to be able to call functions on the raw mutex.
376     ///
377     /// # Safety
378     ///
379     /// This method is unsafe because it allows unlocking a mutex while
380     /// still holding a reference to a `ReentrantMutexGuard`.
381     #[inline]
raw(&self) -> &R382     pub unsafe fn raw(&self) -> &R {
383         &self.raw.mutex
384     }
385 
386     /// Returns a raw pointer to the underlying data.
387     ///
388     /// This is useful when combined with `mem::forget` to hold a lock without
389     /// the need to maintain a `ReentrantMutexGuard` object alive, for example
390     /// when dealing with FFI.
391     ///
392     /// # Safety
393     ///
394     /// You must ensure that there are no data races when dereferencing the
395     /// returned pointer, for example if the current thread logically owns a
396     /// `ReentrantMutexGuard` but that guard has been discarded using
397     /// `mem::forget`.
398     #[inline]
data_ptr(&self) -> *mut T399     pub fn data_ptr(&self) -> *mut T {
400         self.data.get()
401     }
402 
403     /// # Safety
404     ///
405     /// The lock must be held before calling this method.
406     #[cfg(feature = "arc_lock")]
407     #[inline]
guard_arc(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T>408     unsafe fn guard_arc(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T> {
409         ArcReentrantMutexGuard {
410             remutex: self.clone(),
411             marker: PhantomData,
412         }
413     }
414 
415     /// Acquires a reentrant mutex through an `Arc`.
416     ///
417     /// This method is similar to the `lock` method; however, it requires the `ReentrantMutex` to be inside of an
418     /// `Arc` and the resulting mutex guard has no lifetime requirements.
419     #[cfg(feature = "arc_lock")]
420     #[inline]
lock_arc(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T>421     pub fn lock_arc(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T> {
422         self.raw.lock();
423         // SAFETY: locking guarantee is upheld
424         unsafe { self.guard_arc() }
425     }
426 
427     /// Attempts to acquire a reentrant mutex through an `Arc`.
428     ///
429     /// This method is similar to the `try_lock` method; however, it requires the `ReentrantMutex` to be inside
430     /// of an `Arc` and the resulting mutex guard has no lifetime requirements.
431     #[cfg(feature = "arc_lock")]
432     #[inline]
try_lock_arc(self: &Arc<Self>) -> Option<ArcReentrantMutexGuard<R, G, T>>433     pub fn try_lock_arc(self: &Arc<Self>) -> Option<ArcReentrantMutexGuard<R, G, T>> {
434         if self.raw.try_lock() {
435             // SAFETY: locking guarantee is upheld
436             Some(unsafe { self.guard_arc() })
437         } else {
438             None
439         }
440     }
441 }
442 
impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Forcibly unlocks the mutex using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has be discarded using `mem::forget`.
    /// Behavior is undefined if a mutex is unlocked when not locked.
    #[inline]
    pub unsafe fn force_unlock_fair(&self) {
        // Delegates to the raw mutex; the inner lock is only released when
        // this was the outermost acquisition.
        self.raw.unlock_fair();
    }
}
460 
461 impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
462     /// Attempts to acquire this lock until a timeout is reached.
463     ///
464     /// If the lock could not be acquired before the timeout expired, then
465     /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
466     /// be unlocked when the guard is dropped.
467     #[inline]
try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<'_, R, G, T>>468     pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
469         if self.raw.try_lock_for(timeout) {
470             // SAFETY: The lock is held, as required.
471             Some(unsafe { self.guard() })
472         } else {
473             None
474         }
475     }
476 
477     /// Attempts to acquire this lock until a timeout is reached.
478     ///
479     /// If the lock could not be acquired before the timeout expired, then
480     /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
481     /// be unlocked when the guard is dropped.
482     #[inline]
try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<'_, R, G, T>>483     pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
484         if self.raw.try_lock_until(timeout) {
485             // SAFETY: The lock is held, as required.
486             Some(unsafe { self.guard() })
487         } else {
488             None
489         }
490     }
491 
492     /// Attempts to acquire this lock until a timeout is reached, through an `Arc`.
493     ///
494     /// This method is similar to the `try_lock_for` method; however, it requires the `ReentrantMutex` to be
495     /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements.
496     #[cfg(feature = "arc_lock")]
497     #[inline]
try_lock_arc_for( self: &Arc<Self>, timeout: R::Duration, ) -> Option<ArcReentrantMutexGuard<R, G, T>>498     pub fn try_lock_arc_for(
499         self: &Arc<Self>,
500         timeout: R::Duration,
501     ) -> Option<ArcReentrantMutexGuard<R, G, T>> {
502         if self.raw.try_lock_for(timeout) {
503             // SAFETY: locking guarantee is upheld
504             Some(unsafe { self.guard_arc() })
505         } else {
506             None
507         }
508     }
509 
510     /// Attempts to acquire this lock until a timeout is reached, through an `Arc`.
511     ///
512     /// This method is similar to the `try_lock_until` method; however, it requires the `ReentrantMutex` to be
513     /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements.
514     #[cfg(feature = "arc_lock")]
515     #[inline]
try_lock_arc_until( self: &Arc<Self>, timeout: R::Instant, ) -> Option<ArcReentrantMutexGuard<R, G, T>>516     pub fn try_lock_arc_until(
517         self: &Arc<Self>,
518         timeout: R::Instant,
519     ) -> Option<ArcReentrantMutexGuard<R, G, T>> {
520         if self.raw.try_lock_until(timeout) {
521             // SAFETY: locking guarantee is upheld
522             Some(unsafe { self.guard_arc() })
523         } else {
524             None
525         }
526     }
527 }
528 
529 impl<R: RawMutex, G: GetThreadId, T: ?Sized + Default> Default for ReentrantMutex<R, G, T> {
530     #[inline]
default() -> ReentrantMutex<R, G, T>531     fn default() -> ReentrantMutex<R, G, T> {
532         ReentrantMutex::new(Default::default())
533     }
534 }
535 
536 impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> {
537     #[inline]
from(t: T) -> ReentrantMutex<R, G, T>538     fn from(t: T) -> ReentrantMutex<R, G, T> {
539         ReentrantMutex::new(t)
540     }
541 }
542 
543 impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result544     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
545         match self.try_lock() {
546             Some(guard) => f
547                 .debug_struct("ReentrantMutex")
548                 .field("data", &&*guard)
549                 .finish(),
550             None => {
551                 struct LockedPlaceholder;
552                 impl fmt::Debug for LockedPlaceholder {
553                     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
554                         f.write_str("<locked>")
555                     }
556                 }
557 
558                 f.debug_struct("ReentrantMutex")
559                     .field("data", &LockedPlaceholder)
560                     .finish()
561             }
562         }
563     }
564 }
565 
// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, G, T> Serialize for ReentrantMutex<R, G, T>
where
    R: RawMutex,
    G: GetThreadId,
    T: Serialize + ?Sized,
{
    /// Serializes the protected data, acquiring the lock for the duration of
    /// serialization (this blocks if the mutex is held by another thread).
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.lock().serialize(serializer)
    }
}
581 
#[cfg(feature = "serde")]
impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex<R, G, T>
where
    R: RawMutex,
    G: GetThreadId,
    T: Deserialize<'de> + ?Sized,
{
    /// Deserializes a value and wraps it in a fresh, unlocked mutex.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let value = T::deserialize(deserializer)?;
        Ok(ReentrantMutex::new(value))
    }
}
596 
/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
/// is dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` implementation.
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
    // The mutex this guard will unlock on drop.
    remutex: &'a ReentrantMutex<R, G, T>,
    // `GuardNoSend` keeps the guard `!Send`: it must be dropped on the
    // thread that acquired the lock.
    marker: PhantomData<(&'a T, GuardNoSend)>,
}
607 
// SAFETY: the guard only exposes `&T` (via `Deref`), so sharing it between
// threads is sound when `T: Sync` and the mutex itself is `Sync`.
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
    for ReentrantMutexGuard<'a, R, G, T>
{
}
612 
613 impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> {
614     /// Returns a reference to the original `ReentrantMutex` object.
remutex(s: &Self) -> &'a ReentrantMutex<R, G, T>615     pub fn remutex(s: &Self) -> &'a ReentrantMutex<R, G, T> {
616         s.remutex
617     }
618 
619     /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
620     ///
621     /// This operation cannot fail as the `ReentrantMutexGuard` passed
622     /// in already locked the mutex.
623     ///
624     /// This is an associated function that needs to be
625     /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of
626     /// the same name on the contents of the locked data.
627     #[inline]
map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U> where F: FnOnce(&T) -> &U,628     pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
629     where
630         F: FnOnce(&T) -> &U,
631     {
632         let raw = &s.remutex.raw;
633         let data = f(unsafe { &*s.remutex.data.get() });
634         mem::forget(s);
635         MappedReentrantMutexGuard {
636             raw,
637             data,
638             marker: PhantomData,
639         }
640     }
641 
642     /// Attempts to make  a new `MappedReentrantMutexGuard` for a component of the
643     /// locked data. The original guard is return if the closure returns `None`.
644     ///
645     /// This operation cannot fail as the `ReentrantMutexGuard` passed
646     /// in already locked the mutex.
647     ///
648     /// This is an associated function that needs to be
649     /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of
650     /// the same name on the contents of the locked data.
651     #[inline]
try_map<U: ?Sized, F>( s: Self, f: F, ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self> where F: FnOnce(&mut T) -> Option<&mut U>,652     pub fn try_map<U: ?Sized, F>(
653         s: Self,
654         f: F,
655     ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
656     where
657         F: FnOnce(&mut T) -> Option<&mut U>,
658     {
659         let raw = &s.remutex.raw;
660         let data = match f(unsafe { &mut *s.remutex.data.get() }) {
661             Some(data) => data,
662             None => return Err(s),
663         };
664         mem::forget(s);
665         Ok(MappedReentrantMutexGuard {
666             raw,
667             data,
668             marker: PhantomData,
669         })
670     }
671 
672     /// Temporarily unlocks the mutex to execute the given function.
673     ///
674     /// This is safe because `&mut` guarantees that there exist no other
675     /// references to the data protected by the mutex.
676     #[inline]
unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,677     pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
678     where
679         F: FnOnce() -> U,
680     {
681         // Safety: A ReentrantMutexGuard always holds the lock.
682         unsafe {
683             s.remutex.raw.unlock();
684         }
685         defer!(s.remutex.raw.lock());
686         f()
687     }
688 }
689 
690 impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
691     ReentrantMutexGuard<'a, R, G, T>
692 {
693     /// Unlocks the mutex using a fair unlock protocol.
694     ///
695     /// By default, mutexes are unfair and allow the current thread to re-lock
696     /// the mutex before another has the chance to acquire the lock, even if
697     /// that thread has been blocked on the mutex for a long time. This is the
698     /// default because it allows much higher throughput as it avoids forcing a
699     /// context switch on every mutex unlock. This can result in one thread
700     /// acquiring a mutex many more times than other threads.
701     ///
702     /// However in some cases it can be beneficial to ensure fairness by forcing
703     /// the lock to pass on to a waiting thread if there is one. This is done by
704     /// using this method instead of dropping the `ReentrantMutexGuard` normally.
705     #[inline]
unlock_fair(s: Self)706     pub fn unlock_fair(s: Self) {
707         // Safety: A ReentrantMutexGuard always holds the lock
708         unsafe {
709             s.remutex.raw.unlock_fair();
710         }
711         mem::forget(s);
712     }
713 
714     /// Temporarily unlocks the mutex to execute the given function.
715     ///
716     /// The mutex is unlocked a fair unlock protocol.
717     ///
718     /// This is safe because `&mut` guarantees that there exist no other
719     /// references to the data protected by the mutex.
720     #[inline]
unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,721     pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
722     where
723         F: FnOnce() -> U,
724     {
725         // Safety: A ReentrantMutexGuard always holds the lock
726         unsafe {
727             s.remutex.raw.unlock_fair();
728         }
729         defer!(s.remutex.raw.lock());
730         f()
731     }
732 
733     /// Temporarily yields the mutex to a waiting thread if there is one.
734     ///
735     /// This method is functionally equivalent to calling `unlock_fair` followed
736     /// by `lock`, however it can be much more efficient in the case where there
737     /// are no waiting threads.
738     #[inline]
bump(s: &mut Self)739     pub fn bump(s: &mut Self) {
740         // Safety: A ReentrantMutexGuard always holds the lock
741         unsafe {
742             s.remutex.raw.bump();
743         }
744     }
745 }
746 
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
    for ReentrantMutexGuard<'a, R, G, T>
{
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        // SAFETY: the guard guarantees the lock is held by this thread, and
        // guards only ever hand out shared references to the data.
        unsafe { &*self.remutex.data.get() }
    }
}
756 
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
    for ReentrantMutexGuard<'a, R, G, T>
{
    /// Releases one level of the reentrant lock when the guard goes out of scope.
    #[inline]
    fn drop(&mut self) {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            self.remutex.raw.unlock();
        }
    }
}
768 
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for ReentrantMutexGuard<'a, R, G, T>
{
    /// Formats the protected data (not the guard itself).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
776 
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for ReentrantMutexGuard<'a, R, G, T>
{
    /// Formats the protected data (not the guard itself).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}
784 
#[cfg(feature = "owning_ref")]
// SAFETY: the guard dereferences to data stored inside the mutex allocation,
// and moving the guard does not move that data, so the referent's address is
// stable for the guard's lifetime.
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
    for ReentrantMutexGuard<'a, R, G, T>
{
}
790 
/// An RAII mutex guard returned by the `Arc` locking operations on `ReentrantMutex`.
///
/// This is similar to the `ReentrantMutexGuard` struct, except instead of using a reference to unlock the
/// `Mutex` it uses an `Arc<ReentrantMutex>`. This has several advantages, most notably that it has an `'static`
/// lifetime.
#[cfg(feature = "arc_lock")]
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ArcReentrantMutexGuard<R: RawMutex, G: GetThreadId, T: ?Sized> {
    // Owning handle to the mutex; keeps it alive and is used to unlock on drop.
    remutex: Arc<ReentrantMutex<R, G, T>>,
    // `GuardNoSend` keeps the guard `!Send`: it must be dropped on the
    // thread that acquired the lock.
    marker: PhantomData<GuardNoSend>,
}
802 
#[cfg(feature = "arc_lock")]
impl<R: RawMutex, G: GetThreadId, T: ?Sized> ArcReentrantMutexGuard<R, G, T> {
    /// Returns a reference to the `ReentrantMutex` this object is guarding, contained in its `Arc`.
    pub fn remutex(s: &Self) -> &Arc<ReentrantMutex<R, G, T>> {
        &s.remutex
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            s.remutex.raw.unlock();
        }
        // Re-acquire the lock when `f` returns *or* unwinds, so the guard's
        // invariant (lock held) is restored before the guard can be dropped.
        defer!(s.remutex.raw.lock());
        f()
    }
}
827 
#[cfg(feature = "arc_lock")]
impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ArcReentrantMutexGuard<R, G, T> {
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// This is functionally identical to the `unlock_fair` method on [`ReentrantMutexGuard`].
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }

        // The lock was already released above, so the guard's own `Drop`
        // (which unlocks again) must not run. `ManuallyDrop` suppresses it,
        // while `drop_in_place` still drops the `Arc` field so the mutex's
        // refcount is decremented.
        let mut s = ManuallyDrop::new(s);
        unsafe { ptr::drop_in_place(&mut s.remutex) };
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is functionally identical to the `unlocked_fair` method on [`ReentrantMutexGuard`].
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }
        // Re-acquire the lock when `f` returns *or* unwinds, restoring the
        // guard's invariant before it can be dropped.
        defer!(s.remutex.raw.lock());
        f()
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This is functionally equivalent to the `bump` method on [`ReentrantMutexGuard`].
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.bump();
        }
    }
}
872 
#[cfg(feature = "arc_lock")]
impl<R: RawMutex, G: GetThreadId, T: ?Sized> Deref for ArcReentrantMutexGuard<R, G, T> {
    type Target = T;

    /// Gives shared access to the data protected by the mutex.
    #[inline]
    fn deref(&self) -> &T {
        // SAFETY: this guard holds the lock, so a shared reference into the
        // `UnsafeCell` is valid for as long as the guard lives.
        let ptr = self.remutex.data.get();
        unsafe { &*ptr }
    }
}
881 
#[cfg(feature = "arc_lock")]
impl<R: RawMutex, G: GetThreadId, T: ?Sized> Drop for ArcReentrantMutexGuard<R, G, T> {
    /// Releases the lock; the `Arc` field is dropped afterwards as usual,
    /// decrementing the mutex's refcount.
    #[inline]
    fn drop(&mut self) {
        // SAFETY: the guard is only created with the lock held, so releasing
        // it exactly once here is sound.
        unsafe { self.remutex.raw.unlock() }
    }
}
892 
/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
    // Borrow of the raw lock, kept so `Drop`/`unlock_fair` can release it;
    // the mapped guard no longer knows about the full `ReentrantMutex`.
    raw: &'a RawReentrantMutex<R, G>,
    // Points at the protected data or a subfield of it (whatever the `map`
    // closure returned); valid while the lock is held.
    data: *const T,
    // Makes the guard act like `&'a T` for variance/borrow purposes.
    marker: PhantomData<&'a T>,
}
906 
// SAFETY(review): the guard only hands out `&T` (see its `Deref` impl), so
// sharing the guard between threads amounts to sharing `&T` and `&RawReentrantMutex`,
// hence the `T: Sync`, `R: Sync` and `G: Sync` bounds.
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
    for MappedReentrantMutexGuard<'a, R, G, T>
{
}
911 
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    MappedReentrantMutexGuard<'a, R, G, T>
{
    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = s.raw;
        // Safety: the guard holds the lock, so `s.data` is valid to read.
        // If `f` panics here, `s` is dropped normally and the lock released.
        let data = f(unsafe { &*s.data });
        // Lock ownership transfers to the new guard: suppress `s`'s
        // unlock-on-drop so the mutex stays locked across the handoff.
        mem::forget(s);
        MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(
        s: Self,
        f: F,
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = s.raw;
        // Safety: the guard holds the lock, so `s.data` is valid to read.
        let data = match f(unsafe { &*s.data }) {
            Some(data) => data,
            // Closure declined: hand the original guard back, still locked.
            None => return Err(s),
        };
        // Lock ownership transfers to the new guard: suppress `s`'s
        // unlock-on-drop so the mutex stays locked across the handoff.
        mem::forget(s);
        Ok(MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }
}
968 
969 impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
970     MappedReentrantMutexGuard<'a, R, G, T>
971 {
972     /// Unlocks the mutex using a fair unlock protocol.
973     ///
974     /// By default, mutexes are unfair and allow the current thread to re-lock
975     /// the mutex before another has the chance to acquire the lock, even if
976     /// that thread has been blocked on the mutex for a long time. This is the
977     /// default because it allows much higher throughput as it avoids forcing a
978     /// context switch on every mutex unlock. This can result in one thread
979     /// acquiring a mutex many more times than other threads.
980     ///
981     /// However in some cases it can be beneficial to ensure fairness by forcing
982     /// the lock to pass on to a waiting thread if there is one. This is done by
983     /// using this method instead of dropping the `ReentrantMutexGuard` normally.
984     #[inline]
unlock_fair(s: Self)985     pub fn unlock_fair(s: Self) {
986         // Safety: A MappedReentrantMutexGuard always holds the lock
987         unsafe {
988             s.raw.unlock_fair();
989         }
990         mem::forget(s);
991     }
992 }
993 
994 impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
995     for MappedReentrantMutexGuard<'a, R, G, T>
996 {
997     type Target = T;
998     #[inline]
deref(&self) -> &T999     fn deref(&self) -> &T {
1000         unsafe { &*self.data }
1001     }
1002 }
1003 
1004 impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
1005     for MappedReentrantMutexGuard<'a, R, G, T>
1006 {
1007     #[inline]
drop(&mut self)1008     fn drop(&mut self) {
1009         // Safety: A MappedReentrantMutexGuard always holds the lock.
1010         unsafe {
1011             self.raw.unlock();
1012         }
1013     }
1014 }
1015 
1016 impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
1017     for MappedReentrantMutexGuard<'a, R, G, T>
1018 {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1019     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1020         fmt::Debug::fmt(&**self, f)
1021     }
1022 }
1023 
1024 impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
1025     for MappedReentrantMutexGuard<'a, R, G, T>
1026 {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1027     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1028         (**self).fmt(f)
1029     }
1030 }
1031 
#[cfg(feature = "owning_ref")]
// SAFETY(review): `Deref` returns `&*self.data`, a pointer fixed when the
// guard was created; since the guard cannot re-lock or re-map, the target
// address presumably never changes while the guard exists — confirm against
// `owning_ref::StableAddress`'s requirements.
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
    for MappedReentrantMutexGuard<'a, R, G, T>
{
}
1037