Lines Matching full:lock
/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data.
/// let lock = spin::mutex::SpinMutex::new(0);
/// *lock.lock() = 2;
/// let answer = *lock.lock();
/// let mut guard = my_lock.lock();
/// // Release the lock to prevent a deadlock
/// let answer = { *spin_mutex.lock() };
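Read together, these doc fragments describe the basic usage pattern. A minimal, self-contained sketch of that pattern (assuming `spin` as a dependency; the variable names are illustrative):

    use spin::mutex::SpinMutex;

    fn main() {
        let lock = SpinMutex::new(0);

        // Mutate through a temporary guard that drops at the end of the statement.
        *lock.lock() = 2;

        // Hold a named guard, then release it before locking again;
        // a second lock() while the guard lives would spin forever.
        let mut guard = lock.lock();
        *guard += 40;
        drop(guard);

        let answer = *lock.lock();
        assert_eq!(answer, 42);
    }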
pub(crate) lock: AtomicBool, // field of SpinMutex
/// When the guard falls out of scope it will release the lock.
lock: &'a AtomicBool, // field of SpinMutexGuard
/// let lock = MUTEX.lock();
/// // do something with lock
/// drop(lock);
lock: AtomicBool::new(false), // in new()
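The `MUTEX` in the doc fragment above reads like a static, and the plain `AtomicBool::new(false)` initializer suggests `const`-friendly construction. A hedged sketch of that pattern (names are illustrative):

    use spin::mutex::SpinMutex;

    static MUTEX: SpinMutex<u32> = SpinMutex::new(0);

    fn bump() {
        let mut lock = MUTEX.lock();
        *lock += 1;
        // do something with lock
        drop(lock);
    }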
/// let lock = spin::mutex::SpinMutex::new(42);
/// assert_eq!(42, lock.into_inner());
// `self` so there's no need to lock. (in into_inner())
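The truncated comment above argues that taking `self` by value rules out any outstanding guard. A sketch of what the body plausibly looks like, assuming the protected value lives in an `UnsafeCell<T>` field named `data` (that field never matched this search, so its name is an assumption):

    pub fn into_inner(self) -> T {
        // Consuming `self` statically proves no guard is outstanding,
        // so the atomic flag is never even inspected.
        self.data.into_inner()
    }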
/// Returns `true` if the lock is currently held.
self.lock.load(Ordering::Relaxed) // in is_locked()
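Because the load is `Relaxed`, the result is only a momentary snapshot; another thread may lock or unlock before the caller acts on it. A short usage sketch:

    let lock = spin::mutex::SpinMutex::new(0);
    assert!(!lock.is_locked());
    let guard = lock.lock();
    assert!(lock.is_locked());
    drop(guard);
    assert!(!lock.is_locked());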
/// and the lock will be dropped when the guard falls out of scope.
/// let lock = spin::mutex::SpinMutex::new(0);
/// let mut data = lock.lock();
/// // The lock is now locked and the data can be accessed
/// // The lock is implicitly dropped at the end of the scope
pub fn lock(&self) -> SpinMutexGuard<T> {
    // Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
    while self.lock.compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
        // Wait until the lock looks unlocked before retrying
    lock: &self.lock, // in lock()
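Stitched together, the fragments above give the shape of the acquire path: a weak CAS from `false` to `true` with `Acquire` on success, and a read-only inner spin so that contention hammers a load rather than a read-modify-write. A reconstruction under two assumptions: the data sits in an `UnsafeCell<T>` field named `data`, and the busy-wait uses `core::hint::spin_loop()` (the crate may use a pluggable relax strategy instead):

    pub fn lock(&self) -> SpinMutexGuard<T> {
        // Can fail to lock even if the spinlock is not locked.
        // May be more efficient than `try_lock` on some architectures.
        while self
            .lock
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            // Wait until the lock looks unlocked before retrying the CAS.
            while self.is_locked() {
                core::hint::spin_loop();
            }
        }
        SpinMutexGuard {
            lock: &self.lock,
            data: unsafe { &mut *self.data.get() },
        }
    }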
/// This is *extremely* unsafe if the lock is not held by the current
/// lock to FFI that doesn't know how to deal with RAII.
self.lock.store(false, Ordering::Release); // in force_unlock()
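The FFI scenario the safety note gestures at: forget the guard so its destructor never releases the flag, let foreign code run while the lock is held, then release by hand. A hedged sketch (it mirrors the `test_mutex_force_lock` test further down):

    let lock = spin::mutex::SpinMutex::new(());
    core::mem::forget(lock.lock()); // keep the lock held past the guard
    // ... hand control to code that cannot carry a RAII guard ...
    unsafe { lock.force_unlock() }; // sound only because we know we still hold it
    assert!(lock.try_lock().is_some());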
/// Try to lock this [`SpinMutex`], returning a lock guard if successful.
/// let lock = spin::mutex::SpinMutex::new(42);
/// let maybe_guard = lock.try_lock();
/// let maybe_guard2 = lock.try_lock();
if self.lock.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_ok() {
    lock: &self.lock, // in try_lock()
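Note the strong `compare_exchange` here, in contrast to the weak one in `lock()`: a single attempt must not fail spuriously, or `try_lock` would report contention that never existed. Completed under the same `data`-field assumption as above:

    pub fn try_lock(&self) -> Option<SpinMutexGuard<T>> {
        if self.lock.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_ok() {
            Some(SpinMutexGuard {
                lock: &self.lock,
                data: unsafe { &mut *self.data.get() },
            })
        } else {
            None
        }
    }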
/// let mut lock = spin::mutex::SpinMutex::new(0);
/// *lock.get_mut() = 10;
/// assert_eq!(*lock.lock(), 10);
// there's no need to lock the inner mutex. (in get_mut())
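Same argument as `into_inner`, but for a borrow: `&mut self` already guarantees exclusivity, so no atomic traffic is needed. A one-method sketch, again assuming the `UnsafeCell<T>` field:

    pub fn get_mut(&mut self) -> &mut T {
        // `&mut self` is statically exclusive, so
        // there's no need to lock the inner mutex.
        unsafe { &mut *self.data.get() }
    }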
/// Leak the lock guard, yielding a mutable reference to the underlying data.
/// Note that this function will permanently lock the original [`SpinMutex`].
/// let data: &mut i32 = spin::mutex::SpinMutexGuard::leak(mylock.lock());
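The price of the longer-lived `&mut` is stated plainly above: the guard's destructor never runs, so the flag stays set forever. A usage sketch:

    let mylock = spin::mutex::SpinMutex::new(0);
    let data: &mut i32 = spin::mutex::SpinMutexGuard::leak(mylock.lock());
    *data = 1;
    // Permanently locked from here on: no try_lock can ever succeed again.
    assert!(mylock.try_lock().is_none());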
/// The dropping of the MutexGuard will release the lock it was created from.
self.lock.store(false, Ordering::Release); // in drop()
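That single `Release` store is plausibly the entire destructor; it pairs with the `Acquire` on the CAS in `lock()` and `try_lock()` so that writes made under the lock are visible to the next holder. A sketch of the impl:

    impl<'a, T: ?Sized> Drop for SpinMutexGuard<'a, T> {
        /// The dropping of the MutexGuard will release the lock it was created from.
        fn drop(&mut self) {
            self.lock.store(false, Ordering::Release);
        }
    }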
fn lock(&self) {
    core::mem::forget(Self::lock(self));
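Out of context the `mem::forget` looks alarming; these two lines read like an adapter implementing a raw-mutex trait (`lock_api::RawMutex` is the likely candidate, though that is an inference). The raw `lock` must leave the flag set with no guard to carry, so the guard returned by the inherent `Self::lock` is forgotten to stop its destructor from releasing the lock. A hedged sketch of that shape:

    unsafe impl lock_api::RawMutex for SpinMutex<()> {
        const INIT: Self = SpinMutex::new(());
        type GuardMarker = lock_api::GuardSend;

        fn lock(&self) {
            // Prevent the guard destructor from running, so the raw
            // lock stays held until `unlock` is called explicitly.
            core::mem::forget(Self::lock(self));
        }

        fn try_lock(&self) -> bool {
            Self::try_lock(self).map(core::mem::forget).is_some()
        }

        unsafe fn unlock(&self) {
            self.force_unlock();
        }
    }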
drop(m.lock()); // in smoke()
drop(m.lock()); // in smoke()
let _g = M.lock(); // in lots_and_lots()
// First lock succeeds (in try_lock())
// Additional lock fails (in try_lock())
// After dropping lock, it succeeds again (in try_lock())
let lock = arc2.lock(); // in test_mutex_arc_nested()
let lock2 = lock.lock(); // in test_mutex_arc_nested()
*self.i.lock() += 1; // in test_mutex_arc_access_in_unwind()
let lock = arc.lock(); // in test_mutex_arc_access_in_unwind()
assert_eq!(*lock, 2); // in test_mutex_arc_access_in_unwind()
let b = &mut *mutex.lock(); // in test_mutex_unsized()
assert_eq!(&*mutex.lock(), comp); // in test_mutex_unsized()
let lock = SpinMutex::new(()); // in test_mutex_force_lock()
::std::mem::forget(lock.lock()); // in test_mutex_force_lock()
lock.force_unlock(); // in test_mutex_force_lock()
assert!(lock.try_lock().is_some()); // in test_mutex_force_lock()