// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use core::cell::UnsafeCell;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ops::{Deref, DerefMut};

#[cfg(feature = "arc_lock")]
use alloc::sync::Arc;
#[cfg(feature = "arc_lock")]
use core::mem::ManuallyDrop;
#[cfg(feature = "arc_lock")]
use core::ptr;

#[cfg(feature = "owning_ref")]
use owning_ref::StableAddress;

#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};

/// Basic operations for a reader-writer lock.
///
/// Types implementing this trait can be used by `RwLock` to form a safe and
/// fully-functioning `RwLock` type.
///
/// # Safety
///
/// Implementations of this trait must ensure that the `RwLock` is actually
/// exclusive: an exclusive lock can't be acquired while an exclusive or shared
/// lock exists, and a shared lock can't be acquired while an exclusive lock
/// exists.
pub unsafe trait RawRwLock {
    /// Initial value for an unlocked `RwLock`.
    // A "non-constant" const item is a legacy way to supply an initialized value to downstream
    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self;

    /// Marker type which determines whether a lock guard should be `Send`. Use
    /// one of the `GuardSend` or `GuardNoSend` helper types here.
    type GuardMarker;

    /// Acquires a shared lock, blocking the current thread until it is able to do so.
    fn lock_shared(&self);

    /// Attempts to acquire a shared lock without blocking.
    fn try_lock_shared(&self) -> bool;

    /// Releases a shared lock.
    ///
    /// # Safety
    ///
    /// This method may only be called if a shared lock is held in the current context.
    unsafe fn unlock_shared(&self);

    /// Acquires an exclusive lock, blocking the current thread until it is able to do so.
    fn lock_exclusive(&self);

    /// Attempts to acquire an exclusive lock without blocking.
    fn try_lock_exclusive(&self) -> bool;

    /// Releases an exclusive lock.
    ///
    /// # Safety
    ///
    /// This method may only be called if an exclusive lock is held in the current context.
    unsafe fn unlock_exclusive(&self);

    /// Checks if this `RwLock` is currently locked in any way.
    #[inline]
    fn is_locked(&self) -> bool {
        let acquired_lock = self.try_lock_exclusive();
        if acquired_lock {
            // Safety: A lock was successfully acquired above.
            unsafe {
                self.unlock_exclusive();
            }
        }
        !acquired_lock
    }

    /// Check if this `RwLock` is currently exclusively locked.
    fn is_locked_exclusive(&self) -> bool {
        let acquired_lock = self.try_lock_shared();
        if acquired_lock {
            // Safety: A shared lock was successfully acquired above.
            unsafe {
                self.unlock_shared();
            }
        }
        !acquired_lock
    }
}

/// Additional methods for `RwLock`s which support fair unlocking.
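// NOTE: The following is an illustrative sketch, not part of this crate: a
// minimal spin-based implementation of the `RawRwLock` trait defined above.
// The type name `RawSpinRwLock` is made up for the example; `GuardSend` is the
// marker type exported by `lock_api`. A production implementation would also
// guard against reader-count overflow and avoid busy-waiting.
//
//     use core::sync::atomic::{AtomicUsize, Ordering};
//     use lock_api::{GuardSend, RawRwLock};
//
//     // `state` holds the number of readers, or `WRITER` when write-locked.
//     const WRITER: usize = usize::MAX;
//
//     pub struct RawSpinRwLock {
//         state: AtomicUsize,
//     }
//
//     unsafe impl RawRwLock for RawSpinRwLock {
//         const INIT: Self = RawSpinRwLock { state: AtomicUsize::new(0) };
//         type GuardMarker = GuardSend;
//
//         fn lock_shared(&self) {
//             while !self.try_lock_shared() { core::hint::spin_loop(); }
//         }
//         fn try_lock_shared(&self) -> bool {
//             let s = self.state.load(Ordering::Relaxed);
//             s != WRITER
//                 && self
//                     .state
//                     .compare_exchange_weak(s, s + 1, Ordering::Acquire, Ordering::Relaxed)
//                     .is_ok()
//         }
//         unsafe fn unlock_shared(&self) {
//             self.state.fetch_sub(1, Ordering::Release);
//         }
//         fn lock_exclusive(&self) {
//             while !self.try_lock_exclusive() { core::hint::spin_loop(); }
//         }
//         fn try_lock_exclusive(&self) -> bool {
//             self.state
//                 .compare_exchange(0, WRITER, Ordering::Acquire, Ordering::Relaxed)
//                 .is_ok()
//         }
//         unsafe fn unlock_exclusive(&self) {
//             self.state.store(0, Ordering::Release);
//         }
//     }
//
//     // All of `RwLock`'s read/write methods then become available:
//     // pub type SpinRwLock<T> = lock_api::RwLock<RawSpinRwLock, T>;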
102 /// 103 /// Fair unlocking means that a lock is handed directly over to the next waiting 104 /// thread if there is one, without giving other threads the opportunity to 105 /// "steal" the lock in the meantime. This is typically slower than unfair 106 /// unlocking, but may be necessary in certain circumstances. 107 pub unsafe trait RawRwLockFair: RawRwLock { 108 /// Releases a shared lock using a fair unlock protocol. 109 /// 110 /// # Safety 111 /// 112 /// This method may only be called if a shared lock is held in the current context. unlock_shared_fair(&self)113 unsafe fn unlock_shared_fair(&self); 114 115 /// Releases an exclusive lock using a fair unlock protocol. 116 /// 117 /// # Safety 118 /// 119 /// This method may only be called if an exclusive lock is held in the current context. unlock_exclusive_fair(&self)120 unsafe fn unlock_exclusive_fair(&self); 121 122 /// Temporarily yields a shared lock to a waiting thread if there is one. 123 /// 124 /// This method is functionally equivalent to calling `unlock_shared_fair` followed 125 /// by `lock_shared`, however it can be much more efficient in the case where there 126 /// are no waiting threads. 127 /// 128 /// # Safety 129 /// 130 /// This method may only be called if a shared lock is held in the current context. bump_shared(&self)131 unsafe fn bump_shared(&self) { 132 self.unlock_shared_fair(); 133 self.lock_shared(); 134 } 135 136 /// Temporarily yields an exclusive lock to a waiting thread if there is one. 137 /// 138 /// This method is functionally equivalent to calling `unlock_exclusive_fair` followed 139 /// by `lock_exclusive`, however it can be much more efficient in the case where there 140 /// are no waiting threads. 141 /// 142 /// # Safety 143 /// 144 /// This method may only be called if an exclusive lock is held in the current context. bump_exclusive(&self)145 unsafe fn bump_exclusive(&self) { 146 self.unlock_exclusive_fair(); 147 self.lock_exclusive(); 148 } 149 } 150 151 /// Additional methods for `RwLock`s which support atomically downgrading an 152 /// exclusive lock to a shared lock. 153 pub unsafe trait RawRwLockDowngrade: RawRwLock { 154 /// Atomically downgrades an exclusive lock into a shared lock without 155 /// allowing any thread to take an exclusive lock in the meantime. 156 /// 157 /// # Safety 158 /// 159 /// This method may only be called if an exclusive lock is held in the current context. downgrade(&self)160 unsafe fn downgrade(&self); 161 } 162 163 /// Additional methods for `RwLock`s which support locking with timeouts. 164 /// 165 /// The `Duration` and `Instant` types are specified as associated types so that 166 /// this trait is usable even in `no_std` environments. 167 pub unsafe trait RawRwLockTimed: RawRwLock { 168 /// Duration type used for `try_lock_for`. 169 type Duration; 170 171 /// Instant type used for `try_lock_until`. 172 type Instant; 173 174 /// Attempts to acquire a shared lock until a timeout is reached. try_lock_shared_for(&self, timeout: Self::Duration) -> bool175 fn try_lock_shared_for(&self, timeout: Self::Duration) -> bool; 176 177 /// Attempts to acquire a shared lock until a timeout is reached. try_lock_shared_until(&self, timeout: Self::Instant) -> bool178 fn try_lock_shared_until(&self, timeout: Self::Instant) -> bool; 179 180 /// Attempts to acquire an exclusive lock until a timeout is reached. 
try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool181 fn try_lock_exclusive_for(&self, timeout: Self::Duration) -> bool; 182 183 /// Attempts to acquire an exclusive lock until a timeout is reached. try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool184 fn try_lock_exclusive_until(&self, timeout: Self::Instant) -> bool; 185 } 186 187 /// Additional methods for `RwLock`s which support recursive read locks. 188 /// 189 /// These are guaranteed to succeed without blocking if 190 /// another read lock is held at the time of the call. This allows a thread 191 /// to recursively lock a `RwLock`. However using this method can cause 192 /// writers to starve since readers no longer block if a writer is waiting 193 /// for the lock. 194 pub unsafe trait RawRwLockRecursive: RawRwLock { 195 /// Acquires a shared lock without deadlocking in case of a recursive lock. lock_shared_recursive(&self)196 fn lock_shared_recursive(&self); 197 198 /// Attempts to acquire a shared lock without deadlocking in case of a recursive lock. try_lock_shared_recursive(&self) -> bool199 fn try_lock_shared_recursive(&self) -> bool; 200 } 201 202 /// Additional methods for `RwLock`s which support recursive read locks and timeouts. 203 pub unsafe trait RawRwLockRecursiveTimed: RawRwLockRecursive + RawRwLockTimed { 204 /// Attempts to acquire a shared lock until a timeout is reached, without 205 /// deadlocking in case of a recursive lock. try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool206 fn try_lock_shared_recursive_for(&self, timeout: Self::Duration) -> bool; 207 208 /// Attempts to acquire a shared lock until a timeout is reached, without 209 /// deadlocking in case of a recursive lock. try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool210 fn try_lock_shared_recursive_until(&self, timeout: Self::Instant) -> bool; 211 } 212 213 /// Additional methods for `RwLock`s which support atomically upgrading a shared 214 /// lock to an exclusive lock. 215 /// 216 /// This requires acquiring a special "upgradable read lock" instead of a 217 /// normal shared lock. There may only be one upgradable lock at any time, 218 /// otherwise deadlocks could occur when upgrading. 219 pub unsafe trait RawRwLockUpgrade: RawRwLock { 220 /// Acquires an upgradable lock, blocking the current thread until it is able to do so. lock_upgradable(&self)221 fn lock_upgradable(&self); 222 223 /// Attempts to acquire an upgradable lock without blocking. try_lock_upgradable(&self) -> bool224 fn try_lock_upgradable(&self) -> bool; 225 226 /// Releases an upgradable lock. 227 /// 228 /// # Safety 229 /// 230 /// This method may only be called if an upgradable lock is held in the current context. unlock_upgradable(&self)231 unsafe fn unlock_upgradable(&self); 232 233 /// Upgrades an upgradable lock to an exclusive lock. 234 /// 235 /// # Safety 236 /// 237 /// This method may only be called if an upgradable lock is held in the current context. upgrade(&self)238 unsafe fn upgrade(&self); 239 240 /// Attempts to upgrade an upgradable lock to an exclusive lock without 241 /// blocking. 242 /// 243 /// # Safety 244 /// 245 /// This method may only be called if an upgradable lock is held in the current context. try_upgrade(&self) -> bool246 unsafe fn try_upgrade(&self) -> bool; 247 } 248 249 /// Additional methods for `RwLock`s which support upgradable locks and fair 250 /// unlocking. 
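// An illustrative sketch (not part of this crate's API) of how the upgradable
// locking traits below are typically consumed through `RwLock`; `SomeRawRwLock`
// is a placeholder for a raw lock implementing `RawRwLockUpgrade`:
//
//     let lock = lock_api::RwLock::<SomeRawRwLock, Vec<i32>>::new(Vec::new());
//     let upgradable = lock.upgradable_read();
//     if upgradable.is_empty() {
//         // Upgrading takes the exclusive lock without releasing the
//         // upgradable read lock in between.
//         let mut writer = lock_api::RwLockUpgradableReadGuard::upgrade(upgradable);
//         writer.push(0);
//     }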
251 pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair { 252 /// Releases an upgradable lock using a fair unlock protocol. 253 /// 254 /// # Safety 255 /// 256 /// This method may only be called if an upgradable lock is held in the current context. unlock_upgradable_fair(&self)257 unsafe fn unlock_upgradable_fair(&self); 258 259 /// Temporarily yields an upgradable lock to a waiting thread if there is one. 260 /// 261 /// This method is functionally equivalent to calling `unlock_upgradable_fair` followed 262 /// by `lock_upgradable`, however it can be much more efficient in the case where there 263 /// are no waiting threads. 264 /// 265 /// # Safety 266 /// 267 /// This method may only be called if an upgradable lock is held in the current context. bump_upgradable(&self)268 unsafe fn bump_upgradable(&self) { 269 self.unlock_upgradable_fair(); 270 self.lock_upgradable(); 271 } 272 } 273 274 /// Additional methods for `RwLock`s which support upgradable locks and lock 275 /// downgrading. 276 pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade { 277 /// Downgrades an upgradable lock to a shared lock. 278 /// 279 /// # Safety 280 /// 281 /// This method may only be called if an upgradable lock is held in the current context. downgrade_upgradable(&self)282 unsafe fn downgrade_upgradable(&self); 283 284 /// Downgrades an exclusive lock to an upgradable lock. 285 /// 286 /// # Safety 287 /// 288 /// This method may only be called if an exclusive lock is held in the current context. downgrade_to_upgradable(&self)289 unsafe fn downgrade_to_upgradable(&self); 290 } 291 292 /// Additional methods for `RwLock`s which support upgradable locks and locking 293 /// with timeouts. 294 pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed { 295 /// Attempts to acquire an upgradable lock until a timeout is reached. try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool296 fn try_lock_upgradable_for(&self, timeout: Self::Duration) -> bool; 297 298 /// Attempts to acquire an upgradable lock until a timeout is reached. try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool299 fn try_lock_upgradable_until(&self, timeout: Self::Instant) -> bool; 300 301 /// Attempts to upgrade an upgradable lock to an exclusive lock until a 302 /// timeout is reached. 303 /// 304 /// # Safety 305 /// 306 /// This method may only be called if an upgradable lock is held in the current context. try_upgrade_for(&self, timeout: Self::Duration) -> bool307 unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool; 308 309 /// Attempts to upgrade an upgradable lock to an exclusive lock until a 310 /// timeout is reached. 311 /// 312 /// # Safety 313 /// 314 /// This method may only be called if an upgradable lock is held in the current context. try_upgrade_until(&self, timeout: Self::Instant) -> bool315 unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool; 316 } 317 318 /// A reader-writer lock 319 /// 320 /// This type of lock allows a number of readers or at most one writer at any 321 /// point in time. The write portion of this lock typically allows modification 322 /// of the underlying data (exclusive access) and the read portion of this lock 323 /// typically allows for read-only access (shared access). 324 /// 325 /// The type parameter `T` represents the data that this lock protects. 
/// It is required that `T` satisfies `Send` to be shared across threads and
/// `Sync` to allow concurrent access through readers. The RAII guards returned
/// from the locking methods implement `Deref` (and `DerefMut` for the `write`
/// methods) to allow access to the contents of the lock.
pub struct RwLock<R, T: ?Sized> {
    raw: R,
    data: UnsafeCell<T>,
}

// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, T> Serialize for RwLock<R, T>
where
    R: RawRwLock,
    T: Serialize + ?Sized,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.read().serialize(serializer)
    }
}

#[cfg(feature = "serde")]
impl<'de, R, T> Deserialize<'de> for RwLock<R, T>
where
    R: RawRwLock,
    T: Deserialize<'de> + ?Sized,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(deserializer).map(RwLock::new)
    }
}

unsafe impl<R: RawRwLock + Send, T: ?Sized + Send> Send for RwLock<R, T> {}
unsafe impl<R: RawRwLock + Sync, T: ?Sized + Send + Sync> Sync for RwLock<R, T> {}

impl<R: RawRwLock, T> RwLock<R, T> {
    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    #[cfg(has_const_fn_trait_bound)]
    #[inline]
    pub const fn new(val: T) -> RwLock<R, T> {
        RwLock {
            data: UnsafeCell::new(val),
            raw: R::INIT,
        }
    }

    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    #[cfg(not(has_const_fn_trait_bound))]
    #[inline]
    pub fn new(val: T) -> RwLock<R, T> {
        RwLock {
            data: UnsafeCell::new(val),
            raw: R::INIT,
        }
    }

    /// Consumes this `RwLock`, returning the underlying data.
    #[inline]
    #[allow(unused_unsafe)]
    pub fn into_inner(self) -> T {
        unsafe { self.data.into_inner() }
    }
}

impl<R, T> RwLock<R, T> {
    /// Creates a new instance of an `RwLock<T>` based on a pre-existing
    /// `RawRwLock`.
    #[inline]
    pub const fn from_raw(raw_rwlock: R, val: T) -> RwLock<R, T> {
        RwLock {
            data: UnsafeCell::new(val),
            raw: raw_rwlock,
        }
    }

    /// Creates a new instance of an `RwLock<T>` based on a pre-existing
    /// `RawRwLock`.
    ///
    /// This allows creating a `RwLock<T>` in a constant context on stable
    /// Rust.
    ///
    /// This method is a legacy alias for [`from_raw`](Self::from_raw).
    #[inline]
    pub const fn const_new(raw_rwlock: R, val: T) -> RwLock<R, T> {
        Self::from_raw(raw_rwlock, val)
    }
}

impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
    /// Creates a new `RwLockReadGuard` without checking if the lock is held.
    ///
    /// # Safety
    ///
    /// This method must only be called if the thread logically holds a read lock.
    ///
    /// This function does not increment the read count of the lock. Calling this function when a
    /// guard has already been produced is undefined behaviour unless the guard was forgotten
    /// with `mem::forget`.
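    ///
    /// # Example
    ///
    /// A sketch of the intended pattern, assuming a concrete raw lock type
    /// behind the placeholder name `SomeRawRwLock` (for instance the raw lock
    /// provided by the `parking_lot` crate): a guard is leaked with
    /// `mem::forget`, and a new guard is later re-created while the lock is
    /// still logically held.
    ///
    /// ```ignore
    /// let lock = lock_api::RwLock::<SomeRawRwLock, i32>::new(0);
    /// core::mem::forget(lock.read()); // keep holding the shared lock, discard the guard
    /// // ... later, while the shared lock is still logically held ...
    /// let guard = unsafe { lock.make_read_guard_unchecked() };
    /// drop(guard); // releases the shared lock acquired above
    /// ```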
430 #[inline] make_read_guard_unchecked(&self) -> RwLockReadGuard<'_, R, T>431 pub unsafe fn make_read_guard_unchecked(&self) -> RwLockReadGuard<'_, R, T> { 432 RwLockReadGuard { 433 rwlock: self, 434 marker: PhantomData, 435 } 436 } 437 438 /// Creates a new `RwLockReadGuard` without checking if the lock is held. 439 /// 440 /// # Safety 441 /// 442 /// This method must only be called if the thread logically holds a write lock. 443 /// 444 /// Calling this function when a guard has already been produced is undefined behaviour unless 445 /// the guard was forgotten with `mem::forget`. 446 #[inline] make_write_guard_unchecked(&self) -> RwLockWriteGuard<'_, R, T>447 pub unsafe fn make_write_guard_unchecked(&self) -> RwLockWriteGuard<'_, R, T> { 448 RwLockWriteGuard { 449 rwlock: self, 450 marker: PhantomData, 451 } 452 } 453 454 /// Locks this `RwLock` with shared read access, blocking the current thread 455 /// until it can be acquired. 456 /// 457 /// The calling thread will be blocked until there are no more writers which 458 /// hold the lock. There may be other readers currently inside the lock when 459 /// this method returns. 460 /// 461 /// Note that attempts to recursively acquire a read lock on a `RwLock` when 462 /// the current thread already holds one may result in a deadlock. 463 /// 464 /// Returns an RAII guard which will release this thread's shared access 465 /// once it is dropped. 466 #[inline] read(&self) -> RwLockReadGuard<'_, R, T>467 pub fn read(&self) -> RwLockReadGuard<'_, R, T> { 468 self.raw.lock_shared(); 469 // SAFETY: The lock is held, as required. 470 unsafe { self.make_read_guard_unchecked() } 471 } 472 473 /// Attempts to acquire this `RwLock` with shared read access. 474 /// 475 /// If the access could not be granted at this time, then `None` is returned. 476 /// Otherwise, an RAII guard is returned which will release the shared access 477 /// when it is dropped. 478 /// 479 /// This function does not block. 480 #[inline] try_read(&self) -> Option<RwLockReadGuard<'_, R, T>>481 pub fn try_read(&self) -> Option<RwLockReadGuard<'_, R, T>> { 482 if self.raw.try_lock_shared() { 483 // SAFETY: The lock is held, as required. 484 Some(unsafe { self.make_read_guard_unchecked() }) 485 } else { 486 None 487 } 488 } 489 490 /// Locks this `RwLock` with exclusive write access, blocking the current 491 /// thread until it can be acquired. 492 /// 493 /// This function will not return while other writers or other readers 494 /// currently have access to the lock. 495 /// 496 /// Returns an RAII guard which will drop the write access of this `RwLock` 497 /// when dropped. 498 #[inline] write(&self) -> RwLockWriteGuard<'_, R, T>499 pub fn write(&self) -> RwLockWriteGuard<'_, R, T> { 500 self.raw.lock_exclusive(); 501 // SAFETY: The lock is held, as required. 502 unsafe { self.make_write_guard_unchecked() } 503 } 504 505 /// Attempts to lock this `RwLock` with exclusive write access. 506 /// 507 /// If the lock could not be acquired at this time, then `None` is returned. 508 /// Otherwise, an RAII guard is returned which will release the lock when 509 /// it is dropped. 510 /// 511 /// This function does not block. 512 #[inline] try_write(&self) -> Option<RwLockWriteGuard<'_, R, T>>513 pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, R, T>> { 514 if self.raw.try_lock_exclusive() { 515 // SAFETY: The lock is held, as required. 
516 Some(unsafe { self.make_write_guard_unchecked() }) 517 } else { 518 None 519 } 520 } 521 522 /// Returns a mutable reference to the underlying data. 523 /// 524 /// Since this call borrows the `RwLock` mutably, no actual locking needs to 525 /// take place---the mutable borrow statically guarantees no locks exist. 526 #[inline] get_mut(&mut self) -> &mut T527 pub fn get_mut(&mut self) -> &mut T { 528 unsafe { &mut *self.data.get() } 529 } 530 531 /// Checks whether this `RwLock` is currently locked in any way. 532 #[inline] is_locked(&self) -> bool533 pub fn is_locked(&self) -> bool { 534 self.raw.is_locked() 535 } 536 537 /// Check if this `RwLock` is currently exclusively locked. 538 #[inline] is_locked_exclusive(&self) -> bool539 pub fn is_locked_exclusive(&self) -> bool { 540 self.raw.is_locked_exclusive() 541 } 542 543 /// Forcibly unlocks a read lock. 544 /// 545 /// This is useful when combined with `mem::forget` to hold a lock without 546 /// the need to maintain a `RwLockReadGuard` object alive, for example when 547 /// dealing with FFI. 548 /// 549 /// # Safety 550 /// 551 /// This method must only be called if the current thread logically owns a 552 /// `RwLockReadGuard` but that guard has be discarded using `mem::forget`. 553 /// Behavior is undefined if a rwlock is read-unlocked when not read-locked. 554 #[inline] force_unlock_read(&self)555 pub unsafe fn force_unlock_read(&self) { 556 self.raw.unlock_shared(); 557 } 558 559 /// Forcibly unlocks a write lock. 560 /// 561 /// This is useful when combined with `mem::forget` to hold a lock without 562 /// the need to maintain a `RwLockWriteGuard` object alive, for example when 563 /// dealing with FFI. 564 /// 565 /// # Safety 566 /// 567 /// This method must only be called if the current thread logically owns a 568 /// `RwLockWriteGuard` but that guard has be discarded using `mem::forget`. 569 /// Behavior is undefined if a rwlock is write-unlocked when not write-locked. 570 #[inline] force_unlock_write(&self)571 pub unsafe fn force_unlock_write(&self) { 572 self.raw.unlock_exclusive(); 573 } 574 575 /// Returns the underlying raw reader-writer lock object. 576 /// 577 /// Note that you will most likely need to import the `RawRwLock` trait from 578 /// `lock_api` to be able to call functions on the raw 579 /// reader-writer lock. 580 /// 581 /// # Safety 582 /// 583 /// This method is unsafe because it allows unlocking a mutex while 584 /// still holding a reference to a lock guard. raw(&self) -> &R585 pub unsafe fn raw(&self) -> &R { 586 &self.raw 587 } 588 589 /// Returns a raw pointer to the underlying data. 590 /// 591 /// This is useful when combined with `mem::forget` to hold a lock without 592 /// the need to maintain a `RwLockReadGuard` or `RwLockWriteGuard` object 593 /// alive, for example when dealing with FFI. 594 /// 595 /// # Safety 596 /// 597 /// You must ensure that there are no data races when dereferencing the 598 /// returned pointer, for example if the current thread logically owns a 599 /// `RwLockReadGuard` or `RwLockWriteGuard` but that guard has been discarded 600 /// using `mem::forget`. 601 #[inline] data_ptr(&self) -> *mut T602 pub fn data_ptr(&self) -> *mut T { 603 self.data.get() 604 } 605 606 /// Creates a new `RwLockReadGuard` without checking if the lock is held. 607 /// 608 /// # Safety 609 /// 610 /// This method must only be called if the thread logically holds a read lock. 611 /// 612 /// This function does not increment the read count of the lock. 
    /// Calling this function when a
    /// guard has already been produced is undefined behaviour unless the guard was forgotten
    /// with `mem::forget`.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub unsafe fn make_arc_read_guard_unchecked(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T> {
        ArcRwLockReadGuard {
            rwlock: self.clone(),
            marker: PhantomData,
        }
    }

    /// Creates a new `RwLockWriteGuard` without checking if the lock is held.
    ///
    /// # Safety
    ///
    /// This method must only be called if the thread logically holds a write lock.
    ///
    /// Calling this function when a guard has already been produced is undefined behaviour unless
    /// the guard was forgotten with `mem::forget`.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub unsafe fn make_arc_write_guard_unchecked(self: &Arc<Self>) -> ArcRwLockWriteGuard<R, T> {
        ArcRwLockWriteGuard {
            rwlock: self.clone(),
            marker: PhantomData,
        }
    }

    /// Locks this `RwLock` with read access, through an `Arc`.
    ///
    /// This method is similar to the `read` method; however, it requires the `RwLock` to be inside of an `Arc`
    /// and the resulting read guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn read_arc(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T> {
        self.raw.lock_shared();
        // SAFETY: locking guarantee is upheld
        unsafe { self.make_arc_read_guard_unchecked() }
    }

    /// Attempts to lock this `RwLock` with read access, through an `Arc`.
    ///
    /// This method is similar to the `try_read` method; however, it requires the `RwLock` to be inside of an
    /// `Arc` and the resulting read guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_read_arc(self: &Arc<Self>) -> Option<ArcRwLockReadGuard<R, T>> {
        if self.raw.try_lock_shared() {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.make_arc_read_guard_unchecked() })
        } else {
            None
        }
    }

    /// Locks this `RwLock` with write access, through an `Arc`.
    ///
    /// This method is similar to the `write` method; however, it requires the `RwLock` to be inside of an `Arc`
    /// and the resulting write guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn write_arc(self: &Arc<Self>) -> ArcRwLockWriteGuard<R, T> {
        self.raw.lock_exclusive();
        // SAFETY: locking guarantee is upheld
        unsafe { self.make_arc_write_guard_unchecked() }
    }

    /// Attempts to lock this `RwLock` with write access, through an `Arc`.
    ///
    /// This method is similar to the `try_write` method; however, it requires the `RwLock` to be inside of an
    /// `Arc` and the resulting write guard has no lifetime requirements.
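    ///
    /// # Example
    ///
    /// A sketch, assuming the `arc_lock` feature is enabled and a concrete raw
    /// lock type behind the placeholder name `SomeRawRwLock`:
    ///
    /// ```ignore
    /// use std::sync::Arc;
    ///
    /// let lock = Arc::new(lock_api::RwLock::<SomeRawRwLock, i32>::new(5));
    /// if let Some(mut guard) = lock.try_write_arc() {
    ///     *guard += 1; // the guard keeps its own `Arc` and may outlive `lock`
    /// }
    /// ```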
684 #[cfg(feature = "arc_lock")] 685 #[inline] try_write_arc(self: &Arc<Self>) -> Option<ArcRwLockWriteGuard<R, T>>686 pub fn try_write_arc(self: &Arc<Self>) -> Option<ArcRwLockWriteGuard<R, T>> { 687 if self.raw.try_lock_exclusive() { 688 // SAFETY: locking guarantee is upheld 689 Some(unsafe { self.make_arc_write_guard_unchecked() }) 690 } else { 691 None 692 } 693 } 694 } 695 696 impl<R: RawRwLockFair, T: ?Sized> RwLock<R, T> { 697 /// Forcibly unlocks a read lock using a fair unlock protocol. 698 /// 699 /// This is useful when combined with `mem::forget` to hold a lock without 700 /// the need to maintain a `RwLockReadGuard` object alive, for example when 701 /// dealing with FFI. 702 /// 703 /// # Safety 704 /// 705 /// This method must only be called if the current thread logically owns a 706 /// `RwLockReadGuard` but that guard has be discarded using `mem::forget`. 707 /// Behavior is undefined if a rwlock is read-unlocked when not read-locked. 708 #[inline] force_unlock_read_fair(&self)709 pub unsafe fn force_unlock_read_fair(&self) { 710 self.raw.unlock_shared_fair(); 711 } 712 713 /// Forcibly unlocks a write lock using a fair unlock protocol. 714 /// 715 /// This is useful when combined with `mem::forget` to hold a lock without 716 /// the need to maintain a `RwLockWriteGuard` object alive, for example when 717 /// dealing with FFI. 718 /// 719 /// # Safety 720 /// 721 /// This method must only be called if the current thread logically owns a 722 /// `RwLockWriteGuard` but that guard has be discarded using `mem::forget`. 723 /// Behavior is undefined if a rwlock is write-unlocked when not write-locked. 724 #[inline] force_unlock_write_fair(&self)725 pub unsafe fn force_unlock_write_fair(&self) { 726 self.raw.unlock_exclusive_fair(); 727 } 728 } 729 730 impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> { 731 /// Attempts to acquire this `RwLock` with shared read access until a timeout 732 /// is reached. 733 /// 734 /// If the access could not be granted before the timeout expires, then 735 /// `None` is returned. Otherwise, an RAII guard is returned which will 736 /// release the shared access when it is dropped. 737 #[inline] try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<'_, R, T>>738 pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<'_, R, T>> { 739 if self.raw.try_lock_shared_for(timeout) { 740 // SAFETY: The lock is held, as required. 741 Some(unsafe { self.make_read_guard_unchecked() }) 742 } else { 743 None 744 } 745 } 746 747 /// Attempts to acquire this `RwLock` with shared read access until a timeout 748 /// is reached. 749 /// 750 /// If the access could not be granted before the timeout expires, then 751 /// `None` is returned. Otherwise, an RAII guard is returned which will 752 /// release the shared access when it is dropped. 753 #[inline] try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<'_, R, T>>754 pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<'_, R, T>> { 755 if self.raw.try_lock_shared_until(timeout) { 756 // SAFETY: The lock is held, as required. 757 Some(unsafe { self.make_read_guard_unchecked() }) 758 } else { 759 None 760 } 761 } 762 763 /// Attempts to acquire this `RwLock` with exclusive write access until a 764 /// timeout is reached. 765 /// 766 /// If the access could not be granted before the timeout expires, then 767 /// `None` is returned. Otherwise, an RAII guard is returned which will 768 /// release the exclusive access when it is dropped. 
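    ///
    /// # Example
    ///
    /// A sketch, assuming a raw lock whose `Duration` type is
    /// `std::time::Duration` (as with the raw lock provided by the
    /// `parking_lot` crate); `SomeRawRwLock` is a placeholder name:
    ///
    /// ```ignore
    /// use std::time::Duration;
    ///
    /// let lock = lock_api::RwLock::<SomeRawRwLock, i32>::new(0);
    /// match lock.try_write_for(Duration::from_millis(10)) {
    ///     Some(mut guard) => *guard += 1,
    ///     None => eprintln!("could not acquire the write lock within 10ms"),
    /// }
    /// ```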
    #[inline]
    pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<'_, R, T>> {
        if self.raw.try_lock_exclusive_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.make_write_guard_unchecked() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with exclusive write access until a
    /// timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the exclusive access when it is dropped.
    #[inline]
    pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<'_, R, T>> {
        if self.raw.try_lock_exclusive_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.make_write_guard_unchecked() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with read access until a timeout is reached, through an `Arc`.
    ///
    /// This method is similar to the `try_read_for` method; however, it requires the `RwLock` to be inside of an
    /// `Arc` and the resulting read guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_read_arc_for(
        self: &Arc<Self>,
        timeout: R::Duration,
    ) -> Option<ArcRwLockReadGuard<R, T>> {
        if self.raw.try_lock_shared_for(timeout) {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.make_arc_read_guard_unchecked() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with read access until a timeout is reached, through an `Arc`.
    ///
    /// This method is similar to the `try_read_until` method; however, it requires the `RwLock` to be inside of
    /// an `Arc` and the resulting read guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_read_arc_until(
        self: &Arc<Self>,
        timeout: R::Instant,
    ) -> Option<ArcRwLockReadGuard<R, T>> {
        if self.raw.try_lock_shared_until(timeout) {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.make_arc_read_guard_unchecked() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with write access until a timeout is reached, through an `Arc`.
    ///
    /// This method is similar to the `try_write_for` method; however, it requires the `RwLock` to be inside of
    /// an `Arc` and the resulting write guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_write_arc_for(
        self: &Arc<Self>,
        timeout: R::Duration,
    ) -> Option<ArcRwLockWriteGuard<R, T>> {
        if self.raw.try_lock_exclusive_for(timeout) {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.make_arc_write_guard_unchecked() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with write access until a timeout is reached, through an `Arc`.
    ///
    /// This method is similar to the `try_write_until` method; however, it requires the `RwLock` to be inside of
    /// an `Arc` and the resulting write guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn try_write_arc_until(
        self: &Arc<Self>,
        timeout: R::Instant,
    ) -> Option<ArcRwLockWriteGuard<R, T>> {
        if self.raw.try_lock_exclusive_until(timeout) {
            // SAFETY: locking guarantee is upheld
            Some(unsafe { self.make_arc_write_guard_unchecked() })
        } else {
            None
        }
    }
}

impl<R: RawRwLockRecursive, T: ?Sized> RwLock<R, T> {
    /// Locks this `RwLock` with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns.
    ///
    /// Unlike `read`, this method is guaranteed to succeed without blocking if
    /// another read lock is held at the time of the call. This allows a thread
    /// to recursively lock a `RwLock`. However using this method can cause
    /// writers to starve since readers no longer block if a writer is waiting
    /// for the lock.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    #[inline]
    pub fn read_recursive(&self) -> RwLockReadGuard<'_, R, T> {
        self.raw.lock_shared_recursive();
        // SAFETY: The lock is held, as required.
        unsafe { self.make_read_guard_unchecked() }
    }

    /// Attempts to acquire this `RwLock` with shared read access.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This method is guaranteed to succeed if another read lock is held at the
    /// time of the call. See the documentation for `read_recursive` for details.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<'_, R, T>> {
        if self.raw.try_lock_shared_recursive() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.make_read_guard_unchecked() })
        } else {
            None
        }
    }

    /// Locks this `RwLock` with shared read access, through an `Arc`.
    ///
    /// This method is similar to the `read_recursive` method; however, it requires the `RwLock` to be inside of
    /// an `Arc` and the resulting read guard has no lifetime requirements.
    #[cfg(feature = "arc_lock")]
    #[inline]
    pub fn read_arc_recursive(self: &Arc<Self>) -> ArcRwLockReadGuard<R, T> {
        self.raw.lock_shared_recursive();
        // SAFETY: locking guarantee is upheld
        unsafe { self.make_arc_read_guard_unchecked() }
    }

    /// Attempts to lock this `RwLock` with shared read access, through an `Arc`.
    ///
    /// This method is similar to the `try_read_recursive` method; however, it requires the `RwLock` to be inside
    /// of an `Arc` and the resulting read guard has no lifetime requirements.
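    ///
    /// # Example
    ///
    /// A sketch, assuming the `arc_lock` feature and a raw lock implementing
    /// `RawRwLockRecursive` behind the placeholder name `SomeRawRwLock`:
    ///
    /// ```ignore
    /// use std::sync::Arc;
    ///
    /// let lock = Arc::new(lock_api::RwLock::<SomeRawRwLock, i32>::new(0));
    /// let first = lock.read_arc_recursive();
    /// // A second recursive read on the same thread will not block, even if a
    /// // writer is already waiting for the lock.
    /// let second = lock.try_read_recursive_arc().expect("recursive read succeeds");
    /// assert_eq!(*first, *second);
    /// ```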
927 #[cfg(feature = "arc_lock")] 928 #[inline] try_read_recursive_arc(self: &Arc<Self>) -> Option<ArcRwLockReadGuard<R, T>>929 pub fn try_read_recursive_arc(self: &Arc<Self>) -> Option<ArcRwLockReadGuard<R, T>> { 930 if self.raw.try_lock_shared_recursive() { 931 // SAFETY: locking guarantee is upheld 932 Some(unsafe { self.make_arc_read_guard_unchecked() }) 933 } else { 934 None 935 } 936 } 937 } 938 939 impl<R: RawRwLockRecursiveTimed, T: ?Sized> RwLock<R, T> { 940 /// Attempts to acquire this `RwLock` with shared read access until a timeout 941 /// is reached. 942 /// 943 /// If the access could not be granted before the timeout expires, then 944 /// `None` is returned. Otherwise, an RAII guard is returned which will 945 /// release the shared access when it is dropped. 946 /// 947 /// This method is guaranteed to succeed without blocking if another read 948 /// lock is held at the time of the call. See the documentation for 949 /// `read_recursive` for details. 950 #[inline] try_read_recursive_for( &self, timeout: R::Duration, ) -> Option<RwLockReadGuard<'_, R, T>>951 pub fn try_read_recursive_for( 952 &self, 953 timeout: R::Duration, 954 ) -> Option<RwLockReadGuard<'_, R, T>> { 955 if self.raw.try_lock_shared_recursive_for(timeout) { 956 // SAFETY: The lock is held, as required. 957 Some(unsafe { self.make_read_guard_unchecked() }) 958 } else { 959 None 960 } 961 } 962 963 /// Attempts to acquire this `RwLock` with shared read access until a timeout 964 /// is reached. 965 /// 966 /// If the access could not be granted before the timeout expires, then 967 /// `None` is returned. Otherwise, an RAII guard is returned which will 968 /// release the shared access when it is dropped. 969 #[inline] try_read_recursive_until( &self, timeout: R::Instant, ) -> Option<RwLockReadGuard<'_, R, T>>970 pub fn try_read_recursive_until( 971 &self, 972 timeout: R::Instant, 973 ) -> Option<RwLockReadGuard<'_, R, T>> { 974 if self.raw.try_lock_shared_recursive_until(timeout) { 975 // SAFETY: The lock is held, as required. 976 Some(unsafe { self.make_read_guard_unchecked() }) 977 } else { 978 None 979 } 980 } 981 982 /// Attempts to lock this `RwLock` with read access until a timeout is reached, through an `Arc`. 983 /// 984 /// This method is similar to the `try_read_recursive_for` method; however, it requires the `RwLock` to be 985 /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 986 #[cfg(feature = "arc_lock")] 987 #[inline] try_read_arc_recursive_for( self: &Arc<Self>, timeout: R::Duration, ) -> Option<ArcRwLockReadGuard<R, T>>988 pub fn try_read_arc_recursive_for( 989 self: &Arc<Self>, 990 timeout: R::Duration, 991 ) -> Option<ArcRwLockReadGuard<R, T>> { 992 if self.raw.try_lock_shared_recursive_for(timeout) { 993 // SAFETY: locking guarantee is upheld 994 Some(unsafe { self.make_arc_read_guard_unchecked() }) 995 } else { 996 None 997 } 998 } 999 1000 /// Attempts to lock this `RwLock` with read access until a timeout is reached, through an `Arc`. 1001 /// 1002 /// This method is similar to the `try_read_recursive_until` method; however, it requires the `RwLock` to be 1003 /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 
1004 #[cfg(feature = "arc_lock")] 1005 #[inline] try_read_arc_recursive_until( self: &Arc<Self>, timeout: R::Instant, ) -> Option<ArcRwLockReadGuard<R, T>>1006 pub fn try_read_arc_recursive_until( 1007 self: &Arc<Self>, 1008 timeout: R::Instant, 1009 ) -> Option<ArcRwLockReadGuard<R, T>> { 1010 if self.raw.try_lock_shared_recursive_until(timeout) { 1011 // SAFETY: locking guarantee is upheld 1012 Some(unsafe { self.make_arc_read_guard_unchecked() }) 1013 } else { 1014 None 1015 } 1016 } 1017 } 1018 1019 impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> { 1020 /// Creates a new `RwLockUpgradableReadGuard` without checking if the lock is held. 1021 /// 1022 /// # Safety 1023 /// 1024 /// This method must only be called if the thread logically holds an upgradable read lock. 1025 /// 1026 /// This function does not increment the read count of the lock. Calling this function when a 1027 /// guard has already been produced is undefined behaviour unless the guard was forgotten 1028 /// with `mem::forget`. 1029 #[inline] make_upgradable_guard_unchecked(&self) -> RwLockUpgradableReadGuard<'_, R, T>1030 pub unsafe fn make_upgradable_guard_unchecked(&self) -> RwLockUpgradableReadGuard<'_, R, T> { 1031 RwLockUpgradableReadGuard { 1032 rwlock: self, 1033 marker: PhantomData, 1034 } 1035 } 1036 1037 /// Locks this `RwLock` with upgradable read access, blocking the current thread 1038 /// until it can be acquired. 1039 /// 1040 /// The calling thread will be blocked until there are no more writers or other 1041 /// upgradable reads which hold the lock. There may be other readers currently 1042 /// inside the lock when this method returns. 1043 /// 1044 /// Returns an RAII guard which will release this thread's shared access 1045 /// once it is dropped. 1046 #[inline] upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, R, T>1047 pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, R, T> { 1048 self.raw.lock_upgradable(); 1049 // SAFETY: The lock is held, as required. 1050 unsafe { self.make_upgradable_guard_unchecked() } 1051 } 1052 1053 /// Attempts to acquire this `RwLock` with upgradable read access. 1054 /// 1055 /// If the access could not be granted at this time, then `None` is returned. 1056 /// Otherwise, an RAII guard is returned which will release the shared access 1057 /// when it is dropped. 1058 /// 1059 /// This function does not block. 1060 #[inline] try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<'_, R, T>>1061 pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<'_, R, T>> { 1062 if self.raw.try_lock_upgradable() { 1063 // SAFETY: The lock is held, as required. 1064 Some(unsafe { self.make_upgradable_guard_unchecked() }) 1065 } else { 1066 None 1067 } 1068 } 1069 1070 /// Creates a new `ArcRwLockUpgradableReadGuard` without checking if the lock is held. 1071 /// 1072 /// # Safety 1073 /// 1074 /// This method must only be called if the thread logically holds an upgradable read lock. 1075 /// 1076 /// This function does not increment the read count of the lock. 
Calling this function when a 1077 /// guard has already been produced is undefined behaviour unless the guard was forgotten 1078 /// with `mem::forget`.` 1079 #[cfg(feature = "arc_lock")] 1080 #[inline] make_upgradable_arc_guard_unchecked( self: &Arc<Self>, ) -> ArcRwLockUpgradableReadGuard<R, T>1081 pub unsafe fn make_upgradable_arc_guard_unchecked( 1082 self: &Arc<Self>, 1083 ) -> ArcRwLockUpgradableReadGuard<R, T> { 1084 ArcRwLockUpgradableReadGuard { 1085 rwlock: self.clone(), 1086 marker: PhantomData, 1087 } 1088 } 1089 1090 /// Locks this `RwLock` with upgradable read access, through an `Arc`. 1091 /// 1092 /// This method is similar to the `upgradable_read` method; however, it requires the `RwLock` to be 1093 /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 1094 #[cfg(feature = "arc_lock")] 1095 #[inline] upgradable_read_arc(self: &Arc<Self>) -> ArcRwLockUpgradableReadGuard<R, T>1096 pub fn upgradable_read_arc(self: &Arc<Self>) -> ArcRwLockUpgradableReadGuard<R, T> { 1097 self.raw.lock_upgradable(); 1098 // SAFETY: locking guarantee is upheld 1099 unsafe { self.make_upgradable_arc_guard_unchecked() } 1100 } 1101 1102 /// Attempts to lock this `RwLock` with upgradable read access, through an `Arc`. 1103 /// 1104 /// This method is similar to the `try_upgradable_read` method; however, it requires the `RwLock` to be 1105 /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 1106 #[cfg(feature = "arc_lock")] 1107 #[inline] try_upgradable_read_arc(self: &Arc<Self>) -> Option<ArcRwLockUpgradableReadGuard<R, T>>1108 pub fn try_upgradable_read_arc(self: &Arc<Self>) -> Option<ArcRwLockUpgradableReadGuard<R, T>> { 1109 if self.raw.try_lock_upgradable() { 1110 // SAFETY: locking guarantee is upheld 1111 Some(unsafe { self.make_upgradable_arc_guard_unchecked() }) 1112 } else { 1113 None 1114 } 1115 } 1116 } 1117 1118 impl<R: RawRwLockUpgradeTimed, T: ?Sized> RwLock<R, T> { 1119 /// Attempts to acquire this `RwLock` with upgradable read access until a timeout 1120 /// is reached. 1121 /// 1122 /// If the access could not be granted before the timeout expires, then 1123 /// `None` is returned. Otherwise, an RAII guard is returned which will 1124 /// release the shared access when it is dropped. 1125 #[inline] try_upgradable_read_for( &self, timeout: R::Duration, ) -> Option<RwLockUpgradableReadGuard<'_, R, T>>1126 pub fn try_upgradable_read_for( 1127 &self, 1128 timeout: R::Duration, 1129 ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> { 1130 if self.raw.try_lock_upgradable_for(timeout) { 1131 // SAFETY: The lock is held, as required. 1132 Some(unsafe { self.make_upgradable_guard_unchecked() }) 1133 } else { 1134 None 1135 } 1136 } 1137 1138 /// Attempts to acquire this `RwLock` with upgradable read access until a timeout 1139 /// is reached. 1140 /// 1141 /// If the access could not be granted before the timeout expires, then 1142 /// `None` is returned. Otherwise, an RAII guard is returned which will 1143 /// release the shared access when it is dropped. 1144 #[inline] try_upgradable_read_until( &self, timeout: R::Instant, ) -> Option<RwLockUpgradableReadGuard<'_, R, T>>1145 pub fn try_upgradable_read_until( 1146 &self, 1147 timeout: R::Instant, 1148 ) -> Option<RwLockUpgradableReadGuard<'_, R, T>> { 1149 if self.raw.try_lock_upgradable_until(timeout) { 1150 // SAFETY: The lock is held, as required. 
1151 Some(unsafe { self.make_upgradable_guard_unchecked() }) 1152 } else { 1153 None 1154 } 1155 } 1156 1157 /// Attempts to lock this `RwLock` with upgradable access until a timeout is reached, through an `Arc`. 1158 /// 1159 /// This method is similar to the `try_upgradable_read_for` method; however, it requires the `RwLock` to be 1160 /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 1161 #[cfg(feature = "arc_lock")] 1162 #[inline] try_upgradable_read_arc_for( self: &Arc<Self>, timeout: R::Duration, ) -> Option<ArcRwLockUpgradableReadGuard<R, T>>1163 pub fn try_upgradable_read_arc_for( 1164 self: &Arc<Self>, 1165 timeout: R::Duration, 1166 ) -> Option<ArcRwLockUpgradableReadGuard<R, T>> { 1167 if self.raw.try_lock_upgradable_for(timeout) { 1168 // SAFETY: locking guarantee is upheld 1169 Some(unsafe { self.make_upgradable_arc_guard_unchecked() }) 1170 } else { 1171 None 1172 } 1173 } 1174 1175 /// Attempts to lock this `RwLock` with upgradable access until a timeout is reached, through an `Arc`. 1176 /// 1177 /// This method is similar to the `try_upgradable_read_until` method; however, it requires the `RwLock` to be 1178 /// inside of an `Arc` and the resulting read guard has no lifetime requirements. 1179 #[cfg(feature = "arc_lock")] 1180 #[inline] try_upgradable_read_arc_until( self: &Arc<Self>, timeout: R::Instant, ) -> Option<ArcRwLockUpgradableReadGuard<R, T>>1181 pub fn try_upgradable_read_arc_until( 1182 self: &Arc<Self>, 1183 timeout: R::Instant, 1184 ) -> Option<ArcRwLockUpgradableReadGuard<R, T>> { 1185 if self.raw.try_lock_upgradable_until(timeout) { 1186 // SAFETY: locking guarantee is upheld 1187 Some(unsafe { self.make_upgradable_arc_guard_unchecked() }) 1188 } else { 1189 None 1190 } 1191 } 1192 } 1193 1194 impl<R: RawRwLock, T: ?Sized + Default> Default for RwLock<R, T> { 1195 #[inline] default() -> RwLock<R, T>1196 fn default() -> RwLock<R, T> { 1197 RwLock::new(Default::default()) 1198 } 1199 } 1200 1201 impl<R: RawRwLock, T> From<T> for RwLock<R, T> { 1202 #[inline] from(t: T) -> RwLock<R, T>1203 fn from(t: T) -> RwLock<R, T> { 1204 RwLock::new(t) 1205 } 1206 } 1207 1208 impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1209 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1210 let mut d = f.debug_struct("RwLock"); 1211 match self.try_read() { 1212 Some(guard) => d.field("data", &&*guard), 1213 None => { 1214 // Additional format_args! here is to remove quotes around <locked> in debug output. 1215 d.field("data", &format_args!("<locked>")) 1216 } 1217 }; 1218 d.finish() 1219 } 1220 } 1221 1222 /// RAII structure used to release the shared read access of a lock when 1223 /// dropped. 1224 #[clippy::has_significant_drop] 1225 #[must_use = "if unused the RwLock will immediately unlock"] 1226 pub struct RwLockReadGuard<'a, R: RawRwLock, T: ?Sized> { 1227 rwlock: &'a RwLock<R, T>, 1228 marker: PhantomData<(&'a T, R::GuardMarker)>, 1229 } 1230 1231 unsafe impl<R: RawRwLock + Sync, T: Sync + ?Sized> Sync for RwLockReadGuard<'_, R, T> {} 1232 1233 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> { 1234 /// Returns a reference to the original reader-writer lock object. rwlock(s: &Self) -> &'a RwLock<R, T>1235 pub fn rwlock(s: &Self) -> &'a RwLock<R, T> { 1236 s.rwlock 1237 } 1238 1239 /// Make a new `MappedRwLockReadGuard` for a component of the locked data. 
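    ///
    /// For example (a sketch, assuming a concrete raw lock type behind the
    /// placeholder name `SomeRawRwLock`):
    ///
    /// ```ignore
    /// struct Point { x: i32, y: i32 }
    ///
    /// let lock = lock_api::RwLock::<SomeRawRwLock, Point>::new(Point { x: 1, y: 2 });
    /// // The mapped guard exposes only the `x` field but keeps the shared lock held.
    /// let x = lock_api::RwLockReadGuard::map(lock.read(), |p| &p.x);
    /// assert_eq!(*x, 1);
    /// ```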
1240 /// 1241 /// This operation cannot fail as the `RwLockReadGuard` passed 1242 /// in already locked the data. 1243 /// 1244 /// This is an associated function that needs to be 1245 /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of 1246 /// the same name on the contents of the locked data. 1247 #[inline] map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U> where F: FnOnce(&T) -> &U,1248 pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U> 1249 where 1250 F: FnOnce(&T) -> &U, 1251 { 1252 let raw = &s.rwlock.raw; 1253 let data = f(unsafe { &*s.rwlock.data.get() }); 1254 mem::forget(s); 1255 MappedRwLockReadGuard { 1256 raw, 1257 data, 1258 marker: PhantomData, 1259 } 1260 } 1261 1262 /// Attempts to make a new `MappedRwLockReadGuard` for a component of the 1263 /// locked data. Returns the original guard if the closure returns `None`. 1264 /// 1265 /// This operation cannot fail as the `RwLockReadGuard` passed 1266 /// in already locked the data. 1267 /// 1268 /// This is an associated function that needs to be 1269 /// used as `RwLockReadGuard::try_map(...)`. A method would interfere with methods of 1270 /// the same name on the contents of the locked data. 1271 #[inline] try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self> where F: FnOnce(&T) -> Option<&U>,1272 pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self> 1273 where 1274 F: FnOnce(&T) -> Option<&U>, 1275 { 1276 let raw = &s.rwlock.raw; 1277 let data = match f(unsafe { &*s.rwlock.data.get() }) { 1278 Some(data) => data, 1279 None => return Err(s), 1280 }; 1281 mem::forget(s); 1282 Ok(MappedRwLockReadGuard { 1283 raw, 1284 data, 1285 marker: PhantomData, 1286 }) 1287 } 1288 1289 /// Temporarily unlocks the `RwLock` to execute the given function. 1290 /// 1291 /// This is safe because `&mut` guarantees that there exist no other 1292 /// references to the data protected by the `RwLock`. 1293 #[inline] unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1294 pub fn unlocked<F, U>(s: &mut Self, f: F) -> U 1295 where 1296 F: FnOnce() -> U, 1297 { 1298 // Safety: An RwLockReadGuard always holds a shared lock. 1299 unsafe { 1300 s.rwlock.raw.unlock_shared(); 1301 } 1302 defer!(s.rwlock.raw.lock_shared()); 1303 f() 1304 } 1305 } 1306 1307 impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> { 1308 /// Unlocks the `RwLock` using a fair unlock protocol. 1309 /// 1310 /// By default, `RwLock` is unfair and allow the current thread to re-lock 1311 /// the `RwLock` before another has the chance to acquire the lock, even if 1312 /// that thread has been blocked on the `RwLock` for a long time. This is 1313 /// the default because it allows much higher throughput as it avoids 1314 /// forcing a context switch on every `RwLock` unlock. This can result in one 1315 /// thread acquiring a `RwLock` many more times than other threads. 1316 /// 1317 /// However in some cases it can be beneficial to ensure fairness by forcing 1318 /// the lock to pass on to a waiting thread if there is one. This is done by 1319 /// using this method instead of dropping the `RwLockReadGuard` normally. 1320 #[inline] unlock_fair(s: Self)1321 pub fn unlock_fair(s: Self) { 1322 // Safety: An RwLockReadGuard always holds a shared lock. 1323 unsafe { 1324 s.rwlock.raw.unlock_shared_fair(); 1325 } 1326 mem::forget(s); 1327 } 1328 1329 /// Temporarily unlocks the `RwLock` to execute the given function. 
1330 /// 1331 /// The `RwLock` is unlocked a fair unlock protocol. 1332 /// 1333 /// This is safe because `&mut` guarantees that there exist no other 1334 /// references to the data protected by the `RwLock`. 1335 #[inline] unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1336 pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U 1337 where 1338 F: FnOnce() -> U, 1339 { 1340 // Safety: An RwLockReadGuard always holds a shared lock. 1341 unsafe { 1342 s.rwlock.raw.unlock_shared_fair(); 1343 } 1344 defer!(s.rwlock.raw.lock_shared()); 1345 f() 1346 } 1347 1348 /// Temporarily yields the `RwLock` to a waiting thread if there is one. 1349 /// 1350 /// This method is functionally equivalent to calling `unlock_fair` followed 1351 /// by `read`, however it can be much more efficient in the case where there 1352 /// are no waiting threads. 1353 #[inline] bump(s: &mut Self)1354 pub fn bump(s: &mut Self) { 1355 // Safety: An RwLockReadGuard always holds a shared lock. 1356 unsafe { 1357 s.rwlock.raw.bump_shared(); 1358 } 1359 } 1360 } 1361 1362 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T> { 1363 type Target = T; 1364 #[inline] deref(&self) -> &T1365 fn deref(&self) -> &T { 1366 unsafe { &*self.rwlock.data.get() } 1367 } 1368 } 1369 1370 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> { 1371 #[inline] drop(&mut self)1372 fn drop(&mut self) { 1373 // Safety: An RwLockReadGuard always holds a shared lock. 1374 unsafe { 1375 self.rwlock.raw.unlock_shared(); 1376 } 1377 } 1378 } 1379 1380 impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockReadGuard<'a, R, T> { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1381 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1382 fmt::Debug::fmt(&**self, f) 1383 } 1384 } 1385 1386 impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display 1387 for RwLockReadGuard<'a, R, T> 1388 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1389 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1390 (**self).fmt(f) 1391 } 1392 } 1393 1394 #[cfg(feature = "owning_ref")] 1395 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {} 1396 1397 /// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`. 1398 /// 1399 /// This is similar to the `RwLockReadGuard` struct, except instead of using a reference to unlock the `RwLock` 1400 /// it uses an `Arc<RwLock>`. This has several advantages, most notably that it has an `'static` lifetime. 1401 #[cfg(feature = "arc_lock")] 1402 #[clippy::has_significant_drop] 1403 #[must_use = "if unused the RwLock will immediately unlock"] 1404 pub struct ArcRwLockReadGuard<R: RawRwLock, T: ?Sized> { 1405 rwlock: Arc<RwLock<R, T>>, 1406 marker: PhantomData<R::GuardMarker>, 1407 } 1408 1409 #[cfg(feature = "arc_lock")] 1410 impl<R: RawRwLock, T: ?Sized> ArcRwLockReadGuard<R, T> { 1411 /// Returns a reference to the rwlock, contained in its `Arc`. rwlock(s: &Self) -> &Arc<RwLock<R, T>>1412 pub fn rwlock(s: &Self) -> &Arc<RwLock<R, T>> { 1413 &s.rwlock 1414 } 1415 1416 /// Temporarily unlocks the `RwLock` to execute the given function. 1417 /// 1418 /// This is functionally identical to the `unlocked` method on [`RwLockReadGuard`]. 
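    ///
    /// A sketch, assuming the `arc_lock` feature and a concrete raw lock type
    /// behind the placeholder name `SomeRawRwLock`:
    ///
    /// ```ignore
    /// use std::sync::Arc;
    ///
    /// let lock = Arc::new(lock_api::RwLock::<SomeRawRwLock, i32>::new(1));
    /// let mut guard = lock.read_arc();
    /// lock_api::ArcRwLockReadGuard::unlocked(&mut guard, || {
    ///     // The shared lock is released while this closure runs and is
    ///     // re-acquired before `unlocked` returns.
    /// });
    /// assert_eq!(*guard, 1);
    /// ```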
1419 #[inline] unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1420 pub fn unlocked<F, U>(s: &mut Self, f: F) -> U 1421 where 1422 F: FnOnce() -> U, 1423 { 1424 // Safety: An RwLockReadGuard always holds a shared lock. 1425 unsafe { 1426 s.rwlock.raw.unlock_shared(); 1427 } 1428 defer!(s.rwlock.raw.lock_shared()); 1429 f() 1430 } 1431 } 1432 1433 #[cfg(feature = "arc_lock")] 1434 impl<R: RawRwLockFair, T: ?Sized> ArcRwLockReadGuard<R, T> { 1435 /// Unlocks the `RwLock` using a fair unlock protocol. 1436 /// 1437 /// This is functionally identical to the `unlock_fair` method on [`RwLockReadGuard`]. 1438 #[inline] unlock_fair(s: Self)1439 pub fn unlock_fair(s: Self) { 1440 // Safety: An RwLockReadGuard always holds a shared lock. 1441 unsafe { 1442 s.rwlock.raw.unlock_shared_fair(); 1443 } 1444 1445 // SAFETY: ensure the Arc has its refcount decremented 1446 let mut s = ManuallyDrop::new(s); 1447 unsafe { ptr::drop_in_place(&mut s.rwlock) }; 1448 } 1449 1450 /// Temporarily unlocks the `RwLock` to execute the given function. 1451 /// 1452 /// This is functionally identical to the `unlocked_fair` method on [`RwLockReadGuard`]. 1453 #[inline] unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1454 pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U 1455 where 1456 F: FnOnce() -> U, 1457 { 1458 // Safety: An RwLockReadGuard always holds a shared lock. 1459 unsafe { 1460 s.rwlock.raw.unlock_shared_fair(); 1461 } 1462 defer!(s.rwlock.raw.lock_shared()); 1463 f() 1464 } 1465 1466 /// Temporarily yields the `RwLock` to a waiting thread if there is one. 1467 /// 1468 /// This is functionally identical to the `bump` method on [`RwLockReadGuard`]. 1469 #[inline] bump(s: &mut Self)1470 pub fn bump(s: &mut Self) { 1471 // Safety: An RwLockReadGuard always holds a shared lock. 1472 unsafe { 1473 s.rwlock.raw.bump_shared(); 1474 } 1475 } 1476 } 1477 1478 #[cfg(feature = "arc_lock")] 1479 impl<R: RawRwLock, T: ?Sized> Deref for ArcRwLockReadGuard<R, T> { 1480 type Target = T; 1481 #[inline] deref(&self) -> &T1482 fn deref(&self) -> &T { 1483 unsafe { &*self.rwlock.data.get() } 1484 } 1485 } 1486 1487 #[cfg(feature = "arc_lock")] 1488 impl<R: RawRwLock, T: ?Sized> Drop for ArcRwLockReadGuard<R, T> { 1489 #[inline] drop(&mut self)1490 fn drop(&mut self) { 1491 // Safety: An RwLockReadGuard always holds a shared lock. 1492 unsafe { 1493 self.rwlock.raw.unlock_shared(); 1494 } 1495 } 1496 } 1497 1498 #[cfg(feature = "arc_lock")] 1499 impl<R: RawRwLock, T: fmt::Debug + ?Sized> fmt::Debug for ArcRwLockReadGuard<R, T> { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1500 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1501 fmt::Debug::fmt(&**self, f) 1502 } 1503 } 1504 1505 #[cfg(feature = "arc_lock")] 1506 impl<R: RawRwLock, T: fmt::Display + ?Sized> fmt::Display for ArcRwLockReadGuard<R, T> { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1507 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1508 (**self).fmt(f) 1509 } 1510 } 1511 1512 /// RAII structure used to release the exclusive write access of a lock when 1513 /// dropped. 
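///
/// A minimal usage sketch, assuming the downstream `parking_lot::RwLock` built on
/// these guard types:
///
/// ```ignore
/// use parking_lot::RwLock;
///
/// let lock = RwLock::new(5);
/// {
///     let mut guard = lock.write(); // RwLockWriteGuard
///     *guard += 1;
/// } // guard dropped here, releasing the exclusive lock
/// assert_eq!(*lock.read(), 6);
/// ```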
#[clippy::has_significant_drop]
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
    rwlock: &'a RwLock<R, T>,
    marker: PhantomData<(&'a mut T, R::GuardMarker)>,
}

unsafe impl<R: RawRwLock + Sync, T: Sync + ?Sized> Sync for RwLockWriteGuard<'_, R, T> {}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Returns a reference to the original reader-writer lock object.
    pub fn rwlock(s: &Self) -> &'a RwLock<R, T> {
        s.rwlock
    }

    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let raw = &s.rwlock.raw;
        let data = f(unsafe { &mut *s.rwlock.data.get() });
        mem::forget(s);
        MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let raw = &s.rwlock.raw;
        let data = match f(unsafe { &mut *s.rwlock.data.get() }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.unlock_exclusive();
        }
        defer!(s.rwlock.raw.lock_exclusive());
        f()
    }
}

impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Atomically downgrades a write lock into a read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.downgrade();
        }
        let rwlock = s.rwlock;
        mem::forget(s);
        RwLockReadGuard {
            rwlock,
            marker: PhantomData,
        }
    }
}

impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Atomically downgrades a write lock into an upgradable read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
    pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T> {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.downgrade_to_upgradable();
        }
        let rwlock = s.rwlock;
        mem::forget(s);
        RwLockUpgradableReadGuard {
            rwlock,
            marker: PhantomData,
        }
    }
}

impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `RwLockWriteGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.unlock_exclusive_fair();
        }
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.unlock_exclusive_fair();
        }
        defer!(s.rwlock.raw.lock_exclusive());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `write`, however it can be much more efficient in the case where there
    /// are no waiting threads.
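    ///
    /// A minimal sketch of yielding inside a long write section, assuming the
    /// downstream `parking_lot::RwLock`; `work_items` and `process` are
    /// illustrative placeholders:
    ///
    /// ```ignore
    /// use parking_lot::{RwLock, RwLockWriteGuard};
    ///
    /// let lock = RwLock::new(Vec::new());
    /// let mut guard = lock.write();
    /// for item in work_items {
    ///     process(item, &mut *guard);
    ///     // Hand the lock to a waiting thread (if any), then re-acquire it.
    ///     RwLockWriteGuard::bump(&mut guard);
    /// }
    /// ```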
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.bump_exclusive();
        }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, R, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.data.get() }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.rwlock.data.get() }
    }
}

impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            self.rwlock.raw.unlock_exclusive();
        }
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockWriteGuard<'a, R, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for RwLockWriteGuard<'a, R, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {}

/// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`.
/// This is similar to the `RwLockWriteGuard` struct, except instead of using a reference to unlock the `RwLock`
/// it uses an `Arc<RwLock>`. This has several advantages, most notably that it has a `'static` lifetime.
#[cfg(feature = "arc_lock")]
#[clippy::has_significant_drop]
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct ArcRwLockWriteGuard<R: RawRwLock, T: ?Sized> {
    rwlock: Arc<RwLock<R, T>>,
    marker: PhantomData<R::GuardMarker>,
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLock, T: ?Sized> ArcRwLockWriteGuard<R, T> {
    /// Returns a reference to the rwlock, contained in its `Arc`.
    pub fn rwlock(s: &Self) -> &Arc<RwLock<R, T>> {
        &s.rwlock
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is functionally equivalent to the `unlocked` method on [`RwLockWriteGuard`].
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockWriteGuard always holds an exclusive lock.
        unsafe {
            s.rwlock.raw.unlock_exclusive();
        }
        defer!(s.rwlock.raw.lock_exclusive());
        f()
    }
}

#[cfg(feature = "arc_lock")]
impl<R: RawRwLockDowngrade, T: ?Sized> ArcRwLockWriteGuard<R, T> {
    /// Atomically downgrades a write lock into a read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// This is functionally equivalent to the `downgrade` method on [`RwLockWriteGuard`].
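    ///
    /// A minimal sketch, assuming the downstream `parking_lot` crate with its
    /// `arc_lock` feature enabled:
    ///
    /// ```ignore
    /// use std::sync::Arc;
    /// use parking_lot::{ArcRwLockWriteGuard, RwLock};
    ///
    /// let lock = Arc::new(RwLock::new(0));
    /// let mut writer = lock.write_arc();
    /// *writer = 1;
    /// // Keep read access (and the owning `Arc`) without ever fully releasing
    /// // the lock, so no writer can sneak in between.
    /// let reader = ArcRwLockWriteGuard::downgrade(writer);
    /// assert_eq!(*reader, 1);
    /// ```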
downgrade(s: Self) -> ArcRwLockReadGuard<R, T>1777 pub fn downgrade(s: Self) -> ArcRwLockReadGuard<R, T> { 1778 // Safety: An RwLockWriteGuard always holds an exclusive lock. 1779 unsafe { 1780 s.rwlock.raw.downgrade(); 1781 } 1782 1783 // SAFETY: prevent the arc's refcount from changing using ManuallyDrop and ptr::read 1784 let s = ManuallyDrop::new(s); 1785 let rwlock = unsafe { ptr::read(&s.rwlock) }; 1786 1787 ArcRwLockReadGuard { 1788 rwlock, 1789 marker: PhantomData, 1790 } 1791 } 1792 } 1793 1794 #[cfg(feature = "arc_lock")] 1795 impl<R: RawRwLockUpgradeDowngrade, T: ?Sized> ArcRwLockWriteGuard<R, T> { 1796 /// Atomically downgrades a write lock into an upgradable read lock without allowing any 1797 /// writers to take exclusive access of the lock in the meantime. 1798 /// 1799 /// This is functionally identical to the `downgrade_to_upgradable` method on [`RwLockWriteGuard`]. downgrade_to_upgradable(s: Self) -> ArcRwLockUpgradableReadGuard<R, T>1800 pub fn downgrade_to_upgradable(s: Self) -> ArcRwLockUpgradableReadGuard<R, T> { 1801 // Safety: An RwLockWriteGuard always holds an exclusive lock. 1802 unsafe { 1803 s.rwlock.raw.downgrade_to_upgradable(); 1804 } 1805 1806 // SAFETY: same as above 1807 let s = ManuallyDrop::new(s); 1808 let rwlock = unsafe { ptr::read(&s.rwlock) }; 1809 1810 ArcRwLockUpgradableReadGuard { 1811 rwlock, 1812 marker: PhantomData, 1813 } 1814 } 1815 } 1816 1817 #[cfg(feature = "arc_lock")] 1818 impl<R: RawRwLockFair, T: ?Sized> ArcRwLockWriteGuard<R, T> { 1819 /// Unlocks the `RwLock` using a fair unlock protocol. 1820 /// 1821 /// This is functionally equivalent to the `unlock_fair` method on [`RwLockWriteGuard`]. 1822 #[inline] unlock_fair(s: Self)1823 pub fn unlock_fair(s: Self) { 1824 // Safety: An RwLockWriteGuard always holds an exclusive lock. 1825 unsafe { 1826 s.rwlock.raw.unlock_exclusive_fair(); 1827 } 1828 1829 // SAFETY: prevent the Arc from leaking memory 1830 let mut s = ManuallyDrop::new(s); 1831 unsafe { ptr::drop_in_place(&mut s.rwlock) }; 1832 } 1833 1834 /// Temporarily unlocks the `RwLock` to execute the given function. 1835 /// 1836 /// This is functionally equivalent to the `unlocked_fair` method on [`RwLockWriteGuard`]. 1837 #[inline] unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1838 pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U 1839 where 1840 F: FnOnce() -> U, 1841 { 1842 // Safety: An RwLockWriteGuard always holds an exclusive lock. 1843 unsafe { 1844 s.rwlock.raw.unlock_exclusive_fair(); 1845 } 1846 defer!(s.rwlock.raw.lock_exclusive()); 1847 f() 1848 } 1849 1850 /// Temporarily yields the `RwLock` to a waiting thread if there is one. 1851 /// 1852 /// This method is functionally equivalent to the `bump` method on [`RwLockWriteGuard`]. 1853 #[inline] bump(s: &mut Self)1854 pub fn bump(s: &mut Self) { 1855 // Safety: An RwLockWriteGuard always holds an exclusive lock. 
1856 unsafe { 1857 s.rwlock.raw.bump_exclusive(); 1858 } 1859 } 1860 } 1861 1862 #[cfg(feature = "arc_lock")] 1863 impl<R: RawRwLock, T: ?Sized> Deref for ArcRwLockWriteGuard<R, T> { 1864 type Target = T; 1865 #[inline] deref(&self) -> &T1866 fn deref(&self) -> &T { 1867 unsafe { &*self.rwlock.data.get() } 1868 } 1869 } 1870 1871 #[cfg(feature = "arc_lock")] 1872 impl<R: RawRwLock, T: ?Sized> DerefMut for ArcRwLockWriteGuard<R, T> { 1873 #[inline] deref_mut(&mut self) -> &mut T1874 fn deref_mut(&mut self) -> &mut T { 1875 unsafe { &mut *self.rwlock.data.get() } 1876 } 1877 } 1878 1879 #[cfg(feature = "arc_lock")] 1880 impl<R: RawRwLock, T: ?Sized> Drop for ArcRwLockWriteGuard<R, T> { 1881 #[inline] drop(&mut self)1882 fn drop(&mut self) { 1883 // Safety: An RwLockWriteGuard always holds an exclusive lock. 1884 unsafe { 1885 self.rwlock.raw.unlock_exclusive(); 1886 } 1887 } 1888 } 1889 1890 #[cfg(feature = "arc_lock")] 1891 impl<R: RawRwLock, T: fmt::Debug + ?Sized> fmt::Debug for ArcRwLockWriteGuard<R, T> { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1892 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1893 fmt::Debug::fmt(&**self, f) 1894 } 1895 } 1896 1897 #[cfg(feature = "arc_lock")] 1898 impl<R: RawRwLock, T: fmt::Display + ?Sized> fmt::Display for ArcRwLockWriteGuard<R, T> { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result1899 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 1900 (**self).fmt(f) 1901 } 1902 } 1903 1904 /// RAII structure used to release the upgradable read access of a lock when 1905 /// dropped. 1906 #[clippy::has_significant_drop] 1907 #[must_use = "if unused the RwLock will immediately unlock"] 1908 pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade, T: ?Sized> { 1909 rwlock: &'a RwLock<R, T>, 1910 marker: PhantomData<(&'a T, R::GuardMarker)>, 1911 } 1912 1913 unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync 1914 for RwLockUpgradableReadGuard<'a, R, T> 1915 { 1916 } 1917 1918 impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> { 1919 /// Returns a reference to the original reader-writer lock object. rwlock(s: &Self) -> &'a RwLock<R, T>1920 pub fn rwlock(s: &Self) -> &'a RwLock<R, T> { 1921 s.rwlock 1922 } 1923 1924 /// Temporarily unlocks the `RwLock` to execute the given function. 1925 /// 1926 /// This is safe because `&mut` guarantees that there exist no other 1927 /// references to the data protected by the `RwLock`. 1928 #[inline] unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,1929 pub fn unlocked<F, U>(s: &mut Self, f: F) -> U 1930 where 1931 F: FnOnce() -> U, 1932 { 1933 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 1934 unsafe { 1935 s.rwlock.raw.unlock_upgradable(); 1936 } 1937 defer!(s.rwlock.raw.lock_upgradable()); 1938 f() 1939 } 1940 1941 /// Atomically upgrades an upgradable read lock lock into an exclusive write lock, 1942 /// blocking the current thread until it can be acquired. upgrade(s: Self) -> RwLockWriteGuard<'a, R, T>1943 pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, R, T> { 1944 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 1945 unsafe { 1946 s.rwlock.raw.upgrade(); 1947 } 1948 let rwlock = s.rwlock; 1949 mem::forget(s); 1950 RwLockWriteGuard { 1951 rwlock, 1952 marker: PhantomData, 1953 } 1954 } 1955 1956 /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock. 
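    ///
    /// A minimal sketch of the check-then-write pattern this enables, assuming
    /// the downstream `parking_lot::RwLock` built on these guard types:
    ///
    /// ```ignore
    /// use parking_lot::{RwLock, RwLockUpgradableReadGuard};
    ///
    /// let lock = RwLock::new(Vec::new());
    /// let guard = lock.upgradable_read();
    /// if guard.is_empty() {
    ///     // Only take the exclusive lock if a write is actually needed.
    ///     match RwLockUpgradableReadGuard::try_upgrade(guard) {
    ///         Ok(mut write_guard) => write_guard.push(1),
    ///         Err(_guard) => { /* other readers still hold the lock */ }
    ///     }
    /// }
    /// ```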
    ///
    /// If the access could not be granted at this time, then the current guard is returned.
    pub fn try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        if unsafe { s.rwlock.raw.try_upgrade() } {
            let rwlock = s.rwlock;
            mem::forget(s);
            Ok(RwLockWriteGuard {
                rwlock,
                marker: PhantomData,
            })
        } else {
            Err(s)
        }
    }
}

impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `RwLockUpgradableReadGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.unlock_upgradable_fair();
        }
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.unlock_upgradable_fair();
        }
        defer!(s.rwlock.raw.lock_upgradable());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `upgradable_read`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
        unsafe {
            s.rwlock.raw.bump_upgradable();
        }
    }
}

impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
    /// Atomically downgrades an upgradable read lock into a shared read lock
    /// without allowing any writers to take exclusive access of the lock in the
    /// meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
        // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
2039 unsafe { 2040 s.rwlock.raw.downgrade_upgradable(); 2041 } 2042 let rwlock = s.rwlock; 2043 mem::forget(s); 2044 RwLockReadGuard { 2045 rwlock, 2046 marker: PhantomData, 2047 } 2048 } 2049 2050 /// First, atomically upgrades an upgradable read lock lock into an exclusive write lock, 2051 /// blocking the current thread until it can be acquired. 2052 /// 2053 /// Then, calls the provided closure with an exclusive reference to the lock's data. 2054 /// 2055 /// Finally, atomically downgrades the lock back to an upgradable read lock. 2056 /// The closure's return value is wrapped in `Some` and returned. 2057 /// 2058 /// This function only requires a mutable reference to the guard, unlike 2059 /// `upgrade` which takes the guard by value. with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Ret2060 pub fn with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Ret { 2061 unsafe { 2062 self.rwlock.raw.upgrade(); 2063 } 2064 2065 // Safety: We just upgraded the lock, so we have mutable access to the data. 2066 // This will restore the state the lock was in at the start of the function. 2067 defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() }); 2068 2069 // Safety: We upgraded the lock, so we have mutable access to the data. 2070 // When this function returns, whether by drop or panic, 2071 // the drop guard will downgrade it back to an upgradeable lock. 2072 f(unsafe { &mut *self.rwlock.data.get() }) 2073 } 2074 2075 /// First, tries to atomically upgrade an upgradable read lock into an exclusive write lock. 2076 /// 2077 /// If the access could not be granted at this time, then `None` is returned. 2078 /// 2079 /// Otherwise, calls the provided closure with an exclusive reference to the lock's data, 2080 /// and finally downgrades the lock back to an upgradable read lock. 2081 /// The closure's return value is wrapped in `Some` and returned. 2082 /// 2083 /// This function only requires a mutable reference to the guard, unlike 2084 /// `try_upgrade` which takes the guard by value. try_with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Option<Ret>2085 pub fn try_with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Option<Ret> { 2086 if unsafe { self.rwlock.raw.try_upgrade() } { 2087 // Safety: We just upgraded the lock, so we have mutable access to the data. 2088 // This will restore the state the lock was in at the start of the function. 2089 defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() }); 2090 2091 // Safety: We upgraded the lock, so we have mutable access to the data. 2092 // When this function returns, whether by drop or panic, 2093 // the drop guard will downgrade it back to an upgradeable lock. 2094 Some(f(unsafe { &mut *self.rwlock.data.get() })) 2095 } else { 2096 None 2097 } 2098 } 2099 } 2100 2101 impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> { 2102 /// Tries to atomically upgrade an upgradable read lock into an exclusive 2103 /// write lock, until a timeout is reached. 2104 /// 2105 /// If the access could not be granted before the timeout expires, then 2106 /// the current guard is returned. try_upgrade_for( s: Self, timeout: R::Duration, ) -> Result<RwLockWriteGuard<'a, R, T>, Self>2107 pub fn try_upgrade_for( 2108 s: Self, 2109 timeout: R::Duration, 2110 ) -> Result<RwLockWriteGuard<'a, R, T>, Self> { 2111 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 
2112 if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } { 2113 let rwlock = s.rwlock; 2114 mem::forget(s); 2115 Ok(RwLockWriteGuard { 2116 rwlock, 2117 marker: PhantomData, 2118 }) 2119 } else { 2120 Err(s) 2121 } 2122 } 2123 2124 /// Tries to atomically upgrade an upgradable read lock into an exclusive 2125 /// write lock, until a timeout is reached. 2126 /// 2127 /// If the access could not be granted before the timeout expires, then 2128 /// the current guard is returned. 2129 #[inline] try_upgrade_until( s: Self, timeout: R::Instant, ) -> Result<RwLockWriteGuard<'a, R, T>, Self>2130 pub fn try_upgrade_until( 2131 s: Self, 2132 timeout: R::Instant, 2133 ) -> Result<RwLockWriteGuard<'a, R, T>, Self> { 2134 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2135 if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } { 2136 let rwlock = s.rwlock; 2137 mem::forget(s); 2138 Ok(RwLockWriteGuard { 2139 rwlock, 2140 marker: PhantomData, 2141 }) 2142 } else { 2143 Err(s) 2144 } 2145 } 2146 } 2147 2148 impl<'a, R: RawRwLockUpgradeTimed + RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> 2149 RwLockUpgradableReadGuard<'a, R, T> 2150 { 2151 /// Tries to atomically upgrade an upgradable read lock into an exclusive 2152 /// write lock, until a timeout is reached. 2153 /// 2154 /// If the access could not be granted before the timeout expires, then 2155 /// `None` is returned. 2156 /// 2157 /// Otherwise, calls the provided closure with an exclusive reference to the lock's data, 2158 /// and finally downgrades the lock back to an upgradable read lock. 2159 /// The closure's return value is wrapped in `Some` and returned. 2160 /// 2161 /// This function only requires a mutable reference to the guard, unlike 2162 /// `try_upgrade_for` which takes the guard by value. try_with_upgraded_for<Ret, F: FnOnce(&mut T) -> Ret>( &mut self, timeout: R::Duration, f: F, ) -> Option<Ret>2163 pub fn try_with_upgraded_for<Ret, F: FnOnce(&mut T) -> Ret>( 2164 &mut self, 2165 timeout: R::Duration, 2166 f: F, 2167 ) -> Option<Ret> { 2168 if unsafe { self.rwlock.raw.try_upgrade_for(timeout) } { 2169 // Safety: We just upgraded the lock, so we have mutable access to the data. 2170 // This will restore the state the lock was in at the start of the function. 2171 defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() }); 2172 2173 // Safety: We upgraded the lock, so we have mutable access to the data. 2174 // When this function returns, whether by drop or panic, 2175 // the drop guard will downgrade it back to an upgradeable lock. 2176 Some(f(unsafe { &mut *self.rwlock.data.get() })) 2177 } else { 2178 None 2179 } 2180 } 2181 2182 /// Tries to atomically upgrade an upgradable read lock into an exclusive 2183 /// write lock, until a timeout is reached. 2184 /// 2185 /// If the access could not be granted before the timeout expires, then 2186 /// `None` is returned. 2187 /// 2188 /// Otherwise, calls the provided closure with an exclusive reference to the lock's data, 2189 /// and finally downgrades the lock back to an upgradable read lock. 2190 /// The closure's return value is wrapped in `Some` and returned. 2191 /// 2192 /// This function only requires a mutable reference to the guard, unlike 2193 /// `try_upgrade_until` which takes the guard by value. 
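    ///
    /// A minimal sketch, assuming the downstream `parking_lot::RwLock`, whose
    /// timed raw lock uses `std::time::Instant` as its `Instant` type:
    ///
    /// ```ignore
    /// use std::time::{Duration, Instant};
    /// use parking_lot::RwLock;
    ///
    /// let lock = RwLock::new(Vec::new());
    /// let mut guard = lock.upgradable_read();
    /// let deadline = Instant::now() + Duration::from_millis(10);
    /// // Upgrade in place, write, and downgrade back, or give up at the deadline.
    /// if guard.try_with_upgraded_until(deadline, |v| v.push(1)).is_none() {
    ///     // Exclusive access was not granted in time; the guard still holds
    ///     // the upgradable read lock.
    /// }
    /// ```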
try_with_upgraded_until<Ret, F: FnOnce(&mut T) -> Ret>( &mut self, timeout: R::Instant, f: F, ) -> Option<Ret>2194 pub fn try_with_upgraded_until<Ret, F: FnOnce(&mut T) -> Ret>( 2195 &mut self, 2196 timeout: R::Instant, 2197 f: F, 2198 ) -> Option<Ret> { 2199 if unsafe { self.rwlock.raw.try_upgrade_until(timeout) } { 2200 // Safety: We just upgraded the lock, so we have mutable access to the data. 2201 // This will restore the state the lock was in at the start of the function. 2202 defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() }); 2203 2204 // Safety: We upgraded the lock, so we have mutable access to the data. 2205 // When this function returns, whether by drop or panic, 2206 // the drop guard will downgrade it back to an upgradeable lock. 2207 Some(f(unsafe { &mut *self.rwlock.data.get() })) 2208 } else { 2209 None 2210 } 2211 } 2212 } 2213 2214 impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, R, T> { 2215 type Target = T; 2216 #[inline] deref(&self) -> &T2217 fn deref(&self) -> &T { 2218 unsafe { &*self.rwlock.data.get() } 2219 } 2220 } 2221 2222 impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> { 2223 #[inline] drop(&mut self)2224 fn drop(&mut self) { 2225 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2226 unsafe { 2227 self.rwlock.raw.unlock_upgradable(); 2228 } 2229 } 2230 } 2231 2232 impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug 2233 for RwLockUpgradableReadGuard<'a, R, T> 2234 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2235 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2236 fmt::Debug::fmt(&**self, f) 2237 } 2238 } 2239 2240 impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display 2241 for RwLockUpgradableReadGuard<'a, R, T> 2242 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2243 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2244 (**self).fmt(f) 2245 } 2246 } 2247 2248 #[cfg(feature = "owning_ref")] 2249 unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress 2250 for RwLockUpgradableReadGuard<'a, R, T> 2251 { 2252 } 2253 2254 /// An RAII rwlock guard returned by the `Arc` locking operations on `RwLock`. 2255 /// This is similar to the `RwLockUpgradableReadGuard` struct, except instead of using a reference to unlock the 2256 /// `RwLock` it uses an `Arc<RwLock>`. This has several advantages, most notably that it has an `'static` 2257 /// lifetime. 2258 #[cfg(feature = "arc_lock")] 2259 #[clippy::has_significant_drop] 2260 #[must_use = "if unused the RwLock will immediately unlock"] 2261 pub struct ArcRwLockUpgradableReadGuard<R: RawRwLockUpgrade, T: ?Sized> { 2262 rwlock: Arc<RwLock<R, T>>, 2263 marker: PhantomData<R::GuardMarker>, 2264 } 2265 2266 #[cfg(feature = "arc_lock")] 2267 impl<R: RawRwLockUpgrade, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> { 2268 /// Returns a reference to the rwlock, contained in its original `Arc`. rwlock(s: &Self) -> &Arc<RwLock<R, T>>2269 pub fn rwlock(s: &Self) -> &Arc<RwLock<R, T>> { 2270 &s.rwlock 2271 } 2272 2273 /// Temporarily unlocks the `RwLock` to execute the given function. 2274 /// 2275 /// This is functionally identical to the `unlocked` method on [`RwLockUpgradableReadGuard`]. 
2276 #[inline] unlocked<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,2277 pub fn unlocked<F, U>(s: &mut Self, f: F) -> U 2278 where 2279 F: FnOnce() -> U, 2280 { 2281 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2282 unsafe { 2283 s.rwlock.raw.unlock_upgradable(); 2284 } 2285 defer!(s.rwlock.raw.lock_upgradable()); 2286 f() 2287 } 2288 2289 /// Atomically upgrades an upgradable read lock lock into an exclusive write lock, 2290 /// blocking the current thread until it can be acquired. upgrade(s: Self) -> ArcRwLockWriteGuard<R, T>2291 pub fn upgrade(s: Self) -> ArcRwLockWriteGuard<R, T> { 2292 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2293 unsafe { 2294 s.rwlock.raw.upgrade(); 2295 } 2296 2297 // SAFETY: avoid incrementing or decrementing the refcount using ManuallyDrop and reading the Arc out 2298 // of the struct 2299 let s = ManuallyDrop::new(s); 2300 let rwlock = unsafe { ptr::read(&s.rwlock) }; 2301 2302 ArcRwLockWriteGuard { 2303 rwlock, 2304 marker: PhantomData, 2305 } 2306 } 2307 2308 /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock. 2309 /// 2310 /// If the access could not be granted at this time, then the current guard is returned. try_upgrade(s: Self) -> Result<ArcRwLockWriteGuard<R, T>, Self>2311 pub fn try_upgrade(s: Self) -> Result<ArcRwLockWriteGuard<R, T>, Self> { 2312 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2313 if unsafe { s.rwlock.raw.try_upgrade() } { 2314 // SAFETY: same as above 2315 let s = ManuallyDrop::new(s); 2316 let rwlock = unsafe { ptr::read(&s.rwlock) }; 2317 2318 Ok(ArcRwLockWriteGuard { 2319 rwlock, 2320 marker: PhantomData, 2321 }) 2322 } else { 2323 Err(s) 2324 } 2325 } 2326 } 2327 2328 #[cfg(feature = "arc_lock")] 2329 impl<R: RawRwLockUpgradeFair, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> { 2330 /// Unlocks the `RwLock` using a fair unlock protocol. 2331 /// 2332 /// This is functionally identical to the `unlock_fair` method on [`RwLockUpgradableReadGuard`]. 2333 #[inline] unlock_fair(s: Self)2334 pub fn unlock_fair(s: Self) { 2335 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2336 unsafe { 2337 s.rwlock.raw.unlock_upgradable_fair(); 2338 } 2339 2340 // SAFETY: make sure we decrement the refcount properly 2341 let mut s = ManuallyDrop::new(s); 2342 unsafe { ptr::drop_in_place(&mut s.rwlock) }; 2343 } 2344 2345 /// Temporarily unlocks the `RwLock` to execute the given function. 2346 /// 2347 /// This is functionally equivalent to the `unlocked_fair` method on [`RwLockUpgradableReadGuard`]. 2348 #[inline] unlocked_fair<F, U>(s: &mut Self, f: F) -> U where F: FnOnce() -> U,2349 pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U 2350 where 2351 F: FnOnce() -> U, 2352 { 2353 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2354 unsafe { 2355 s.rwlock.raw.unlock_upgradable_fair(); 2356 } 2357 defer!(s.rwlock.raw.lock_upgradable()); 2358 f() 2359 } 2360 2361 /// Temporarily yields the `RwLock` to a waiting thread if there is one. 2362 /// 2363 /// This method is functionally equivalent to calling `bump` on [`RwLockUpgradableReadGuard`]. 2364 #[inline] bump(s: &mut Self)2365 pub fn bump(s: &mut Self) { 2366 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 
2367 unsafe { 2368 s.rwlock.raw.bump_upgradable(); 2369 } 2370 } 2371 } 2372 2373 #[cfg(feature = "arc_lock")] 2374 impl<R: RawRwLockUpgradeDowngrade, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> { 2375 /// Atomically downgrades an upgradable read lock lock into a shared read lock 2376 /// without allowing any writers to take exclusive access of the lock in the 2377 /// meantime. 2378 /// 2379 /// Note that if there are any writers currently waiting to take the lock 2380 /// then other readers may not be able to acquire the lock even if it was 2381 /// downgraded. downgrade(s: Self) -> ArcRwLockReadGuard<R, T>2382 pub fn downgrade(s: Self) -> ArcRwLockReadGuard<R, T> { 2383 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2384 unsafe { 2385 s.rwlock.raw.downgrade_upgradable(); 2386 } 2387 2388 // SAFETY: use ManuallyDrop and ptr::read to ensure the refcount is not changed 2389 let s = ManuallyDrop::new(s); 2390 let rwlock = unsafe { ptr::read(&s.rwlock) }; 2391 2392 ArcRwLockReadGuard { 2393 rwlock, 2394 marker: PhantomData, 2395 } 2396 } 2397 2398 /// First, atomically upgrades an upgradable read lock lock into an exclusive write lock, 2399 /// blocking the current thread until it can be acquired. 2400 /// 2401 /// Then, calls the provided closure with an exclusive reference to the lock's data. 2402 /// 2403 /// Finally, atomically downgrades the lock back to an upgradable read lock. 2404 /// The closure's return value is returned. 2405 /// 2406 /// This function only requires a mutable reference to the guard, unlike 2407 /// `upgrade` which takes the guard by value. with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Ret2408 pub fn with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Ret { 2409 unsafe { 2410 self.rwlock.raw.upgrade(); 2411 } 2412 2413 // Safety: We just upgraded the lock, so we have mutable access to the data. 2414 // This will restore the state the lock was in at the start of the function. 2415 defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() }); 2416 2417 // Safety: We upgraded the lock, so we have mutable access to the data. 2418 // When this function returns, whether by drop or panic, 2419 // the drop guard will downgrade it back to an upgradeable lock. 2420 f(unsafe { &mut *self.rwlock.data.get() }) 2421 } 2422 2423 /// First, tries to atomically upgrade an upgradable read lock into an exclusive write lock. 2424 /// 2425 /// If the access could not be granted at this time, then `None` is returned. 2426 /// 2427 /// Otherwise, calls the provided closure with an exclusive reference to the lock's data, 2428 /// and finally downgrades the lock back to an upgradable read lock. 2429 /// The closure's return value is wrapped in `Some` and returned. 2430 /// 2431 /// This function only requires a mutable reference to the guard, unlike 2432 /// `try_upgrade` which takes the guard by value. try_with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Option<Ret>2433 pub fn try_with_upgraded<Ret, F: FnOnce(&mut T) -> Ret>(&mut self, f: F) -> Option<Ret> { 2434 if unsafe { self.rwlock.raw.try_upgrade() } { 2435 // Safety: We just upgraded the lock, so we have mutable access to the data. 2436 // This will restore the state the lock was in at the start of the function. 2437 defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() }); 2438 2439 // Safety: We upgraded the lock, so we have mutable access to the data. 
2440 // When this function returns, whether by drop or panic, 2441 // the drop guard will downgrade it back to an upgradeable lock. 2442 Some(f(unsafe { &mut *self.rwlock.data.get() })) 2443 } else { 2444 None 2445 } 2446 } 2447 } 2448 2449 #[cfg(feature = "arc_lock")] 2450 impl<R: RawRwLockUpgradeTimed, T: ?Sized> ArcRwLockUpgradableReadGuard<R, T> { 2451 /// Tries to atomically upgrade an upgradable read lock into an exclusive 2452 /// write lock, until a timeout is reached. 2453 /// 2454 /// If the access could not be granted before the timeout expires, then 2455 /// the current guard is returned. try_upgrade_for( s: Self, timeout: R::Duration, ) -> Result<ArcRwLockWriteGuard<R, T>, Self>2456 pub fn try_upgrade_for( 2457 s: Self, 2458 timeout: R::Duration, 2459 ) -> Result<ArcRwLockWriteGuard<R, T>, Self> { 2460 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2461 if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } { 2462 // SAFETY: same as above 2463 let s = ManuallyDrop::new(s); 2464 let rwlock = unsafe { ptr::read(&s.rwlock) }; 2465 2466 Ok(ArcRwLockWriteGuard { 2467 rwlock, 2468 marker: PhantomData, 2469 }) 2470 } else { 2471 Err(s) 2472 } 2473 } 2474 2475 /// Tries to atomically upgrade an upgradable read lock into an exclusive 2476 /// write lock, until a timeout is reached. 2477 /// 2478 /// If the access could not be granted before the timeout expires, then 2479 /// the current guard is returned. 2480 #[inline] try_upgrade_until( s: Self, timeout: R::Instant, ) -> Result<ArcRwLockWriteGuard<R, T>, Self>2481 pub fn try_upgrade_until( 2482 s: Self, 2483 timeout: R::Instant, 2484 ) -> Result<ArcRwLockWriteGuard<R, T>, Self> { 2485 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2486 if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } { 2487 // SAFETY: same as above 2488 let s = ManuallyDrop::new(s); 2489 let rwlock = unsafe { ptr::read(&s.rwlock) }; 2490 2491 Ok(ArcRwLockWriteGuard { 2492 rwlock, 2493 marker: PhantomData, 2494 }) 2495 } else { 2496 Err(s) 2497 } 2498 } 2499 } 2500 2501 #[cfg(feature = "arc_lock")] 2502 impl<R: RawRwLockUpgradeTimed + RawRwLockUpgradeDowngrade, T: ?Sized> 2503 ArcRwLockUpgradableReadGuard<R, T> 2504 { 2505 /// Tries to atomically upgrade an upgradable read lock into an exclusive 2506 /// write lock, until a timeout is reached. 2507 /// 2508 /// If the access could not be granted before the timeout expires, then 2509 /// `None` is returned. 2510 /// 2511 /// Otherwise, calls the provided closure with an exclusive reference to the lock's data, 2512 /// and finally downgrades the lock back to an upgradable read lock. 2513 /// The closure's return value is wrapped in `Some` and returned. 2514 /// 2515 /// This function only requires a mutable reference to the guard, unlike 2516 /// `try_upgrade_for` which takes the guard by value. try_with_upgraded_for<Ret, F: FnOnce(&mut T) -> Ret>( &mut self, timeout: R::Duration, f: F, ) -> Option<Ret>2517 pub fn try_with_upgraded_for<Ret, F: FnOnce(&mut T) -> Ret>( 2518 &mut self, 2519 timeout: R::Duration, 2520 f: F, 2521 ) -> Option<Ret> { 2522 if unsafe { self.rwlock.raw.try_upgrade_for(timeout) } { 2523 // Safety: We just upgraded the lock, so we have mutable access to the data. 2524 // This will restore the state the lock was in at the start of the function. 2525 defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() }); 2526 2527 // Safety: We upgraded the lock, so we have mutable access to the data. 
2528 // When this function returns, whether by drop or panic, 2529 // the drop guard will downgrade it back to an upgradeable lock. 2530 Some(f(unsafe { &mut *self.rwlock.data.get() })) 2531 } else { 2532 None 2533 } 2534 } 2535 2536 /// Tries to atomically upgrade an upgradable read lock into an exclusive 2537 /// write lock, until a timeout is reached. 2538 /// 2539 /// If the access could not be granted before the timeout expires, then 2540 /// `None` is returned. 2541 /// 2542 /// Otherwise, calls the provided closure with an exclusive reference to the lock's data, 2543 /// and finally downgrades the lock back to an upgradable read lock. 2544 /// The closure's return value is wrapped in `Some` and returned. 2545 /// 2546 /// This function only requires a mutable reference to the guard, unlike 2547 /// `try_upgrade_until` which takes the guard by value. try_with_upgraded_until<Ret, F: FnOnce(&mut T) -> Ret>( &mut self, timeout: R::Instant, f: F, ) -> Option<Ret>2548 pub fn try_with_upgraded_until<Ret, F: FnOnce(&mut T) -> Ret>( 2549 &mut self, 2550 timeout: R::Instant, 2551 f: F, 2552 ) -> Option<Ret> { 2553 if unsafe { self.rwlock.raw.try_upgrade_until(timeout) } { 2554 // Safety: We just upgraded the lock, so we have mutable access to the data. 2555 // This will restore the state the lock was in at the start of the function. 2556 defer!(unsafe { self.rwlock.raw.downgrade_to_upgradable() }); 2557 2558 // Safety: We upgraded the lock, so we have mutable access to the data. 2559 // When this function returns, whether by drop or panic, 2560 // the drop guard will downgrade it back to an upgradeable lock. 2561 Some(f(unsafe { &mut *self.rwlock.data.get() })) 2562 } else { 2563 None 2564 } 2565 } 2566 } 2567 2568 #[cfg(feature = "arc_lock")] 2569 impl<R: RawRwLockUpgrade, T: ?Sized> Deref for ArcRwLockUpgradableReadGuard<R, T> { 2570 type Target = T; 2571 #[inline] deref(&self) -> &T2572 fn deref(&self) -> &T { 2573 unsafe { &*self.rwlock.data.get() } 2574 } 2575 } 2576 2577 #[cfg(feature = "arc_lock")] 2578 impl<R: RawRwLockUpgrade, T: ?Sized> Drop for ArcRwLockUpgradableReadGuard<R, T> { 2579 #[inline] drop(&mut self)2580 fn drop(&mut self) { 2581 // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock. 2582 unsafe { 2583 self.rwlock.raw.unlock_upgradable(); 2584 } 2585 } 2586 } 2587 2588 #[cfg(feature = "arc_lock")] 2589 impl<R: RawRwLockUpgrade, T: fmt::Debug + ?Sized> fmt::Debug 2590 for ArcRwLockUpgradableReadGuard<R, T> 2591 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2592 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2593 fmt::Debug::fmt(&**self, f) 2594 } 2595 } 2596 2597 #[cfg(feature = "arc_lock")] 2598 impl<R: RawRwLockUpgrade, T: fmt::Display + ?Sized> fmt::Display 2599 for ArcRwLockUpgradableReadGuard<R, T> 2600 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2601 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2602 (**self).fmt(f) 2603 } 2604 } 2605 2606 /// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a 2607 /// subfield of the protected data. 2608 /// 2609 /// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the 2610 /// former doesn't support temporarily unlocking and re-locking, since that 2611 /// could introduce soundness issues if the locked object is modified by another 2612 /// thread. 
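///
/// A minimal sketch of creating one via `RwLockReadGuard::map`, assuming the
/// downstream `parking_lot` crate; the `Config` type is illustrative only:
///
/// ```ignore
/// use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard};
///
/// struct Config {
///     name: String,
///     retries: u32,
/// }
///
/// let lock = RwLock::new(Config { name: "svc".into(), retries: 3 });
/// // Narrow the guard to the `name` field; the shared lock is still held.
/// let name: MappedRwLockReadGuard<'_, String> =
///     RwLockReadGuard::map(lock.read(), |config| &config.name);
/// assert_eq!(&*name, "svc");
/// ```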
2613 #[clippy::has_significant_drop] 2614 #[must_use = "if unused the RwLock will immediately unlock"] 2615 pub struct MappedRwLockReadGuard<'a, R: RawRwLock, T: ?Sized> { 2616 raw: &'a R, 2617 data: *const T, 2618 marker: PhantomData<&'a T>, 2619 } 2620 2621 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {} 2622 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Send for MappedRwLockReadGuard<'a, R, T> where 2623 R::GuardMarker: Send 2624 { 2625 } 2626 2627 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> { 2628 /// Make a new `MappedRwLockReadGuard` for a component of the locked data. 2629 /// 2630 /// This operation cannot fail as the `MappedRwLockReadGuard` passed 2631 /// in already locked the data. 2632 /// 2633 /// This is an associated function that needs to be 2634 /// used as `MappedRwLockReadGuard::map(...)`. A method would interfere with methods of 2635 /// the same name on the contents of the locked data. 2636 #[inline] map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U> where F: FnOnce(&T) -> &U,2637 pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, R, U> 2638 where 2639 F: FnOnce(&T) -> &U, 2640 { 2641 let raw = s.raw; 2642 let data = f(unsafe { &*s.data }); 2643 mem::forget(s); 2644 MappedRwLockReadGuard { 2645 raw, 2646 data, 2647 marker: PhantomData, 2648 } 2649 } 2650 2651 /// Attempts to make a new `MappedRwLockReadGuard` for a component of the 2652 /// locked data. The original guard is return if the closure returns `None`. 2653 /// 2654 /// This operation cannot fail as the `MappedRwLockReadGuard` passed 2655 /// in already locked the data. 2656 /// 2657 /// This is an associated function that needs to be 2658 /// used as `MappedRwLockReadGuard::try_map(...)`. A method would interfere with methods of 2659 /// the same name on the contents of the locked data. 2660 #[inline] try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self> where F: FnOnce(&T) -> Option<&U>,2661 pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, R, U>, Self> 2662 where 2663 F: FnOnce(&T) -> Option<&U>, 2664 { 2665 let raw = s.raw; 2666 let data = match f(unsafe { &*s.data }) { 2667 Some(data) => data, 2668 None => return Err(s), 2669 }; 2670 mem::forget(s); 2671 Ok(MappedRwLockReadGuard { 2672 raw, 2673 data, 2674 marker: PhantomData, 2675 }) 2676 } 2677 } 2678 2679 impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> { 2680 /// Unlocks the `RwLock` using a fair unlock protocol. 2681 /// 2682 /// By default, `RwLock` is unfair and allow the current thread to re-lock 2683 /// the `RwLock` before another has the chance to acquire the lock, even if 2684 /// that thread has been blocked on the `RwLock` for a long time. This is 2685 /// the default because it allows much higher throughput as it avoids 2686 /// forcing a context switch on every `RwLock` unlock. This can result in one 2687 /// thread acquiring a `RwLock` many more times than other threads. 2688 /// 2689 /// However in some cases it can be beneficial to ensure fairness by forcing 2690 /// the lock to pass on to a waiting thread if there is one. This is done by 2691 /// using this method instead of dropping the `MappedRwLockReadGuard` normally. 2692 #[inline] unlock_fair(s: Self)2693 pub fn unlock_fair(s: Self) { 2694 // Safety: A MappedRwLockReadGuard always holds a shared lock. 
2695 unsafe { 2696 s.raw.unlock_shared_fair(); 2697 } 2698 mem::forget(s); 2699 } 2700 } 2701 2702 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a, R, T> { 2703 type Target = T; 2704 #[inline] deref(&self) -> &T2705 fn deref(&self) -> &T { 2706 unsafe { &*self.data } 2707 } 2708 } 2709 2710 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> { 2711 #[inline] drop(&mut self)2712 fn drop(&mut self) { 2713 // Safety: A MappedRwLockReadGuard always holds a shared lock. 2714 unsafe { 2715 self.raw.unlock_shared(); 2716 } 2717 } 2718 } 2719 2720 impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug 2721 for MappedRwLockReadGuard<'a, R, T> 2722 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2723 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2724 fmt::Debug::fmt(&**self, f) 2725 } 2726 } 2727 2728 impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display 2729 for MappedRwLockReadGuard<'a, R, T> 2730 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2731 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2732 (**self).fmt(f) 2733 } 2734 } 2735 2736 #[cfg(feature = "owning_ref")] 2737 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress 2738 for MappedRwLockReadGuard<'a, R, T> 2739 { 2740 } 2741 2742 /// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a 2743 /// subfield of the protected data. 2744 /// 2745 /// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the 2746 /// former doesn't support temporarily unlocking and re-locking, since that 2747 /// could introduce soundness issues if the locked object is modified by another 2748 /// thread. 2749 #[clippy::has_significant_drop] 2750 #[must_use = "if unused the RwLock will immediately unlock"] 2751 pub struct MappedRwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> { 2752 raw: &'a R, 2753 data: *mut T, 2754 marker: PhantomData<&'a mut T>, 2755 } 2756 2757 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync 2758 for MappedRwLockWriteGuard<'a, R, T> 2759 { 2760 } 2761 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Send + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where 2762 R::GuardMarker: Send 2763 { 2764 } 2765 2766 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> { 2767 /// Make a new `MappedRwLockWriteGuard` for a component of the locked data. 2768 /// 2769 /// This operation cannot fail as the `MappedRwLockWriteGuard` passed 2770 /// in already locked the data. 2771 /// 2772 /// This is an associated function that needs to be 2773 /// used as `MappedRwLockWriteGuard::map(...)`. A method would interfere with methods of 2774 /// the same name on the contents of the locked data. 2775 #[inline] map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U> where F: FnOnce(&mut T) -> &mut U,2776 pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, R, U> 2777 where 2778 F: FnOnce(&mut T) -> &mut U, 2779 { 2780 let raw = s.raw; 2781 let data = f(unsafe { &mut *s.data }); 2782 mem::forget(s); 2783 MappedRwLockWriteGuard { 2784 raw, 2785 data, 2786 marker: PhantomData, 2787 } 2788 } 2789 2790 /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the 2791 /// locked data. The original guard is return if the closure returns `None`. 2792 /// 2793 /// This operation cannot fail as the `MappedRwLockWriteGuard` passed 2794 /// in already locked the data. 
2795 /// 2796 /// This is an associated function that needs to be 2797 /// used as `MappedRwLockWriteGuard::try_map(...)`. A method would interfere with methods of 2798 /// the same name on the contents of the locked data. 2799 #[inline] try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self> where F: FnOnce(&mut T) -> Option<&mut U>,2800 pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, R, U>, Self> 2801 where 2802 F: FnOnce(&mut T) -> Option<&mut U>, 2803 { 2804 let raw = s.raw; 2805 let data = match f(unsafe { &mut *s.data }) { 2806 Some(data) => data, 2807 None => return Err(s), 2808 }; 2809 mem::forget(s); 2810 Ok(MappedRwLockWriteGuard { 2811 raw, 2812 data, 2813 marker: PhantomData, 2814 }) 2815 } 2816 } 2817 2818 impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> { 2819 /// Unlocks the `RwLock` using a fair unlock protocol. 2820 /// 2821 /// By default, `RwLock` is unfair and allow the current thread to re-lock 2822 /// the `RwLock` before another has the chance to acquire the lock, even if 2823 /// that thread has been blocked on the `RwLock` for a long time. This is 2824 /// the default because it allows much higher throughput as it avoids 2825 /// forcing a context switch on every `RwLock` unlock. This can result in one 2826 /// thread acquiring a `RwLock` many more times than other threads. 2827 /// 2828 /// However in some cases it can be beneficial to ensure fairness by forcing 2829 /// the lock to pass on to a waiting thread if there is one. This is done by 2830 /// using this method instead of dropping the `MappedRwLockWriteGuard` normally. 2831 #[inline] unlock_fair(s: Self)2832 pub fn unlock_fair(s: Self) { 2833 // Safety: A MappedRwLockWriteGuard always holds an exclusive lock. 2834 unsafe { 2835 s.raw.unlock_exclusive_fair(); 2836 } 2837 mem::forget(s); 2838 } 2839 } 2840 2841 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockWriteGuard<'a, R, T> { 2842 type Target = T; 2843 #[inline] deref(&self) -> &T2844 fn deref(&self) -> &T { 2845 unsafe { &*self.data } 2846 } 2847 } 2848 2849 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<'a, R, T> { 2850 #[inline] deref_mut(&mut self) -> &mut T2851 fn deref_mut(&mut self) -> &mut T { 2852 unsafe { &mut *self.data } 2853 } 2854 } 2855 2856 impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> { 2857 #[inline] drop(&mut self)2858 fn drop(&mut self) { 2859 // Safety: A MappedRwLockWriteGuard always holds an exclusive lock. 2860 unsafe { 2861 self.raw.unlock_exclusive(); 2862 } 2863 } 2864 } 2865 2866 impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug 2867 for MappedRwLockWriteGuard<'a, R, T> 2868 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2869 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2870 fmt::Debug::fmt(&**self, f) 2871 } 2872 } 2873 2874 impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display 2875 for MappedRwLockWriteGuard<'a, R, T> 2876 { fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2877 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 2878 (**self).fmt(f) 2879 } 2880 } 2881 2882 #[cfg(feature = "owning_ref")] 2883 unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress 2884 for MappedRwLockWriteGuard<'a, R, T> 2885 { 2886 } 2887