use core::sync::atomic::{AtomicPtr, Ordering};

use std::sync::RwLock;

use super::sealed::{CaS, InnerStrategy, Protected};
use crate::as_raw::AsRaw;
use crate::ref_cnt::RefCnt;

impl<T: RefCnt> Protected<T> for T {
    #[inline]
    fn from_inner(ptr: T) -> Self {
        ptr
    }

    #[inline]
    fn into_inner(self) -> T {
        self
    }
}

impl<T: RefCnt> InnerStrategy<T> for RwLock<()> {
    // The loaded value is protected simply by owning an extra reference; the read lock is held
    // only for the duration of `load` itself.
    type Protected = T;

    unsafe fn load(&self, storage: &AtomicPtr<T::Base>) -> T {
        // Hold the read lock while we bump the reference count, so a writer can't destroy the
        // value between reading the pointer and incrementing it.
        let _guard = self.read().expect("We don't panic in here");
        let ptr = storage.load(Ordering::Acquire);
        let ptr = T::from_ptr(ptr as *const T::Base);
        T::inc(&ptr);

        ptr
    }

    unsafe fn wait_for_readers(&self, _: *const T::Base, _: &AtomicPtr<T::Base>) {
        // By acquiring the write lock, we make sure there are no read locks present across it.
        drop(self.write().expect("We don't panic in here"));
    }
}

impl<T: RefCnt> CaS<T> for RwLock<()> {
    unsafe fn compare_and_swap<C: AsRaw<T::Base>>(
        &self,
        storage: &AtomicPtr<T::Base>,
        current: C,
        new: T,
    ) -> Self::Protected {
        // Exclude readers for the whole operation. Even a poisoned lock still provides the
        // exclusion, as the guard is kept inside the returned Result either way.
        let _lock = self.write();
        let cur = current.as_raw();
        let new = T::into_ptr(new);
        let swapped = storage.compare_exchange(cur, new, Ordering::AcqRel, Ordering::Relaxed);
        let old = match swapped {
            Ok(old) => old,
            Err(old) => old,
        };
        let old = T::from_ptr(old as *const T::Base);
        if swapped.is_err() {
            // If the new didn't go in, we need to destroy it and increment the count in the old
            // that we just duplicated.
            T::inc(&old);
            drop(T::from_ptr(new));
        }
        // Drop `current` only now; the raw pointer obtained from it above has to stay valid up
        // to the compare_exchange.
        drop(current);
        old
    }
}
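
// Illustrative sketch (not part of the crate's API): the same RwLock-based pattern as above,
// written against plain `std` types so it is self-contained. A reader clones the shared `Arc`
// while holding the read lock; a writer publishes a new pointer and then briefly takes the
// write lock, which can't be acquired until every in-flight reader has released its read lock,
// so the old value can be dropped safely afterwards. All names here (`DemoStorage`,
// `rw_lock_sketch`, ...) are made up for the example.
#[cfg(test)]
mod rw_lock_sketch {
    use std::sync::atomic::{AtomicPtr, Ordering};
    use std::sync::{Arc, RwLock};

    struct DemoStorage {
        lock: RwLock<()>,
        ptr: AtomicPtr<usize>,
    }

    impl DemoStorage {
        fn new(value: Arc<usize>) -> Self {
            Self {
                lock: RwLock::new(()),
                ptr: AtomicPtr::new(Arc::into_raw(value) as *mut usize),
            }
        }

        // Mirrors `load` above: bump the reference count under the read lock.
        fn load(&self) -> Arc<usize> {
            let _guard = self.lock.read().unwrap();
            let raw = self.ptr.load(Ordering::Acquire);
            // SAFETY: `raw` came from `Arc::into_raw` and the read lock keeps the writer from
            // releasing the value while we add our own reference to it.
            unsafe {
                Arc::increment_strong_count(raw);
                Arc::from_raw(raw)
            }
        }

        // Mirrors a swap followed by `wait_for_readers`: publish the new value, then take the
        // write lock so no read lock spans the destruction of the old one.
        fn store(&self, value: Arc<usize>) {
            let new = Arc::into_raw(value) as *mut usize;
            let old = self.ptr.swap(new, Ordering::AcqRel);
            drop(self.lock.write().unwrap());
            // SAFETY: every reader that could have seen `old` has finished by now.
            drop(unsafe { Arc::from_raw(old) });
        }
    }

    impl Drop for DemoStorage {
        fn drop(&mut self) {
            // Reclaim the reference owned by the storage itself.
            drop(unsafe { Arc::from_raw(*self.ptr.get_mut()) });
        }
    }

    #[test]
    fn swap_keeps_loaded_value_alive() {
        let storage = DemoStorage::new(Arc::new(1));
        let first = storage.load();
        storage.store(Arc::new(2));
        // The previously loaded value stays valid even though the storage moved on.
        assert_eq!(*first, 1);
        assert_eq!(*storage.load(), 2);
    }
}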