use core::borrow::{Borrow, BorrowMut};
use core::cmp;
use core::fmt;
use core::marker::PhantomData;
use core::mem::{self, MaybeUninit};
use core::ops::{Deref, DerefMut};
use core::slice;
use core::sync::atomic::Ordering;

use crate::alloc::alloc;
use crate::alloc::boxed::Box;
use crate::guard::Guard;
use crate::primitive::sync::atomic::AtomicUsize;
use crossbeam_utils::atomic::AtomicConsume;

/// Given ordering for the success case in a compare-exchange operation, returns the strongest
/// appropriate ordering for the failure case.
#[inline]
fn strongest_failure_ordering(ord: Ordering) -> Ordering {
    use self::Ordering::*;
    match ord {
        Relaxed | Release => Relaxed,
        Acquire | AcqRel => Acquire,
        _ => SeqCst,
    }
}

/// The error returned on a failed compare-and-set operation.
// TODO: remove in the next major version.
#[deprecated(note = "Use `CompareExchangeError` instead")]
pub type CompareAndSetError<'g, T, P> = CompareExchangeError<'g, T, P>;

/// The error returned on a failed compare-exchange operation.
pub struct CompareExchangeError<'g, T: ?Sized + Pointable, P: Pointer<T>> {
    /// The value in the atomic pointer at the time of the failed operation.
    pub current: Shared<'g, T>,

    /// The new value, which the operation failed to store.
    pub new: P,
}

impl<T, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareExchangeError<'_, T, P> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("CompareExchangeError")
            .field("current", &self.current)
            .field("new", &self.new)
            .finish()
    }
}

/// Memory orderings for compare-and-set operations.
///
/// A compare-and-set operation can have different memory orderings depending on whether it
/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
///
/// The two ways of specifying orderings for compare-and-set are:
///
/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
///    ordering is chosen.
/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
///    for the failure case.
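///
/// For example, a small illustration of both forms (illustrative; uses the crate-root re-export
/// of this trait):
///
/// ```
/// # #![allow(deprecated)]
/// use crossbeam_epoch::CompareAndSetOrdering;
/// use std::sync::atomic::Ordering::{AcqRel, Acquire, SeqCst};
///
/// // A single ordering: the failure ordering is derived from it.
/// assert_eq!(SeqCst.success(), SeqCst);
/// assert_eq!(SeqCst.failure(), SeqCst);
///
/// // A pair of orderings: success and failure are given explicitly.
/// assert_eq!((AcqRel, Acquire).success(), AcqRel);
/// assert_eq!((AcqRel, Acquire).failure(), Acquire);
/// ```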
// TODO: remove in the next major version.
#[deprecated(
    note = "`compare_and_set` and `compare_and_set_weak` that use this trait are deprecated, \
            use `compare_exchange` or `compare_exchange_weak` instead"
)]
pub trait CompareAndSetOrdering {
    /// The ordering of the operation when it succeeds.
    fn success(&self) -> Ordering;

    /// The ordering of the operation when it fails.
    ///
    /// The failure ordering can't be `Release` or `AcqRel` and must be equivalent to or weaker
    /// than the success ordering.
    fn failure(&self) -> Ordering;
}

#[allow(deprecated)]
impl CompareAndSetOrdering for Ordering {
    #[inline]
    fn success(&self) -> Ordering {
        *self
    }

    #[inline]
    fn failure(&self) -> Ordering {
        strongest_failure_ordering(*self)
    }
}

#[allow(deprecated)]
impl CompareAndSetOrdering for (Ordering, Ordering) {
    #[inline]
    fn success(&self) -> Ordering {
        self.0
    }

    #[inline]
    fn failure(&self) -> Ordering {
        self.1
    }
}

/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
#[inline]
fn low_bits<T: ?Sized + Pointable>() -> usize {
    (1 << T::ALIGN.trailing_zeros()) - 1
}

/// Panics if the pointer is not properly aligned.
#[inline]
fn ensure_aligned<T: ?Sized + Pointable>(raw: usize) {
    assert_eq!(raw & low_bits::<T>(), 0, "unaligned pointer");
}

/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
///
/// `tag` is truncated to fit into the unused bits of the pointer to `T`.
#[inline]
fn compose_tag<T: ?Sized + Pointable>(data: usize, tag: usize) -> usize {
    (data & !low_bits::<T>()) | (tag & low_bits::<T>())
}

/// Decomposes a tagged pointer `data` into the pointer and the tag.
#[inline]
fn decompose_tag<T: ?Sized + Pointable>(data: usize) -> (usize, usize) {
    (data & !low_bits::<T>(), data & low_bits::<T>())
}

/// Types that are pointed to by a single word.
///
/// In concurrent programming, it is necessary to represent an object within a word because atomic
/// operations (e.g., reads, writes, read-modify-writes) support only single words.  This trait
/// qualifies such types that are pointed to by a single word.
///
/// The trait generalizes `Box<T>` for a sized type `T`.  In a box, an object of type `T` is
/// allocated on the heap and owned by a single-word pointer.  This trait is also implemented for
/// `[MaybeUninit<T>]` by storing its size along with its elements and pointing to the pair of array
/// size and elements.
///
/// Pointers to `Pointable` types can be stored in [`Atomic`], [`Owned`], and [`Shared`].  In
/// particular, Crossbeam supports dynamically sized slices as follows.
///
/// ```
/// use std::mem::MaybeUninit;
/// use crossbeam_epoch::Owned;
///
/// let o = Owned::<[MaybeUninit<i32>]>::init(10); // allocating [i32; 10]
/// ```
pub trait Pointable {
    /// The alignment of pointer.
    const ALIGN: usize;

    /// The type for initializers.
    type Init;

    /// Initializes a pointee with the given initializer.
    ///
    /// # Safety
    ///
    /// The result should be a multiple of `ALIGN`.
    unsafe fn init(init: Self::Init) -> usize;

    /// Dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be mutably dereferenced by [`Pointable::deref_mut`] concurrently.
    unsafe fn deref<'a>(ptr: usize) -> &'a Self;

    /// Mutably dereferences the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self;

    /// Drops the object pointed to by the given pointer.
    ///
    /// # Safety
    ///
    /// - The given `ptr` should have been initialized with [`Pointable::init`].
    /// - `ptr` should not have yet been dropped by [`Pointable::drop`].
    /// - `ptr` should not be dereferenced by [`Pointable::deref`] or [`Pointable::deref_mut`]
    ///   concurrently.
    unsafe fn drop(ptr: usize);
}

impl<T> Pointable for T {
    const ALIGN: usize = mem::align_of::<T>();

    type Init = T;

    unsafe fn init(init: Self::Init) -> usize {
        Box::into_raw(Box::new(init)) as usize
    }

    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
        &*(ptr as *const T)
    }

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        &mut *(ptr as *mut T)
    }

    unsafe fn drop(ptr: usize) {
        drop(Box::from_raw(ptr as *mut T));
    }
}

/// Array with size.
///
/// # Memory layout
///
/// An array consisting of a size and elements:
///
/// ```text
///          elements
///          |
///          |
/// ------------------------------------
/// | size | 0 | 1 | 2 | 3 | 4 | 5 | 6 |
/// ------------------------------------
/// ```
///
/// Its memory layout is different from that of `Box<[T]>` in that the size is in the allocation
/// (not along with the pointer as in `Box<[T]>`).
///
/// Elements are not present in the type, but they will be in the allocation.
// TODO(@jeehoonkang): once we bump the minimum required Rust version to 1.44 or newer, use
// [`alloc::alloc::Layout::extend`] instead.
#[repr(C)]
struct Array<T> {
    /// The number of elements, not the number of bytes.
    size: usize,
    elements: [MaybeUninit<T>; 0],
}

impl<T> Pointable for [MaybeUninit<T>] {
    const ALIGN: usize = mem::align_of::<Array<T>>();

    type Init = usize;

    unsafe fn init(count: Self::Init) -> usize {
        let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * count;
        let align = mem::align_of::<Array<T>>();
        let layout = alloc::Layout::from_size_align(size, align).unwrap();
        let ptr = alloc::alloc(layout) as *mut Array<T>;
        if ptr.is_null() {
            alloc::handle_alloc_error(layout);
        }
        // Store the element count, not the byte size: `deref`, `deref_mut`, and `drop` all
        // interpret `size` as the number of elements.
        (*ptr).size = count;
        ptr as usize
    }

    unsafe fn deref<'a>(ptr: usize) -> &'a Self {
        let array = &*(ptr as *const Array<T>);
        slice::from_raw_parts(array.elements.as_ptr() as *const _, array.size)
    }

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut Self {
        let array = &mut *(ptr as *mut Array<T>);
        slice::from_raw_parts_mut(array.elements.as_mut_ptr() as *mut _, array.size)
    }

    unsafe fn drop(ptr: usize) {
        let array = &*(ptr as *mut Array<T>);
        let size = mem::size_of::<Array<T>>() + mem::size_of::<MaybeUninit<T>>() * array.size;
        let align = mem::align_of::<Array<T>>();
        let layout = alloc::Layout::from_size_align(size, align).unwrap();
        alloc::dealloc(ptr as *mut u8, layout);
    }
}

/// An atomic pointer that can be safely shared between threads.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address. For example, the tag for a pointer to a sized type `T`
/// should be less than `(1 << mem::align_of::<T>().trailing_zeros())`.
///
/// Any method that loads the pointer must be passed a reference to a [`Guard`].
///
/// Crossbeam supports dynamically sized types.  See [`Pointable`] for details.
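///
/// # Examples
///
/// A sketch of tag usage (assuming the usual 8-byte alignment of `u64`, which leaves the low
/// three bits free for a tag):
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic, Owned};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(5));
/// let guard = &epoch::pin();
/// assert_eq!(a.load(SeqCst, guard).tag(), 5);
/// ```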
pub struct Atomic<T: ?Sized + Pointable> {
    data: AtomicUsize,
    _marker: PhantomData<*mut T>,
}

unsafe impl<T: ?Sized + Pointable + Send + Sync> Send for Atomic<T> {}
unsafe impl<T: ?Sized + Pointable + Send + Sync> Sync for Atomic<T> {}

impl<T> Atomic<T> {
    /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::new(1234);
    /// ```
    pub fn new(init: T) -> Atomic<T> {
        Self::init(init)
    }
}

impl<T: ?Sized + Pointable> Atomic<T> {
    /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::init(1234);
    /// ```
    pub fn init(init: T::Init) -> Atomic<T> {
        Self::from(Owned::init(init))
    }

    /// Returns a new atomic pointer pointing to the tagged pointer `data`.
    fn from_usize(data: usize) -> Self {
        Self {
            data: AtomicUsize::new(data),
            _marker: PhantomData,
        }
    }

    /// Returns a new null atomic pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::null();
    /// ```
    #[cfg_attr(all(feature = "nightly", not(crossbeam_loom)), const_fn::const_fn)]
    pub fn null() -> Atomic<T> {
        Self {
            data: AtomicUsize::new(0),
            _marker: PhantomData,
        }
    }

    /// Loads a `Shared` from the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// ```
    pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load(ord)) }
    }

    /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
    ///
    /// This is similar to the "acquire" ordering, except that an ordering is
    /// only guaranteed with operations that "depend on" the result of the load.
    /// However, consume loads are usually much faster than acquire loads on
    /// architectures with a weak memory model since they don't require memory
    /// fence instructions.
    ///
    /// The exact definition of "depend on" is a bit vague, but it works as you
    /// would expect in practice since a lot of software, especially the Linux
    /// kernel, relies on this behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load_consume(guard);
    /// ```
    pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load_consume()) }
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// a.store(Shared::null(), SeqCst);
    /// a.store(Owned::new(1234), SeqCst);
    /// ```
    pub fn store<P: Pointer<T>>(&self, new: P, ord: Ordering) {
        self.data.store(new.into_usize(), ord);
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
    /// `Shared`.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.swap(Shared::null(), SeqCst, guard);
    /// ```
    pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// The return value is a result indicating whether the new pointer was written. On success the
    /// pointer that was written is returned. On failure the actual current value and `new` are
    /// returned.
    ///
    /// This method takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using `Acquire` as success ordering makes the store part
    /// of this operation `Relaxed`, and using `Release` makes the successful load
    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    ///
    /// let guard = &epoch::pin();
    /// let curr = a.load(SeqCst, guard);
    /// let res1 = a.compare_exchange(curr, Shared::null(), SeqCst, SeqCst, guard);
    /// let res2 = a.compare_exchange(curr, Owned::new(5678), SeqCst, SeqCst, guard);
    /// ```
    pub fn compare_exchange<'g, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        success: Ordering,
        failure: Ordering,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
    where
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange(current.into_usize(), new, success, failure)
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareExchangeError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// Unlike [`compare_exchange`], this method is allowed to spuriously fail even when comparison
    /// succeeds, which can result in more efficient code on some platforms.  The return value is a
    /// result indicating whether the new pointer was written. On success the pointer that was
    /// written is returned. On failure the actual current value and `new` are returned.
    ///
    /// This method takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using `Acquire` as success ordering makes the store part
    /// of this operation `Relaxed`, and using `Release` makes the successful load
    /// `Relaxed`. The failure ordering can only be `SeqCst`, `Acquire` or `Relaxed`
    /// and must be equivalent to or weaker than the success ordering.
    ///
    /// [`compare_exchange`]: Atomic::compare_exchange
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let mut new = Owned::new(5678);
    /// let mut ptr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_exchange_weak(ptr, new, SeqCst, SeqCst, guard) {
    ///         Ok(p) => {
    ///             ptr = p;
    ///             break;
    ///         }
    ///         Err(err) => {
    ///             ptr = err.current;
    ///             new = err.new;
    ///         }
    ///     }
    /// }
    ///
    /// let mut curr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_exchange_weak(curr, Shared::null(), SeqCst, SeqCst, guard) {
    ///         Ok(_) => break,
    ///         Err(err) => curr = err.current,
    ///     }
    /// }
    /// ```
    pub fn compare_exchange_weak<'g, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        success: Ordering,
        failure: Ordering,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareExchangeError<'g, T, P>>
    where
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange_weak(current.into_usize(), new, success, failure)
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareExchangeError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// The return value is a result indicating whether the new pointer was written. On success the
    /// pointer that was written is returned. On failure the actual current value and `new` are
    /// returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// # Migrating to `compare_exchange`
    ///
    /// `compare_and_set` is equivalent to `compare_exchange` with the following mapping for
    /// memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    ///
    /// let guard = &epoch::pin();
    /// let curr = a.load(SeqCst, guard);
    /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
    /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
    /// ```
    // TODO: remove in the next major version.
    #[allow(deprecated)]
    #[deprecated(note = "Use `compare_exchange` instead")]
    pub fn compare_and_set<'g, O, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        ord: O,
        guard: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        self.compare_exchange(current, new, ord.success(), ord.failure(), guard)
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
    /// succeeds, which can result in more efficient code on some platforms.  The return value is a
    /// result indicating whether the new pointer was written. On success the pointer that was
    /// written is returned. On failure the actual current value and `new` are returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`compare_and_set`]: Atomic::compare_and_set
    ///
    /// # Migrating to `compare_exchange_weak`
    ///
    /// `compare_and_set_weak` is equivalent to `compare_exchange_weak` with the following mapping for
    /// memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(deprecated)]
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let mut new = Owned::new(5678);
    /// let mut ptr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
    ///         Ok(p) => {
    ///             ptr = p;
    ///             break;
    ///         }
    ///         Err(err) => {
    ///             ptr = err.current;
    ///             new = err.new;
    ///         }
    ///     }
    /// }
    ///
    /// let mut curr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
    ///         Ok(_) => break,
    ///         Err(err) => curr = err.current,
    ///     }
    /// }
    /// ```
    // TODO: remove in the next major version.
    #[allow(deprecated)]
    #[deprecated(note = "Use `compare_exchange_weak` instead")]
    pub fn compare_and_set_weak<'g, O, P>(
        &self,
        current: Shared<'_, T>,
        new: P,
        ord: O,
        guard: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        self.compare_exchange_weak(current, new, ord.success(), ord.failure(), guard)
    }

    /// Bitwise "and" with the current tag.
    ///
    /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
    }

    /// Bitwise "or" with the current tag.
    ///
    /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
    /// ```
    pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
    }

    /// Bitwise "xor" with the current tag.
    ///
    /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
    }

    /// Takes ownership of the pointee.
    ///
    /// This consumes the atomic and converts it into [`Owned`]. Because [`Atomic`] doesn't have a
    /// destructor and so doesn't drop the pointee, while [`Owned`] does, this is suitable for
    /// destructors of data structures.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use std::mem;
    /// # use crossbeam_epoch::Atomic;
    /// struct DataStructure {
    ///     ptr: Atomic<usize>,
    /// }
    ///
    /// impl Drop for DataStructure {
    ///     fn drop(&mut self) {
    ///         // By now the DataStructure lives only in our thread and we are sure we don't hold
    ///         // any Shared or & to it ourselves.
    ///         unsafe {
    ///             drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned());
    ///         }
    ///     }
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        #[cfg(crossbeam_loom)]
        {
            // FIXME: loom does not yet support into_inner, so we use unsync_load for now,
            // which should have the same synchronization properties:
            // https://github.com/tokio-rs/loom/issues/117
            Owned::from_usize(self.data.unsync_load())
        }
        #[cfg(not(crossbeam_loom))]
        {
            Owned::from_usize(self.data.into_inner())
        }
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, tag) = decompose_tag::<T>(data);

        f.debug_struct("Atomic")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, _) = decompose_tag::<T>(data);
        fmt::Pointer::fmt(&(unsafe { T::deref(raw) as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Clone for Atomic<T> {
    /// Returns a copy of the atomic value.
    ///
    /// Note that a `Relaxed` load is used here. If you need synchronization, combine it with
    /// other atomics or fences.
    fn clone(&self) -> Self {
        let data = self.data.load(Ordering::Relaxed);
        Atomic::from_usize(data)
    }
}

impl<T: ?Sized + Pointable> Default for Atomic<T> {
    fn default() -> Self {
        Atomic::null()
    }
}

impl<T: ?Sized + Pointable> From<Owned<T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `owned`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned};
    ///
    /// let a = Atomic::<i32>::from(Owned::new(1234));
    /// ```
    fn from(owned: Owned<T>) -> Self {
        let data = owned.data;
        mem::forget(owned);
        Self::from_usize(data)
    }
}

impl<T> From<Box<T>> for Atomic<T> {
    fn from(b: Box<T>) -> Self {
        Self::from(Owned::from(b))
    }
}

impl<T> From<T> for Atomic<T> {
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

impl<'g, T: ?Sized + Pointable> From<Shared<'g, T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `ptr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Shared};
    ///
    /// let a = Atomic::<i32>::from(Shared::<i32>::null());
    /// ```
    fn from(ptr: Shared<'g, T>) -> Self {
        Self::from_usize(ptr.data)
    }
}

impl<T> From<*const T> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `raw`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::ptr;
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::from(ptr::null::<i32>());
    /// ```
    fn from(raw: *const T) -> Self {
        Self::from_usize(raw as usize)
    }
}

/// A trait for either `Owned` or `Shared` pointers.
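///
/// A round-trip sketch (illustrative): `into_usize` transfers ownership of the allocation into
/// the returned integer, so the value must be converted back exactly once.
///
/// ```
/// use crossbeam_epoch::{Owned, Pointer};
///
/// let data = Owned::new(1234).into_usize();
/// // `data` now carries ownership; convert it back so the value is dropped.
/// let o = unsafe { Owned::<i32>::from_usize(data) };
/// assert_eq!(*o, 1234);
/// ```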
pub trait Pointer<T: ?Sized + Pointable> {
    /// Returns the machine representation of the pointer.
    fn into_usize(self) -> usize;

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Safety
    ///
    /// The given `data` should have been created by `Pointer::into_usize()`, and one `data` should
    /// not be converted back by `Pointer::from_usize()` multiple times.
    unsafe fn from_usize(data: usize) -> Self;
}

/// An owned heap-allocated object.
///
/// This type is very similar to `Box<T>`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Owned<T: ?Sized + Pointable> {
    data: usize,
    _marker: PhantomData<Box<T>>,
}

impl<T: ?Sized + Pointable> Pointer<T> for Owned<T> {
    #[inline]
    fn into_usize(self) -> usize {
        let data = self.data;
        mem::forget(self);
        data
    }

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Panics
    ///
    /// Panics if the data is zero in debug mode.
    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        debug_assert!(data != 0, "converting zero into `Owned`");
        Owned {
            data,
            _marker: PhantomData,
        }
    }
}

impl<T> Owned<T> {
    /// Returns a new owned pointer pointing to `raw`.
    ///
    /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
    /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
    /// the same raw pointer.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Safety
    ///
    /// The given `raw` should have been derived from `Owned`, and one `raw` should not be converted
    /// back by `Owned::from_raw()` multiple times.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
        let raw = raw as usize;
        ensure_aligned::<T>(raw);
        Self::from_usize(raw)
    }

    /// Converts the owned pointer into a `Box`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// let b: Box<i32> = o.into_box();
    /// assert_eq!(*b, 1234);
    /// ```
    pub fn into_box(self) -> Box<T> {
        let (raw, _) = decompose_tag::<T>(self.data);
        mem::forget(self);
        unsafe { Box::from_raw(raw as *mut _) }
    }

    /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// ```
    pub fn new(init: T) -> Owned<T> {
        Self::init(init)
    }
}

impl<T: ?Sized + Pointable> Owned<T> {
    /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::<i32>::init(1234);
    /// ```
    pub fn init(init: T::Init) -> Owned<T> {
        unsafe { Self::from_usize(T::init(init)) }
    }

    /// Converts the owned pointer into a [`Shared`].
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Owned};
    ///
    /// let o = Owned::new(1234);
    /// let guard = &epoch::pin();
    /// let p = o.into_shared(guard);
    /// ```
    #[allow(clippy::needless_lifetimes)]
    pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.into_usize()) }
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// assert_eq!(Owned::new(1234).tag(), 0);
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_tag::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(0u64);
    /// assert_eq!(o.tag(), 0);
    /// let o = o.with_tag(2);
    /// assert_eq!(o.tag(), 2);
    /// ```
    pub fn with_tag(self, tag: usize) -> Owned<T> {
        let data = self.into_usize();
        unsafe { Self::from_usize(compose_tag::<T>(data, tag)) }
    }
}

impl<T: ?Sized + Pointable> Drop for Owned<T> {
    fn drop(&mut self) {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe {
            T::drop(raw);
        }
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Owned<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);

        f.debug_struct("Owned")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: Clone> Clone for Owned<T> {
    fn clone(&self) -> Self {
        Owned::new((**self).clone()).with_tag(self.tag())
    }
}

impl<T: ?Sized + Pointable> Deref for Owned<T> {
    type Target = T;

    fn deref(&self) -> &T {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe { T::deref(raw) }
    }
}

impl<T: ?Sized + Pointable> DerefMut for Owned<T> {
    fn deref_mut(&mut self) -> &mut T {
        let (raw, _) = decompose_tag::<T>(self.data);
        unsafe { T::deref_mut(raw) }
    }
}

impl<T> From<T> for Owned<T> {
    fn from(t: T) -> Self {
        Owned::new(t)
    }
}

impl<T> From<Box<T>> for Owned<T> {
    /// Returns a new owned pointer pointing to `b`.
    ///
    /// # Panics
    ///
    /// Panics if the pointer (the `Box`) is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    fn from(b: Box<T>) -> Self {
        unsafe { Self::from_raw(Box::into_raw(b)) }
    }
}

impl<T: ?Sized + Pointable> Borrow<T> for Owned<T> {
    fn borrow(&self) -> &T {
        self.deref()
    }
}

impl<T: ?Sized + Pointable> BorrowMut<T> for Owned<T> {
    fn borrow_mut(&mut self) -> &mut T {
        self.deref_mut()
    }
}

impl<T: ?Sized + Pointable> AsRef<T> for Owned<T> {
    fn as_ref(&self) -> &T {
        self.deref()
    }
}

impl<T: ?Sized + Pointable> AsMut<T> for Owned<T> {
    fn as_mut(&mut self) -> &mut T {
        self.deref_mut()
    }
}

/// A pointer to an object protected by the epoch GC.
///
/// The pointer is valid for use only during the lifetime `'g`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
pub struct Shared<'g, T: 'g + ?Sized + Pointable> {
    data: usize,
    _marker: PhantomData<(&'g (), *const T)>,
}

impl<T: ?Sized + Pointable> Clone for Shared<'_, T> {
    fn clone(&self) -> Self {
        Self {
            data: self.data,
            _marker: PhantomData,
        }
    }
}

impl<T: ?Sized + Pointable> Copy for Shared<'_, T> {}

impl<T: ?Sized + Pointable> Pointer<T> for Shared<'_, T> {
    #[inline]
    fn into_usize(self) -> usize {
        self.data
    }

    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        Shared {
            data,
            _marker: PhantomData,
        }
    }
}

impl<'g, T> Shared<'g, T> {
    /// Converts the pointer to a raw pointer (without the tag).
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let o = Owned::new(1234);
    /// let raw = &*o as *const _;
    /// let a = Atomic::from(o);
    ///
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.as_raw(), raw);
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn as_raw(&self) -> *const T {
        let (raw, _) = decompose_tag::<T>(self.data);
        raw as *const _
    }
}

impl<'g, T: ?Sized + Pointable> Shared<'g, T> {
    /// Returns a new null pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::<i32>::null();
    /// assert!(p.is_null());
    /// ```
    pub fn null() -> Shared<'g, T> {
        Shared {
            data: 0,
            _marker: PhantomData,
        }
    }

    /// Returns `true` if the pointer is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::null();
    /// let guard = &epoch::pin();
    /// assert!(a.load(SeqCst, guard).is_null());
    /// a.store(Owned::new(1234), SeqCst);
    /// assert!(!a.load(SeqCst, guard).is_null());
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn is_null(&self) -> bool {
        let (raw, _) = decompose_tag::<T>(self.data);
        raw == 0
    }

    /// Dereferences the pointer.
    ///
    /// Returns a reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &1234);
    /// }
    /// ```
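    ///
    /// A sketch of the suggested fix, publishing with `Release` and loading with `Acquire` (the
    /// values stored here are illustrative):
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::{Acquire, Release};
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// // The writer publishes a new object with `Release`...
    /// a.store(Owned::new(5678), Release);
    /// // ...so an `Acquire` load that observes the new pointer also observes its contents.
    /// let p = a.load(Acquire, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &5678);
    /// }
    /// ```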
    #[allow(clippy::trivially_copy_pass_by_ref)]
    #[allow(clippy::should_implement_trait)]
    pub unsafe fn deref(&self) -> &'g T {
        let (raw, _) = decompose_tag::<T>(self.data);
        T::deref(raw)
    }

    /// Dereferences the pointer.
    ///
    /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// * There is no guarantee that there are no other threads attempting to read/write from/to
    ///   the actual object at the same time.
    ///
    ///   The user must know that there are no concurrent accesses towards the object itself.
    ///
    /// * Other than the above, all safety concerns of `deref()` apply here.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(vec![1, 2, 3, 4]);
    /// let guard = &epoch::pin();
    ///
    /// let mut p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert!(!p.is_null());
    ///     let b = p.deref_mut();
    ///     assert_eq!(b, &vec![1, 2, 3, 4]);
    ///     b.push(5);
    ///     assert_eq!(b, &vec![1, 2, 3, 4, 5]);
    /// }
    ///
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
    /// }
    /// ```
    #[allow(clippy::should_implement_trait)]
    pub unsafe fn deref_mut(&mut self) -> &'g mut T {
        let (raw, _) = decompose_tag::<T>(self.data);
        T::deref_mut(raw)
    }

    /// Converts the pointer to a reference.
    ///
    /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.as_ref(), Some(&1234));
    /// }
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub unsafe fn as_ref(&self) -> Option<&'g T> {
        let (raw, _) = decompose_tag::<T>(self.data);
        if raw == 0 {
            None
        } else {
            Some(T::deref(raw))
        }
    }

    /// Takes ownership of the pointee.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// unsafe {
    ///     let guard = &epoch::unprotected();
    ///     let p = a.load(SeqCst, guard);
    ///     drop(p.into_owned());
    /// }
    /// ```
    pub unsafe fn into_owned(self) -> Owned<T> {
        debug_assert!(!self.is_null(), "converting a null `Shared` into `Owned`");
        Owned::from_usize(self.data)
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.tag(), 2);
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_tag::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(0u64);
    /// let guard = &epoch::pin();
    /// let p1 = a.load(SeqCst, guard);
    /// let p2 = p1.with_tag(2);
    ///
    /// assert_eq!(p1.tag(), 0);
    /// assert_eq!(p2.tag(), 2);
    /// assert_eq!(p1.as_raw(), p2.as_raw());
    /// ```
    #[allow(clippy::trivially_copy_pass_by_ref)]
    pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
        unsafe { Self::from_usize(compose_tag::<T>(self.data, tag)) }
    }
}

impl<T> From<*const T> for Shared<'_, T> {
    /// Returns a new pointer pointing to `raw`.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::from(Box::into_raw(Box::new(1234)) as *const _);
    /// assert!(!p.is_null());
    /// ```
    fn from(raw: *const T) -> Self {
        let raw = raw as usize;
        ensure_aligned::<T>(raw);
        unsafe { Self::from_usize(raw) }
    }
}

impl<'g, T: ?Sized + Pointable> PartialEq<Shared<'g, T>> for Shared<'g, T> {
    fn eq(&self, other: &Self) -> bool {
        self.data == other.data
    }
}

impl<T: ?Sized + Pointable> Eq for Shared<'_, T> {}

impl<'g, T: ?Sized + Pointable> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        self.data.partial_cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> Ord for Shared<'_, T> {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.data.cmp(&other.data)
    }
}

impl<T: ?Sized + Pointable> fmt::Debug for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let (raw, tag) = decompose_tag::<T>(self.data);

        f.debug_struct("Shared")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: ?Sized + Pointable> fmt::Pointer for Shared<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&(unsafe { self.deref() as *const _ }), f)
    }
}

impl<T: ?Sized + Pointable> Default for Shared<'_, T> {
    fn default() -> Self {
        Shared::null()
    }
}

#[cfg(all(test, not(crossbeam_loom)))]
mod tests {
    use super::Shared;

    #[test]
    fn valid_tag_i8() {
        Shared::<i8>::null().with_tag(0);
    }

    #[test]
    fn valid_tag_i64() {
        Shared::<i64>::null().with_tag(7);
    }

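    // Round-trip check for the private tag helpers: composing and then decomposing a tag is
    // lossless as long as the tag fits in the pointer's free low bits (this assumes `i64` is
    // 8-byte aligned, as `valid_tag_i64` above does).
    #[test]
    fn tag_round_trip() {
        use super::{compose_tag, decompose_tag};

        let data = 0x1000usize;
        let tagged = compose_tag::<i64>(data, 0b101);
        assert_eq!(decompose_tag::<i64>(tagged), (data, 0b101));

        // Tags wider than the free bits are truncated.
        assert_eq!(decompose_tag::<i64>(compose_tag::<i64>(data, 0b1101)).1, 0b101);
    }
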
    #[cfg(feature = "nightly")]
    #[test]
    fn const_atomic_null() {
        use super::Atomic;
        const _: Atomic<u8> = Atomic::<u8>::null();
    }
}