use core::fmt;
use core::mem;

use scopeguard::defer;

use crate::atomic::Shared;
use crate::collector::Collector;
use crate::deferred::Deferred;
use crate::internal::Local;

/// A guard that keeps the current thread pinned.
///
/// # Pinning
///
/// The current thread is pinned by calling [`pin`], which returns a new guard:
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// // It is often convenient to prefix a call to `pin` with a `&` in order to create a reference.
/// // This is not really necessary, but makes passing references to the guard a bit easier.
/// let guard = &epoch::pin();
/// ```
///
/// When a guard gets dropped, the current thread is automatically unpinned.
///
/// # Pointers on the stack
///
/// Having a guard allows us to create pointers on the stack to heap-allocated objects.
/// For example:
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// // Create a heap-allocated number.
/// let a = Atomic::new(777);
///
/// // Pin the current thread.
/// let guard = &epoch::pin();
///
/// // Load the heap-allocated object and create pointer `p` on the stack.
/// let p = a.load(SeqCst, guard);
///
/// // Dereference the pointer and print the value:
/// if let Some(num) = unsafe { p.as_ref() } {
///     println!("The number is {}.", num);
/// }
/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
///
/// # Multiple guards
///
/// Pinning is reentrant and it is perfectly legal to create multiple guards. In that case, the
/// thread will actually be pinned only when the first guard is created and unpinned when the last
/// one is dropped:
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// let guard1 = epoch::pin();
/// let guard2 = epoch::pin();
/// assert!(epoch::is_pinned());
/// drop(guard1);
/// assert!(epoch::is_pinned());
/// drop(guard2);
/// assert!(!epoch::is_pinned());
/// ```
///
/// [`pin`]: super::pin
pub struct Guard {
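    /// Pointer to this thread's `Local`. A null pointer marks a dummy guard created by
    /// [`unprotected`], for which pinning-related methods are no-ops and deferred functions
    /// run immediately.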
    pub(crate) local: *const Local,
}

impl Guard {
    /// Stores a function so that it can be executed at some point after all currently pinned
    /// threads get unpinned.
    ///
    /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
    /// becomes full, some functions are moved into the global cache. At the same time, some
    /// functions from both local and global caches may get executed in order to incrementally
    /// clean up the caches as they fill up.
    ///
    /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
    /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
    /// never run, but the epoch-based garbage collection will make an effort to execute it
    /// reasonably soon.
    ///
    /// If this method is called from an [`unprotected`] guard, the function will simply be
    /// executed immediately.
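    ///
    /// # Examples
    ///
    /// A minimal sketch of typical usage; because the closure here must be `Send + 'static`,
    /// no `unsafe` is needed:
    ///
    /// ```
    /// use crossbeam_epoch as epoch;
    ///
    /// let guard = &epoch::pin();
    /// let message = "Hello!";
    /// // Use `move` so the closure owns everything it captures.
    /// guard.defer(move || {
    ///     println!("{}", message);
    /// });
    /// ```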
    pub fn defer<F, R>(&self, f: F)
    where
        F: FnOnce() -> R,
        F: Send + 'static,
    {
        unsafe {
            self.defer_unchecked(f);
        }
    }

    /// Stores a function so that it can be executed at some point after all currently pinned
    /// threads get unpinned.
    ///
    /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
    /// becomes full, some functions are moved into the global cache. At the same time, some
    /// functions from both local and global caches may get executed in order to incrementally
    /// clean up the caches as they fill up.
    ///
    /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
    /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
    /// never run, but the epoch-based garbage collection will make an effort to execute it
    /// reasonably soon.
    ///
    /// If this method is called from an [`unprotected`] guard, the function will simply be
    /// executed immediately.
    ///
    /// # Safety
    ///
    /// The given function must not hold any references to data on the stack. It is highly
    /// recommended that the passed function **always** be marked with `move` in order to prevent
    /// accidental borrows.
    ///
    /// ```
    /// use crossbeam_epoch as epoch;
    ///
    /// let guard = &epoch::pin();
    /// let message = "Hello!";
    /// unsafe {
    ///     // ALWAYS use `move` when sending a closure into `defer_unchecked`.
    ///     guard.defer_unchecked(move || {
    ///         println!("{}", message);
    ///     });
    /// }
    /// ```
    ///
    /// Apart from that, keep in mind that another thread may execute `f`, so anything accessed by
    /// the closure must be `Send`.
    ///
    /// We intentionally didn't require `F: Send`, because Rust's type system usually cannot prove
    /// `F: Send` for typical use cases. For example, consider the following code snippet, which
    /// exemplifies the typical use case of deferring the deallocation of a shared reference:
    ///
    /// ```ignore
    /// let shared = Owned::new(7i32).into_shared(guard);
    /// guard.defer_unchecked(move || shared.into_owned()); // `Shared` is not `Send`!
    /// ```
    ///
    /// While `Shared` is not `Send`, it's safe for another thread to call the deferred function,
    /// because it's called only after the grace period and `shared` is no longer shared with other
    /// threads. But we don't expect type systems to prove this.
    ///
    /// # Examples
    ///
    /// When a heap-allocated object in a data structure becomes unreachable, it has to be
    /// deallocated. However, the current thread and other threads may still be holding references
    /// on the stack to that same object. Therefore it cannot be deallocated before those
    /// references get dropped. This method can defer deallocation until all those threads get
    /// unpinned and consequently drop all their references on the stack.
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new("foo");
    ///
    /// // Now suppose that `a` is shared among multiple threads and concurrently
    /// // accessed and modified...
    ///
    /// // Pin the current thread.
    /// let guard = &epoch::pin();
    ///
    /// // Steal the object currently stored in `a` and swap it with another one.
    /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
    ///
    /// if !p.is_null() {
    ///     // The object `p` is pointing to is now unreachable.
    ///     // Defer its deallocation until all currently pinned threads get unpinned.
    ///     unsafe {
    ///         // ALWAYS use `move` when sending a closure into `defer_unchecked`.
    ///         guard.defer_unchecked(move || {
    ///             println!("{} is now being deallocated.", p.deref());
    ///             // Now we have unique access to the object pointed to by `p` and can turn it
    ///             // into an `Owned`. Dropping the `Owned` will deallocate the object.
    ///             drop(p.into_owned());
    ///         });
    ///     }
    /// }
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub unsafe fn defer_unchecked<F, R>(&self, f: F)
    where
        F: FnOnce() -> R,
    {
        if let Some(local) = self.local.as_ref() {
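            // Pinned guard: queue the function, erasing its return type by dropping the result.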
            local.defer(Deferred::new(move || drop(f())), self);
        } else {
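            // Unprotected guard (`local` is null): execute the function immediately.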
            drop(f());
        }
    }

    /// Stores a destructor for an object so that it can be deallocated and dropped at some point
    /// after all currently pinned threads get unpinned.
    ///
    /// This method first stores the destructor into the thread-local (or handle-local) cache. If
    /// this cache becomes full, some destructors are moved into the global cache. At the same
    /// time, some destructors from both local and global caches may get executed in order to
    /// incrementally clean up the caches as they fill up.
    ///
    /// There is no guarantee when exactly the destructor will be executed. The only guarantee is
    /// that it won't be executed until all currently pinned threads get unpinned. In theory, the
    /// destructor might never run, but the epoch-based garbage collection will make an effort to
    /// execute it reasonably soon.
    ///
    /// If this method is called from an [`unprotected`] guard, the destructor will simply be
    /// executed immediately.
    ///
    /// # Safety
    ///
    /// The object must not be reachable by other threads anymore, otherwise it might still be in
    /// use when the destructor runs.
    ///
    /// Apart from that, keep in mind that another thread may execute the destructor, so the object
    /// must be sendable to other threads.
    ///
    /// We intentionally didn't require `T: Send`, because Rust's type system usually cannot prove
    /// `T: Send` for typical use cases. For example, consider the following code snippet, which
    /// exemplifies the typical use case of deferring the deallocation of a shared reference:
    ///
    /// ```ignore
    /// let shared = Owned::new(7i32).into_shared(guard);
    /// guard.defer_destroy(shared); // `Shared` is not `Send`!
    /// ```
    ///
    /// While `Shared` is not `Send`, it's safe for another thread to call the destructor, because
    /// it's called only after the grace period and `shared` is no longer shared with other
    /// threads. But we don't expect type systems to prove this.
    ///
    /// # Examples
    ///
    /// When a heap-allocated object in a data structure becomes unreachable, it has to be
    /// deallocated. However, the current thread and other threads may still be holding references
    /// on the stack to that same object. Therefore it cannot be deallocated before those
    /// references get dropped. This method can defer deallocation until all those threads get
    /// unpinned and consequently drop all their references on the stack.
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new("foo");
    ///
    /// // Now suppose that `a` is shared among multiple threads and concurrently
    /// // accessed and modified...
    ///
    /// // Pin the current thread.
    /// let guard = &epoch::pin();
    ///
    /// // Steal the object currently stored in `a` and swap it with another one.
    /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
    ///
    /// if !p.is_null() {
    ///     // The object `p` is pointing to is now unreachable.
    ///     // Defer its deallocation until all currently pinned threads get unpinned.
    ///     unsafe {
    ///         guard.defer_destroy(p);
    ///     }
    /// }
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) {
        self.defer_unchecked(move || ptr.into_owned());
    }

    /// Clears up the thread-local cache of deferred functions by executing them or moving them
    /// into the global cache.
    ///
    /// Call this method after deferring execution of a function if you want to get it executed as
    /// soon as possible. Flushing will make sure it resides in the global cache, so that any
    /// thread has a chance of taking the function and executing it.
    ///
    /// If this method is called from an [`unprotected`] guard, it is a no-op (nothing happens).
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch as epoch;
    ///
    /// let guard = &epoch::pin();
    /// guard.defer(move || {
    ///     println!("This better be printed as soon as possible!");
    /// });
    /// guard.flush();
    /// ```
    pub fn flush(&self) {
        if let Some(local) = unsafe { self.local.as_ref() } {
            local.flush(self);
        }
    }

    /// Unpins and then immediately re-pins the thread.
    ///
    /// This method is useful when you don't want to delay the advancement of the global epoch by
    /// holding an old epoch. For safety, you should not maintain any guard-based reference across
    /// the call (the latter is enforced by `&mut self`). The thread will only be repinned if this
    /// is the only active guard for the current thread.
    ///
    /// If this method is called from an [`unprotected`] guard, then the call is simply a no-op.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(777);
    /// let mut guard = epoch::pin();
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// guard.repin();
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn repin(&mut self) {
        if let Some(local) = unsafe { self.local.as_ref() } {
            local.repin();
        }
    }

    /// Temporarily unpins the thread, executes the given function and then re-pins the thread.
    ///
    /// This method is useful when you need to perform a long-running operation (e.g. sleeping)
    /// and don't need to maintain any guard-based reference across the call (the latter is enforced
    /// by `&mut self`). The thread will only be unpinned if this is the only active guard for the
    /// current thread.
    ///
    /// If this method is called from an [`unprotected`] guard, then the passed function is called
    /// directly without unpinning the thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    /// use std::thread;
    /// use std::time::Duration;
    ///
    /// let a = Atomic::new(777);
    /// let mut guard = epoch::pin();
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// guard.repin_after(|| thread::sleep(Duration::from_millis(50)));
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// # unsafe { drop(a.into_owned()); } // avoid leak
    /// ```
    pub fn repin_after<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        if let Some(local) = unsafe { self.local.as_ref() } {
            // We need to acquire a handle here to ensure the Local doesn't
            // disappear from under us.
            local.acquire_handle();
            local.unpin();
        }

        // Ensure the Guard is re-pinned even if the function panics.
        defer! {
            if let Some(local) = unsafe { self.local.as_ref() } {
                mem::forget(local.pin());
                local.release_handle();
            }
        }

        f()
    }

    /// Returns the `Collector` associated with this guard.
    ///
    /// This method is useful when you need to ensure that all guards used with
    /// a data structure come from the same collector.
    ///
    /// If this method is called from an [`unprotected`] guard, then `None` is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch as epoch;
    ///
    /// let guard1 = epoch::pin();
    /// let guard2 = epoch::pin();
    /// assert!(guard1.collector() == guard2.collector());
    /// ```
    pub fn collector(&self) -> Option<&Collector> {
        unsafe { self.local.as_ref().map(|local| local.collector()) }
    }
}

impl Drop for Guard {
    #[inline]
    fn drop(&mut self) {
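        // A null `local` means this is an `unprotected` guard, so there is nothing to unpin.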
        if let Some(local) = unsafe { self.local.as_ref() } {
            local.unpin();
        }
    }
}

impl fmt::Debug for Guard {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("Guard { .. }")
    }
}

/// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s.
///
/// This guard should be used only in special cases. Note that it doesn't actually keep any
/// thread pinned - it's just a fake guard that allows loading from [`Atomic`]s unsafely.
///
/// Note that calling [`defer`] with a dummy guard will not defer the function - it will just
/// execute the function immediately.
///
/// If necessary, it's possible to create more dummy guards by cloning: `unprotected().clone()`.
///
/// # Safety
///
/// Loading and dereferencing data from an [`Atomic`] using this guard is safe only if the
/// [`Atomic`] is not being concurrently modified by other threads.
///
/// # Examples
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::Relaxed;
///
/// let a = Atomic::new(7);
///
/// unsafe {
///     // Load `a` without pinning the current thread.
///     a.load(Relaxed, epoch::unprotected());
///
///     // It's possible to create more dummy guards by calling `clone()`.
///     let dummy = &epoch::unprotected().clone();
///
///     dummy.defer(move || {
///         println!("This gets executed immediately.");
///     });
///
///     // Dropping `dummy` doesn't affect the current thread - it's just a noop.
/// }
/// # unsafe { drop(a.into_owned()); } // avoid leak
/// ```
///
/// The most common use of this function is when constructing or destructing a data structure.
///
/// For example, we can use a dummy guard in the destructor of a Treiber stack because at that
/// point no other thread could concurrently modify the [`Atomic`]s we are accessing.
///
/// If we were to actually pin the current thread during destruction, that would just unnecessarily
/// delay garbage collection and incur some performance cost, so in cases like these `unprotected`
/// is very helpful.
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::mem::ManuallyDrop;
/// use std::sync::atomic::Ordering::Relaxed;
///
/// struct Stack<T> {
///     head: Atomic<Node<T>>,
/// }
///
/// struct Node<T> {
///     data: ManuallyDrop<T>,
///     next: Atomic<Node<T>>,
/// }
///
/// impl<T> Drop for Stack<T> {
///     fn drop(&mut self) {
///         unsafe {
///             // Unprotected load.
///             let mut node = self.head.load(Relaxed, epoch::unprotected());
///
///             while let Some(n) = node.as_ref() {
///                 // Unprotected load.
///                 let next = n.next.load(Relaxed, epoch::unprotected());
///
///                 // Take ownership of the node, then drop its data and deallocate it.
///                 let mut o = node.into_owned();
///                 ManuallyDrop::drop(&mut o.data);
///                 drop(o);
///
///                 node = next;
///             }
///         }
///     }
/// }
/// ```
///
/// [`Atomic`]: super::Atomic
/// [`defer`]: Guard::defer
#[inline]
pub unsafe fn unprotected() -> &'static Guard {
    // An unprotected guard is just a `Guard` with its field `local` set to null.
    // We make a newtype over `Guard` because `Guard` isn't `Sync`, so it can't be stored directly
    // in a `static`.
    struct GuardWrapper(Guard);
    unsafe impl Sync for GuardWrapper {}
    static UNPROTECTED: GuardWrapper = GuardWrapper(Guard {
        local: core::ptr::null(),
    });
    &UNPROTECTED.0
}