use core::fmt;
use core::mem;

use scopeguard::defer;

use crate::atomic::Shared;
use crate::collector::Collector;
use crate::deferred::Deferred;
use crate::internal::Local;

/// A guard that keeps the current thread pinned.
///
/// # Pinning
///
/// The current thread is pinned by calling [`pin`], which returns a new guard:
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// // It is often convenient to prefix a call to `pin` with a `&` in order to create a reference.
/// // This is not really necessary, but makes passing references to the guard a bit easier.
/// let guard = &epoch::pin();
/// ```
///
/// When a guard gets dropped, the current thread is automatically unpinned.
///
/// # Pointers on the stack
///
/// Having a guard allows us to create pointers on the stack to heap-allocated objects.
/// For example:
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// // Create a heap-allocated number.
/// let a = Atomic::new(777);
///
/// // Pin the current thread.
/// let guard = &epoch::pin();
///
/// // Load the heap-allocated object and create pointer `p` on the stack.
/// let p = a.load(SeqCst, guard);
///
/// // Dereference the pointer and print the value:
/// if let Some(num) = unsafe { p.as_ref() } {
///     println!("The number is {}.", num);
/// }
/// ```
///
/// # Multiple guards
///
/// Pinning is reentrant and it is perfectly legal to create multiple guards. In that case, the
/// thread will actually be pinned only when the first guard is created and unpinned when the last
/// one is dropped:
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// let guard1 = epoch::pin();
/// let guard2 = epoch::pin();
/// assert!(epoch::is_pinned());
/// drop(guard1);
/// assert!(epoch::is_pinned());
/// drop(guard2);
/// assert!(!epoch::is_pinned());
/// ```
///
/// [`pin`]: super::pin
pub struct Guard {
    pub(crate) local: *const Local,
}

impl Guard {
    /// Stores a function so that it can be executed at some point after all currently pinned
    /// threads get unpinned.
    ///
    /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
    /// becomes full, some functions are moved into the global cache. At the same time, some
    /// functions from both local and global caches may get executed in order to incrementally
    /// clean up the caches as they fill up.
    ///
    /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
    /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
    /// never run, but the epoch-based garbage collection will make an effort to execute it
    /// reasonably soon.
    ///
    /// If this method is called from an [`unprotected`] guard, the function will simply be
    /// executed immediately.
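    ///
    /// # Examples
    ///
    /// A minimal sketch of deferring a function (the closure here only prints a message, but in
    /// practice it would typically release a resource owned by a concurrent data structure):
    ///
    /// ```
    /// use crossbeam_epoch as epoch;
    ///
    /// let guard = &epoch::pin();
    /// guard.defer(move || {
    ///     println!("This runs after all currently pinned threads get unpinned.");
    /// });
    /// ```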
    pub fn defer<F, R>(&self, f: F)
    where
        F: FnOnce() -> R,
        F: Send + 'static,
    {
        unsafe {
            self.defer_unchecked(f);
        }
    }

    /// Stores a function so that it can be executed at some point after all currently pinned
    /// threads get unpinned.
    ///
    /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
    /// becomes full, some functions are moved into the global cache. At the same time, some
    /// functions from both local and global caches may get executed in order to incrementally
    /// clean up the caches as they fill up.
    ///
    /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
    /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
    /// never run, but the epoch-based garbage collection will make an effort to execute it
    /// reasonably soon.
    ///
    /// If this method is called from an [`unprotected`] guard, the function will simply be
    /// executed immediately.
    ///
    /// # Safety
    ///
    /// The given function must not hold any references onto the stack. It is highly recommended
    /// that the passed function is **always** marked with `move` in order to prevent accidental
    /// borrows.
    ///
    /// ```
    /// use crossbeam_epoch as epoch;
    ///
    /// let guard = &epoch::pin();
    /// let message = "Hello!";
    /// unsafe {
    ///     // ALWAYS use `move` when sending a closure into `defer_unchecked`.
    ///     guard.defer_unchecked(move || {
    ///         println!("{}", message);
    ///     });
    /// }
    /// ```
    ///
    /// Apart from that, keep in mind that another thread may execute `f`, so anything accessed by
    /// the closure must be `Send`.
    ///
    /// We intentionally didn't require `F: Send`, because Rust's type system usually cannot prove
    /// `F: Send` for typical use cases. For example, consider the following code snippet, which
    /// exemplifies the typical use case of deferring the deallocation of a shared reference:
    ///
    /// ```ignore
    /// let shared = Owned::new(7i32).into_shared(guard);
    /// guard.defer_unchecked(move || shared.into_owned()); // `Shared` is not `Send`!
    /// ```
    ///
    /// While `Shared` is not `Send`, it's safe for another thread to call the deferred function,
    /// because it's called only after the grace period and `shared` is no longer shared with other
    /// threads. But we don't expect the type system to prove this.
    ///
    /// # Examples
    ///
    /// When a heap-allocated object in a data structure becomes unreachable, it has to be
    /// deallocated. However, the current thread and other threads may still be holding references
    /// on the stack to that same object. Therefore it cannot be deallocated before those references
    /// get dropped. This method can defer deallocation until all those threads get unpinned and
    /// consequently drop all their references on the stack.
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new("foo");
    ///
    /// // Now suppose that `a` is shared among multiple threads and concurrently
    /// // accessed and modified...
    ///
    /// // Pin the current thread.
    /// let guard = &epoch::pin();
    ///
    /// // Steal the object currently stored in `a` and swap it with another one.
    /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
    ///
    /// if !p.is_null() {
    ///     // The object `p` is pointing to is now unreachable.
    ///     // Defer its deallocation until all currently pinned threads get unpinned.
    ///     unsafe {
    ///         // ALWAYS use `move` when sending a closure into `defer_unchecked`.
    ///         guard.defer_unchecked(move || {
    ///             println!("{} is now being deallocated.", p.deref());
    ///             // Now we have unique access to the object pointed to by `p` and can turn it
    ///             // into an `Owned`. Dropping the `Owned` will deallocate the object.
    ///             drop(p.into_owned());
    ///         });
    ///     }
    /// }
    /// ```
    pub unsafe fn defer_unchecked<F, R>(&self, f: F)
    where
        F: FnOnce() -> R,
    {
        if let Some(local) = self.local.as_ref() {
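            // `Deferred::new` wraps `f` so that its return value is dropped as well when the
            // deferred function eventually runs.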
            local.defer(Deferred::new(move || drop(f())), self);
        } else {
            drop(f());
        }
    }

    /// Stores a destructor for an object so that it can be deallocated and dropped at some point
    /// after all currently pinned threads get unpinned.
    ///
    /// This method first stores the destructor into the thread-local (or handle-local) cache. If
    /// this cache becomes full, some destructors are moved into the global cache. At the same
    /// time, some destructors from both local and global caches may get executed in order to
    /// incrementally clean up the caches as they fill up.
    ///
    /// There is no guarantee when exactly the destructor will be executed. The only guarantee is
    /// that it won't be executed until all currently pinned threads get unpinned. In theory, the
    /// destructor might never run, but the epoch-based garbage collection will make an effort to
    /// execute it reasonably soon.
    ///
    /// If this method is called from an [`unprotected`] guard, the destructor will simply be
    /// executed immediately.
    ///
    /// # Safety
    ///
    /// The object must not be reachable by other threads anymore, otherwise it might still be in
    /// use when the destructor runs.
    ///
    /// Apart from that, keep in mind that another thread may execute the destructor, so the object
    /// must be sendable to other threads.
    ///
    /// We intentionally didn't require `T: Send`, because Rust's type system usually cannot prove
    /// `T: Send` for typical use cases. For example, consider the following code snippet, which
    /// exemplifies the typical use case of deferring the deallocation of a shared reference:
    ///
    /// ```ignore
    /// let shared = Owned::new(7i32).into_shared(guard);
    /// guard.defer_destroy(shared); // `Shared` is not `Send`!
    /// ```
    ///
    /// While `Shared` is not `Send`, it's safe for another thread to call the destructor, because
    /// it's called only after the grace period and `shared` is no longer shared with other
    /// threads. But we don't expect the type system to prove this.
    ///
    /// # Examples
    ///
    /// When a heap-allocated object in a data structure becomes unreachable, it has to be
    /// deallocated. However, the current thread and other threads may still be holding references
    /// on the stack to that same object. Therefore it cannot be deallocated before those references
    /// get dropped. This method can defer deallocation until all those threads get unpinned and
    /// consequently drop all their references on the stack.
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new("foo");
    ///
    /// // Now suppose that `a` is shared among multiple threads and concurrently
    /// // accessed and modified...
    ///
    /// // Pin the current thread.
    /// let guard = &epoch::pin();
    ///
    /// // Steal the object currently stored in `a` and swap it with another one.
    /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
    ///
    /// if !p.is_null() {
    ///     // The object `p` is pointing to is now unreachable.
    ///     // Defer its deallocation until all currently pinned threads get unpinned.
    ///     unsafe {
    ///         guard.defer_destroy(p);
    ///     }
    /// }
    /// ```
    pub unsafe fn defer_destroy<T>(&self, ptr: Shared<'_, T>) {
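        // Defer taking ownership of the object; the resulting `Owned` is dropped (and the
        // object deallocated) once the grace period has passed.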
        self.defer_unchecked(move || ptr.into_owned());
    }

    /// Clears up the thread-local cache of deferred functions by executing them or moving them
    /// into the global cache.
    ///
    /// Call this method after deferring execution of a function if you want to get it executed as
    /// soon as possible. Flushing will make sure it is residing in the global cache, so that
    /// any thread has a chance of taking the function and executing it.
    ///
    /// If this method is called from an [`unprotected`] guard, it is a no-op (nothing happens).
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch as epoch;
    ///
    /// let guard = &epoch::pin();
    /// guard.defer(move || {
    ///     println!("This better be printed as soon as possible!");
    /// });
    /// guard.flush();
    /// ```
    pub fn flush(&self) {
        if let Some(local) = unsafe { self.local.as_ref() } {
            local.flush(self);
        }
    }

    /// Unpins and then immediately re-pins the thread.
    ///
    /// This method is useful when you don't want to delay the advancement of the global epoch by
    /// holding an old epoch. For safety, you should not maintain any guard-based reference across
    /// the call (the latter is enforced by `&mut self`). The thread will only be repinned if this
    /// is the only active guard for the current thread.
    ///
    /// If this method is called from an [`unprotected`] guard, then the call is simply a no-op.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(777);
    /// let mut guard = epoch::pin();
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// guard.repin();
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// ```
    pub fn repin(&mut self) {
        if let Some(local) = unsafe { self.local.as_ref() } {
            local.repin();
        }
    }

    /// Temporarily unpins the thread, executes the given function and then re-pins the thread.
    ///
    /// This method is useful when you need to perform a long-running operation (e.g. sleeping)
    /// and don't need to maintain any guard-based reference across the call (the latter is enforced
    /// by `&mut self`). The thread will only be unpinned if this is the only active guard for the
    /// current thread.
    ///
    /// If this method is called from an [`unprotected`] guard, then the passed function is called
    /// directly without unpinning the thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    /// use std::thread;
    /// use std::time::Duration;
    ///
    /// let a = Atomic::new(777);
    /// let mut guard = epoch::pin();
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// guard.repin_after(|| thread::sleep(Duration::from_millis(50)));
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// ```
    pub fn repin_after<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        if let Some(local) = unsafe { self.local.as_ref() } {
            // We need to acquire a handle here to ensure the Local doesn't
            // disappear from under us.
            local.acquire_handle();
            local.unpin();
        }

        // Ensure the Guard is re-pinned even if the function panics.
        defer! {
            if let Some(local) = unsafe { self.local.as_ref() } {
                mem::forget(local.pin());
                local.release_handle();
            }
        }

        f()
    }

    /// Returns the `Collector` associated with this guard.
    ///
    /// This method is useful when you need to ensure that all guards used with
    /// a data structure come from the same collector.
    ///
    /// If this method is called from an [`unprotected`] guard, then `None` is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch as epoch;
    ///
    /// let guard1 = epoch::pin();
    /// let guard2 = epoch::pin();
    /// assert!(guard1.collector() == guard2.collector());
    /// ```
    pub fn collector(&self) -> Option<&Collector> {
        unsafe { self.local.as_ref().map(|local| local.collector()) }
    }
}

impl Drop for Guard {
    #[inline]
    fn drop(&mut self) {
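        // Unpin the thread; with multiple guards, the thread is actually unpinned only when
        // the last guard is dropped.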
        if let Some(local) = unsafe { self.local.as_ref() } {
            local.unpin();
        }
    }
}

impl fmt::Debug for Guard {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("Guard { .. }")
    }
}

/// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s.
///
/// This guard should be used on special occasions only. Note that it doesn't actually keep any
/// thread pinned - it's just a fake guard that allows loading from [`Atomic`]s unsafely.
///
/// Note that calling [`defer`] with a dummy guard will not defer the function - it will just
/// execute the function immediately.
///
/// If necessary, it's possible to create more dummy guards by cloning: `unprotected().clone()`.
///
/// # Safety
///
/// Loading and dereferencing data from an [`Atomic`] using this guard is safe only if the
/// [`Atomic`] is not being concurrently modified by other threads.
///
/// # Examples
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::Relaxed;
///
/// let a = Atomic::new(7);
///
/// unsafe {
///     // Load `a` without pinning the current thread.
///     a.load(Relaxed, epoch::unprotected());
///
///     // It's possible to create more dummy guards by calling `clone()`.
///     let dummy = &epoch::unprotected().clone();
///
///     dummy.defer(move || {
///         println!("This gets executed immediately.");
///     });
///
///     // Dropping `dummy` doesn't affect the current thread - it's just a no-op.
/// }
/// ```
///
/// The most common use of this function is when constructing or destructing a data structure.
///
/// For example, we can use a dummy guard in the destructor of a Treiber stack because at that
/// point no other thread could concurrently modify the [`Atomic`]s we are accessing.
///
/// If we were to actually pin the current thread during destruction, that would just unnecessarily
/// delay garbage collection and incur some performance cost, so in cases like these `unprotected`
/// is very helpful.
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::mem::ManuallyDrop;
/// use std::sync::atomic::Ordering::Relaxed;
///
/// struct Stack<T> {
///     head: Atomic<Node<T>>,
/// }
///
/// struct Node<T> {
///     data: ManuallyDrop<T>,
///     next: Atomic<Node<T>>,
/// }
///
/// impl<T> Drop for Stack<T> {
///     fn drop(&mut self) {
///         unsafe {
///             // Unprotected load.
///             let mut node = self.head.load(Relaxed, epoch::unprotected());
///
///             while let Some(n) = node.as_ref() {
///                 // Unprotected load.
///                 let next = n.next.load(Relaxed, epoch::unprotected());
///
///                 // Take ownership of the node, then drop its data and deallocate it.
///                 let mut o = node.into_owned();
///                 ManuallyDrop::drop(&mut o.data);
///                 drop(o);
///
///                 node = next;
///             }
///         }
///     }
/// }
/// ```
///
/// [`Atomic`]: super::Atomic
/// [`defer`]: Guard::defer
#[inline]
pub unsafe fn unprotected() -> &'static Guard {
    // An unprotected guard is just a `Guard` with its field `local` set to null.
    // We make a newtype over `Guard` because `Guard` isn't `Sync`, so it can't be directly
    // stored in a `static`.
    struct GuardWrapper(Guard);
    unsafe impl Sync for GuardWrapper {}
    static UNPROTECTED: GuardWrapper = GuardWrapper(Guard {
        local: core::ptr::null(),
    });
    &UNPROTECTED.0
}