1 // SPDX-License-Identifier: Apache-2.0 OR MIT
2 
3 // Adapted from https://github.com/crossbeam-rs/crossbeam/blob/crossbeam-utils-0.8.7/crossbeam-utils/src/atomic/seq_lock_wide.rs.
4 
5 use core::{
6     mem::ManuallyDrop,
7     sync::atomic::{self, AtomicUsize, Ordering},
8 };
9 
10 use super::utils::Backoff;
11 
12 // See mod.rs for details.
13 pub(super) type AtomicChunk = AtomicUsize;
14 pub(super) type Chunk = usize;
15 
16 /// A simple stamped lock.
17 ///
18 /// The state is represented as two `AtomicUsize`: `state_hi` for high bits and `state_lo` for low
19 /// bits.
20 pub(super) struct SeqLock {
21     /// The high bits of the current state of the lock.
22     state_hi: AtomicUsize,
23 
24     /// The low bits of the current state of the lock.
25     ///
26     /// All bits except the least significant one hold the current stamp. When locked, the state_lo
27     /// equals 1 and doesn't contain a valid stamp.
28     state_lo: AtomicUsize,
29 }
30 
31 impl SeqLock {
32     #[inline]
new() -> Self33     pub(super) const fn new() -> Self {
34         Self { state_hi: AtomicUsize::new(0), state_lo: AtomicUsize::new(0) }
35     }
36 
37     /// If not locked, returns the current stamp.
38     ///
39     /// This method should be called before optimistic reads.
40     #[inline]
optimistic_read(&self) -> Option<(usize, usize)>41     pub(super) fn optimistic_read(&self) -> Option<(usize, usize)> {
42         // The acquire loads from `state_hi` and `state_lo` synchronize with the release stores in
43         // `SeqLockWriteGuard::drop` and `SeqLockWriteGuard::abort`.
44         //
45         // As a consequence, we can make sure that (1) all writes within the era of `state_hi - 1`
46         // happens before now; and therefore, (2) if `state_lo` is even, all writes within the
47         // critical section of (`state_hi`, `state_lo`) happens before now.
48         let state_hi = self.state_hi.load(Ordering::Acquire);
49         let state_lo = self.state_lo.load(Ordering::Acquire);
50         if state_lo == 1 {
51             None
52         } else {
53             Some((state_hi, state_lo))
54         }
55     }
56 
57     /// Returns `true` if the current stamp is equal to `stamp`.
58     ///
59     /// This method should be called after optimistic reads to check whether they are valid. The
60     /// argument `stamp` should correspond to the one returned by method `optimistic_read`.
61     #[inline]
validate_read(&self, stamp: (usize, usize)) -> bool62     pub(super) fn validate_read(&self, stamp: (usize, usize)) -> bool {
63         // Thanks to the fence, if we're noticing any modification to the data at the critical
64         // section of `(stamp.0, stamp.1)`, then the critical section's write of 1 to state_lo should be
65         // visible.
66         atomic::fence(Ordering::Acquire);
67 
68         // So if `state_lo` coincides with `stamp.1`, then either (1) we're noticing no modification
69         // to the data after the critical section of `(stamp.0, stamp.1)`, or (2) `state_lo` wrapped
70         // around.
71         //
72         // If (2) is the case, the acquire ordering ensures we see the new value of `state_hi`.
73         let state_lo = self.state_lo.load(Ordering::Acquire);
74 
75         // If (2) is the case and `state_hi` coincides with `stamp.0`, then `state_hi` also wrapped
76         // around, which we give up to correctly validate the read.
77         let state_hi = self.state_hi.load(Ordering::Relaxed);
78 
79         // Except for the case that both `state_hi` and `state_lo` wrapped around, the following
80         // condition implies that we're noticing no modification to the data after the critical
81         // section of `(stamp.0, stamp.1)`.
82         (state_hi, state_lo) == stamp
83     }
84 
85     /// Grabs the lock for writing.
86     #[inline]
write(&self) -> SeqLockWriteGuard<'_>87     pub(super) fn write(&self) -> SeqLockWriteGuard<'_> {
88         let mut backoff = Backoff::new();
89         loop {
90             let previous = self.state_lo.swap(1, Ordering::Acquire);
91 
92             if previous != 1 {
93                 // To synchronize with the acquire fence in `validate_read` via any modification to
94                 // the data at the critical section of `(state_hi, previous)`.
95                 atomic::fence(Ordering::Release);
96 
97                 return SeqLockWriteGuard { lock: self, state_lo: previous };
98             }
99 
100             while self.state_lo.load(Ordering::Relaxed) == 1 {
101                 backoff.snooze();
102             }
103         }
104     }
105 }
106 
107 /// An RAII guard that releases the lock and increments the stamp when dropped.
108 #[must_use]
109 pub(super) struct SeqLockWriteGuard<'a> {
110     /// The parent lock.
111     lock: &'a SeqLock,
112 
113     /// The stamp before locking.
114     state_lo: usize,
115 }
116 
117 impl SeqLockWriteGuard<'_> {
118     /// Releases the lock without incrementing the stamp.
119     #[inline]
abort(self)120     pub(super) fn abort(self) {
121         // We specifically don't want to call drop(), since that's
122         // what increments the stamp.
123         let this = ManuallyDrop::new(self);
124 
125         // Restore the stamp.
126         //
127         // Release ordering for synchronizing with `optimistic_read`.
128         this.lock.state_lo.store(this.state_lo, Ordering::Release);
129     }
130 }
131 
132 impl Drop for SeqLockWriteGuard<'_> {
133     #[inline]
drop(&mut self)134     fn drop(&mut self) {
135         let state_lo = self.state_lo.wrapping_add(2);
136 
137         // Increase the high bits if the low bits wrap around.
138         //
139         // Release ordering for synchronizing with `optimistic_read`.
140         if state_lo == 0 {
141             let state_hi = self.lock.state_hi.load(Ordering::Relaxed);
142             self.lock.state_hi.store(state_hi.wrapping_add(1), Ordering::Release);
143         }
144 
145         // Release the lock and increment the stamp.
146         //
147         // Release ordering for synchronizing with `optimistic_read`.
148         self.lock.state_lo.store(state_lo, Ordering::Release);
149     }
150 }
151 
#[cfg(test)]
mod tests {
    use super::SeqLock;

    #[test]
    fn smoke() {
        let lock = SeqLock::new();
        let before = lock.optimistic_read().unwrap();
        assert!(lock.validate_read(before));
        {
            let _guard = lock.write();
        }
        // A completed write must invalidate stamps taken before it.
        assert!(!lock.validate_read(before));
        let after = lock.optimistic_read().unwrap();
        assert_ne!(before, after);
    }

    #[test]
    fn test_abort() {
        let lock = SeqLock::new();
        let before = lock.optimistic_read().unwrap();
        {
            let guard = lock.write();
            guard.abort();
        }
        let after = lock.optimistic_read().unwrap();
        assert_eq!(before, after, "aborted write does not update the stamp");
    }
}