1 //! Implementation of a data-race detector using Lamport Timestamps / Vector-clocks
2 //! based on the paper "Dynamic Race Detection for C++11":
3 //! <https://www.doc.ic.ac.uk/~afd/homepages/papers/pdfs/2017/POPL.pdf>
4 //! which does not report false positives when fences are used, and gives better
5 //! accuracy in the presence of read-modify-write operations.
6 //!
7 //! The implementation contains modifications to correctly model the changes to the memory model in C++20
8 //! regarding the weakening of release sequences: <http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0982r1.html>.
9 //! Relaxed stores now unconditionally block all currently active release sequences and so per-thread tracking of release
10 //! sequences is not needed.
11 //!
12 //! The implementation also models races with memory allocation and deallocation by treating allocation and
13 //! deallocation as a type of write internally for detecting data-races.
14 //!
15 //! Weak memory orders are explored, but not all weak behaviours are exhibited, so it can still miss data-races
16 //! but should not report false positives.
17 //!
18 //! Data-race definition from (<https://en.cppreference.com/w/cpp/language/memory_model#Threads_and_data_races>):
19 //! a data race occurs between two memory accesses if they are on different threads, at least one operation
20 //! is non-atomic, at least one operation is a write, and neither access happens-before the other. Read the link
21 //! for the full definition.
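//!
//! For illustration, a minimal sketch (not taken from Miri's test suite) of the kind of program this
//! detector reports: a non-atomic write on a spawned thread racing with a non-atomic read on the main
//! thread, with no happens-before edge between them.
//! ```rust,ignore
//! static mut DATA: u32 = 0;
//! // Spawned thread performs a non-atomic write.
//! let t = std::thread::spawn(|| unsafe { DATA = 1 });
//! // Main thread performs a non-atomic read with no synchronization: a data race.
//! let _r = unsafe { DATA };
//! t.join().unwrap();
//! ```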
22 //!
23 //! This re-uses vector indexes for threads that are known to be unable to report data-races; this is valid
24 //! because it only re-uses vector indexes once all currently-active (not-terminated) threads have an internal
25 //! vector clock that happens-after the join operation of the candidate thread. Threads that have not been joined
26 //! on are not considered. A data-race implies that there is some index x where `clock[x] > thread_clock`;
27 //! since every live thread's clock already happens-after the candidate thread's final timestamp and vector clocks
28 //! only ever increase, `clock[candidate-idx] > thread_clock` can never hold again, and hence
29 //! a data-race can never be reported in that vector index again. This means that the thread-index can be
30 //! safely re-used, starting on the next timestamp for the newly created thread.
31 //!
32 //! The timestamps used in the data-race detector assign each sequence of non-atomic operations
33 //! followed by a single atomic or concurrent operation a single timestamp.
34 //! Write, Read, Write, ThreadJoin will be represented by a single timestamp value on a thread.
35 //! This is because extra increment operations between the operations in the sequence are not
36 //! required for accurate data-race reporting.
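//!
//! A schematic illustration of that rule (thread and timestamp values are made up):
//! ```text
//! x = 1;                   // non-atomic write   @ timestamp 3
//! let a = y;               // non-atomic read    @ timestamp 3
//! FLAG.store(1, Release);  // release store      @ timestamp 3
//!                          // the clock is then bumped, so subsequent operations use timestamp 4
//! z = 2;                   // non-atomic write   @ timestamp 4
//! ```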
37 //!
38 //! As per the paper, a thread's timestamp is only incremented after a release operation is performed,
39 //! so some atomic operations that only perform acquires do not increment the timestamp. Due to shared
40 //! code, some atomic operations may increment the timestamp when not necessary, but this has no effect
41 //! on the data-race detection code.
42 
43 use std::{
44     cell::{Cell, Ref, RefCell, RefMut},
45     fmt::Debug,
46     mem,
47 };
48 
49 use rustc_ast::Mutability;
50 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
51 use rustc_index::{Idx, IndexVec};
52 use rustc_middle::mir;
53 use rustc_span::Span;
54 use rustc_target::abi::{Align, Size};
55 
56 use crate::diagnostics::RacingOp;
57 use crate::*;
58 
59 use super::{
60     vector_clock::{VClock, VTimestamp, VectorIdx},
61     weak_memory::EvalContextExt as _,
62 };
63 
64 pub type AllocState = VClockAlloc;
65 
66 /// Valid atomic read-write orderings, mirroring atomic::Ordering (but without `#[non_exhaustive]`).
67 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
68 pub enum AtomicRwOrd {
69     Relaxed,
70     Acquire,
71     Release,
72     AcqRel,
73     SeqCst,
74 }
75 
76 /// Valid atomic read orderings, subset of atomic::Ordering.
77 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
78 pub enum AtomicReadOrd {
79     Relaxed,
80     Acquire,
81     SeqCst,
82 }
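
// A hedged sketch (helper name hypothetical, not part of this file) of how the full
// `std::sync::atomic::Ordering` can be narrowed to the read-only subset above:
//
//     fn read_ord(ord: std::sync::atomic::Ordering) -> AtomicReadOrd {
//         use std::sync::atomic::Ordering::*;
//         match ord {
//             Relaxed => AtomicReadOrd::Relaxed,
//             Acquire => AtomicReadOrd::Acquire,
//             SeqCst => AtomicReadOrd::SeqCst,
//             // Release and AcqRel are not valid orderings for a load.
//             _ => panic!("invalid ordering for an atomic load"),
//         }
//     }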
83 
84 /// Valid atomic write orderings, subset of atomic::Ordering.
85 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
86 pub enum AtomicWriteOrd {
87     Relaxed,
88     Release,
89     SeqCst,
90 }
91 
92 /// Valid atomic fence orderings, subset of atomic::Ordering.
93 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
94 pub enum AtomicFenceOrd {
95     Acquire,
96     Release,
97     AcqRel,
98     SeqCst,
99 }
100 
101 /// The current set of vector clocks describing the state
102 /// of a thread; it contains the happens-before clock and
103 /// additional metadata to model atomic fence operations.
104 #[derive(Clone, Default, Debug)]
105 pub(super) struct ThreadClockSet {
106     /// The increasing clock representing timestamps
107     /// that happen-before this thread.
108     pub(super) clock: VClock,
109 
110     /// The set of timestamps that will happen-before this
111     /// thread once it performs an acquire fence.
112     fence_acquire: VClock,
113 
114     /// The last timestamp of happens-before relations that
115     /// have been released by this thread by a fence.
116     fence_release: VClock,
117 
118     /// Timestamps of the last SC fence performed by each
119     /// thread, updated when this thread performs an SC fence
120     pub(super) fence_seqcst: VClock,
121 
122     /// Timestamps of the last SC write performed by each
123     /// thread, updated when this thread performs an SC fence
124     pub(super) write_seqcst: VClock,
125 
126     /// Timestamps of the last SC fence performed by each
127     /// thread, updated when this thread performs an SC read
128     pub(super) read_seqcst: VClock,
129 }
130 
131 impl ThreadClockSet {
132     /// Apply the effects of a release fence to this
133     /// set of thread vector clocks.
134     #[inline]
135     fn apply_release_fence(&mut self) {
136         self.fence_release.clone_from(&self.clock);
137     }
138 
139     /// Apply the effects of an acquire fence to this
140     /// set of thread vector clocks.
141     #[inline]
142     fn apply_acquire_fence(&mut self) {
143         self.clock.join(&self.fence_acquire);
144     }
145 
146     /// Increment the happens-before clock at a
147     /// known index.
148     #[inline]
149     fn increment_clock(&mut self, index: VectorIdx, current_span: Span) {
150         self.clock.increment_index(index, current_span);
151     }
152 
153     /// Join the happens-before clock with that of
154     /// another thread, used to model thread join
155     /// operations.
156     fn join_with(&mut self, other: &ThreadClockSet) {
157         self.clock.join(&other.clock);
158     }
159 }
160 
161 /// Error returned by finding a data race
162 /// should be elaborated upon.
163 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
164 pub struct DataRace;
165 
166 /// Externally stored memory cell clocks
167 /// explicitly to reduce memory usage for the
168 /// common case where no atomic operations
169 /// exist on the memory cell.
170 #[derive(Clone, PartialEq, Eq, Default, Debug)]
171 struct AtomicMemoryCellClocks {
172     /// The clock-vector of the timestamp of the last atomic
173     /// read operation performed by each thread.
174     /// This detects potential data-races between atomic read
175     /// and non-atomic write operations.
176     read_vector: VClock,
177 
178     /// The clock-vector of the timestamp of the last atomic
179     /// write operation performed by each thread.
180     /// This detects potential data-races between atomic write
181     /// and non-atomic read or write operations.
182     write_vector: VClock,
183 
184     /// Synchronization vector for acquire-release semantics
185     /// contains the vector of timestamps that will
186     /// happen-before a thread if an acquire-load is
187     /// performed on the data.
188     sync_vector: VClock,
189 }
190 
191 /// Type of write operation: allocating memory,
192 /// non-atomic writes, and deallocating memory
193 /// are all treated as writes for the purpose
194 /// of the data-race detector.
195 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
196 enum WriteType {
197     /// Allocate memory.
198     Allocate,
199 
200     /// Standard unsynchronized write.
201     Write,
202 
203     /// Deallocate memory.
204     /// Note that when memory is deallocated first, later non-atomic accesses
205     /// will be reported as use-after-free, not as data races.
206     /// (Same for `Allocate` above.)
207     Deallocate,
208 }
209 impl WriteType {
210     fn get_descriptor(self) -> &'static str {
211         match self {
212             WriteType::Allocate => "Allocate",
213             WriteType::Write => "Write",
214             WriteType::Deallocate => "Deallocate",
215         }
216     }
217 }
218 
219 /// Memory Cell vector clock metadata
220 /// for data-race detection.
221 #[derive(Clone, PartialEq, Eq, Debug)]
222 struct MemoryCellClocks {
223     /// The vector-clock timestamp of the last write
224     /// corresponding to the writing thread's timestamp.
225     write: VTimestamp,
226 
227     /// The identifier of the vector index, corresponding to a thread
228     /// that performed the last write operation.
229     write_index: VectorIdx,
230 
231     /// The type of operation that the write index represents,
232     /// either newly allocated memory, a non-atomic write or
233     /// a deallocation of memory.
234     write_type: WriteType,
235 
236     /// The vector-clock of the timestamp of the last read operation
237     /// performed by a thread since the last write operation occurred.
238     /// It is reset to zero on each write operation.
239     read: VClock,
240 
241     /// Atomic acquire & release sequence tracking clocks.
242     /// For non-atomic memory in the common case this
243     /// value is set to None.
244     atomic_ops: Option<Box<AtomicMemoryCellClocks>>,
245 }
246 
247 impl MemoryCellClocks {
248     /// Create a new set of clocks representing memory allocated
249     ///  at a given vector timestamp and index.
250     fn new(alloc: VTimestamp, alloc_index: VectorIdx) -> Self {
251         MemoryCellClocks {
252             read: VClock::default(),
253             write: alloc,
254             write_index: alloc_index,
255             write_type: WriteType::Allocate,
256             atomic_ops: None,
257         }
258     }
259 
260     /// Load the internal atomic memory cells if they exist.
261     #[inline]
262     fn atomic(&self) -> Option<&AtomicMemoryCellClocks> {
263         self.atomic_ops.as_deref()
264     }
265 
266     /// Load or create the internal atomic memory metadata
267     /// if it does not exist.
268     #[inline]
269     fn atomic_mut(&mut self) -> &mut AtomicMemoryCellClocks {
270         self.atomic_ops.get_or_insert_with(Default::default)
271     }
272 
273     /// Update memory cell data-race tracking for atomic
274     /// load acquire semantics; this is a no-op if this memory was
275     /// not used previously as atomic memory.
276     fn load_acquire(
277         &mut self,
278         thread_clocks: &mut ThreadClockSet,
279         index: VectorIdx,
280     ) -> Result<(), DataRace> {
281         self.atomic_read_detect(thread_clocks, index)?;
282         if let Some(atomic) = self.atomic() {
283             thread_clocks.clock.join(&atomic.sync_vector);
284         }
285         Ok(())
286     }
287 
288     /// Checks if the memory cell access is ordered with all prior atomic reads and writes
289     fn race_free_with_atomic(&self, thread_clocks: &ThreadClockSet) -> bool {
290         if let Some(atomic) = self.atomic() {
291             atomic.read_vector <= thread_clocks.clock && atomic.write_vector <= thread_clocks.clock
292         } else {
293             true
294         }
295     }
296 
297     /// Update memory cell data-race tracking for atomic
298     /// load relaxed semantics; this is a no-op if this memory was
299     /// not used previously as atomic memory.
300     fn load_relaxed(
301         &mut self,
302         thread_clocks: &mut ThreadClockSet,
303         index: VectorIdx,
304     ) -> Result<(), DataRace> {
305         self.atomic_read_detect(thread_clocks, index)?;
306         if let Some(atomic) = self.atomic() {
307             thread_clocks.fence_acquire.join(&atomic.sync_vector);
308         }
309         Ok(())
310     }
311 
312     /// Update the memory cell data-race tracking for atomic
313     /// store release semantics.
314     fn store_release(
315         &mut self,
316         thread_clocks: &ThreadClockSet,
317         index: VectorIdx,
318     ) -> Result<(), DataRace> {
319         self.atomic_write_detect(thread_clocks, index)?;
320         let atomic = self.atomic_mut();
321         atomic.sync_vector.clone_from(&thread_clocks.clock);
322         Ok(())
323     }
324 
325     /// Update the memory cell data-race tracking for atomic
326     /// store relaxed semantics.
327     fn store_relaxed(
328         &mut self,
329         thread_clocks: &ThreadClockSet,
330         index: VectorIdx,
331     ) -> Result<(), DataRace> {
332         self.atomic_write_detect(thread_clocks, index)?;
333 
334         // The handling of release sequences was changed in C++20 and so
335         // the code here is different to the paper since now all relaxed
336         // stores block release sequences. The exception for same-thread
337         // relaxed stores has been removed.
338         let atomic = self.atomic_mut();
339         atomic.sync_vector.clone_from(&thread_clocks.fence_release);
340         Ok(())
341     }
342 
343     /// Update the memory cell data-race tracking for atomic
344     /// store release semantics for RMW operations.
345     fn rmw_release(
346         &mut self,
347         thread_clocks: &ThreadClockSet,
348         index: VectorIdx,
349     ) -> Result<(), DataRace> {
350         self.atomic_write_detect(thread_clocks, index)?;
351         let atomic = self.atomic_mut();
352         atomic.sync_vector.join(&thread_clocks.clock);
353         Ok(())
354     }
355 
356     /// Update the memory cell data-race tracking for atomic
357     /// store relaxed semantics for RMW operations.
358     fn rmw_relaxed(
359         &mut self,
360         thread_clocks: &ThreadClockSet,
361         index: VectorIdx,
362     ) -> Result<(), DataRace> {
363         self.atomic_write_detect(thread_clocks, index)?;
364         let atomic = self.atomic_mut();
365         atomic.sync_vector.join(&thread_clocks.fence_release);
366         Ok(())
367     }
368 
369     /// Detect data-races with an atomic read, caused by a non-atomic write that does
370     /// not happen-before the atomic-read.
371     fn atomic_read_detect(
372         &mut self,
373         thread_clocks: &ThreadClockSet,
374         index: VectorIdx,
375     ) -> Result<(), DataRace> {
376         log::trace!("Atomic read with vectors: {:#?} :: {:#?}", self, thread_clocks);
377         let atomic = self.atomic_mut();
378         atomic.read_vector.set_at_index(&thread_clocks.clock, index);
379         if self.write <= thread_clocks.clock[self.write_index] { Ok(()) } else { Err(DataRace) }
380     }
381 
382     /// Detect data-races with an atomic write, either with a non-atomic read or with
383     /// a non-atomic write.
384     fn atomic_write_detect(
385         &mut self,
386         thread_clocks: &ThreadClockSet,
387         index: VectorIdx,
388     ) -> Result<(), DataRace> {
389         log::trace!("Atomic write with vectors: {:#?} :: {:#?}", self, thread_clocks);
390         let atomic = self.atomic_mut();
391         atomic.write_vector.set_at_index(&thread_clocks.clock, index);
392         if self.write <= thread_clocks.clock[self.write_index] && self.read <= thread_clocks.clock {
393             Ok(())
394         } else {
395             Err(DataRace)
396         }
397     }
398 
399     /// Detect races for non-atomic read operations at the current memory cell;
400     /// returns an error if a data-race is detected.
401     fn read_race_detect(
402         &mut self,
403         thread_clocks: &mut ThreadClockSet,
404         index: VectorIdx,
405         current_span: Span,
406     ) -> Result<(), DataRace> {
407         log::trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, thread_clocks);
408         if !current_span.is_dummy() {
409             thread_clocks.clock[index].span = current_span;
410         }
411         if self.write <= thread_clocks.clock[self.write_index] {
412             let race_free = if let Some(atomic) = self.atomic() {
413                 atomic.write_vector <= thread_clocks.clock
414             } else {
415                 true
416             };
417             self.read.set_at_index(&thread_clocks.clock, index);
418             if race_free { Ok(()) } else { Err(DataRace) }
419         } else {
420             Err(DataRace)
421         }
422     }
423 
424     /// Detect races for non-atomic write operations at the current memory cell;
425     /// returns an error if a data-race is detected.
426     fn write_race_detect(
427         &mut self,
428         thread_clocks: &mut ThreadClockSet,
429         index: VectorIdx,
430         write_type: WriteType,
431         current_span: Span,
432     ) -> Result<(), DataRace> {
433         log::trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, thread_clocks);
434         if !current_span.is_dummy() {
435             thread_clocks.clock[index].span = current_span;
436         }
437         if self.write <= thread_clocks.clock[self.write_index] && self.read <= thread_clocks.clock {
438             let race_free = if let Some(atomic) = self.atomic() {
439                 atomic.write_vector <= thread_clocks.clock
440                     && atomic.read_vector <= thread_clocks.clock
441             } else {
442                 true
443             };
444             self.write = thread_clocks.clock[index];
445             self.write_index = index;
446             self.write_type = write_type;
447             if race_free {
448                 self.read.set_zero_vector();
449                 Ok(())
450             } else {
451                 Err(DataRace)
452             }
453         } else {
454             Err(DataRace)
455         }
456     }
457 }
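
// To make the happens-before test used above concrete, here is a self-contained sketch with plain
// integer clocks (hypothetical helper, not Miri's `VClock`/`VTimestamp` types): a recorded write
// races with a later access unless the writer's timestamp is already covered by the accessing
// thread's clock entry for the writer.
//
//     fn write_races_with_access(write_ts: u32, writer_idx: usize, accessor_clock: &[u32]) -> bool {
//         // No race iff the write happens-before the access, i.e. the accessor has already
//         // synchronized with the writer up to at least `write_ts`.
//         write_ts > accessor_clock.get(writer_idx).copied().unwrap_or(0)
//     }
//
//     // Thread 1 wrote at timestamp 5, but thread 0 has only synchronized with thread 1 up to 3.
//     assert!(write_races_with_access(5, 1, &[7, 3]));
//     // After synchronizing, thread 0's entry for thread 1 is 6, so the accesses are ordered.
//     assert!(!write_races_with_access(5, 1, &[7, 6]));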
458 
459 /// Evaluation context extensions.
460 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx> {}
461 pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
462     /// Perform an atomic read operation at the memory location.
463     fn read_scalar_atomic(
464         &self,
465         place: &MPlaceTy<'tcx, Provenance>,
466         atomic: AtomicReadOrd,
467     ) -> InterpResult<'tcx, Scalar<Provenance>> {
468         let this = self.eval_context_ref();
469         this.atomic_access_check(place)?;
470         // This will read from the last store in the modification order of this location. In case
471         // weak memory emulation is enabled, this may not be the store we will pick to actually read from and return.
472         // This is fine with Stacked Borrows and race checks because they don't concern metadata on
473         // the *value* (including the associated provenance if this is an AtomicPtr) at this location.
474         // Only metadata on the location itself is used.
475         let scalar = this.allow_data_races_ref(move |this| this.read_scalar(&place.into()))?;
476         this.validate_overlapping_atomic(place)?;
477         this.buffered_atomic_read(place, atomic, scalar, || {
478             this.validate_atomic_load(place, atomic)
479         })
480     }
481 
482     /// Perform an atomic write operation at the memory location.
483     fn write_scalar_atomic(
484         &mut self,
485         val: Scalar<Provenance>,
486         dest: &MPlaceTy<'tcx, Provenance>,
487         atomic: AtomicWriteOrd,
488     ) -> InterpResult<'tcx> {
489         let this = self.eval_context_mut();
490         this.atomic_access_check(dest)?;
491 
492         this.validate_overlapping_atomic(dest)?;
493         this.allow_data_races_mut(move |this| this.write_scalar(val, &dest.into()))?;
494         this.validate_atomic_store(dest, atomic)?;
495         // FIXME: it's not possible to get the value before write_scalar. A read_scalar will cause
496         // side effects from a read the program did not perform. So we have to initialise
497         // the store buffer with the value currently being written.
498         // ONCE this is fixed, please remove the hack in buffered_atomic_write() in weak_memory.rs:
499         // https://github.com/rust-lang/miri/issues/2164
500         this.buffered_atomic_write(val, dest, atomic, val)
501     }
502 
503     /// Perform an atomic operation on a memory location.
504     fn atomic_op_immediate(
505         &mut self,
506         place: &MPlaceTy<'tcx, Provenance>,
507         rhs: &ImmTy<'tcx, Provenance>,
508         op: mir::BinOp,
509         neg: bool,
510         atomic: AtomicRwOrd,
511     ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
512         let this = self.eval_context_mut();
513         this.atomic_access_check(place)?;
514 
515         this.validate_overlapping_atomic(place)?;
516         let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
517 
518         // Atomics wrap around on overflow.
519         let val = this.binary_op(op, &old, rhs)?;
520         let val = if neg { this.unary_op(mir::UnOp::Not, &val)? } else { val };
521         this.allow_data_races_mut(|this| this.write_immediate(*val, &place.into()))?;
522 
523         this.validate_atomic_rmw(place, atomic)?;
524 
525         this.buffered_atomic_rmw(val.to_scalar(), place, atomic, old.to_scalar())?;
526         Ok(old)
527     }
528 
529     /// Perform an atomic exchange with a memory place and a new
530     /// scalar value, the old value is returned.
531     fn atomic_exchange_scalar(
532         &mut self,
533         place: &MPlaceTy<'tcx, Provenance>,
534         new: Scalar<Provenance>,
535         atomic: AtomicRwOrd,
536     ) -> InterpResult<'tcx, Scalar<Provenance>> {
537         let this = self.eval_context_mut();
538         this.atomic_access_check(place)?;
539 
540         this.validate_overlapping_atomic(place)?;
541         let old = this.allow_data_races_mut(|this| this.read_scalar(&place.into()))?;
542         this.allow_data_races_mut(|this| this.write_scalar(new, &place.into()))?;
543 
544         this.validate_atomic_rmw(place, atomic)?;
545 
546         this.buffered_atomic_rmw(new, place, atomic, old)?;
547         Ok(old)
548     }
549 
550     /// Perform an atomic min/max operation with a memory place and a new
551     /// scalar value; the old value is returned.
552     fn atomic_min_max_scalar(
553         &mut self,
554         place: &MPlaceTy<'tcx, Provenance>,
555         rhs: ImmTy<'tcx, Provenance>,
556         min: bool,
557         atomic: AtomicRwOrd,
558     ) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
559         let this = self.eval_context_mut();
560         this.atomic_access_check(place)?;
561 
562         this.validate_overlapping_atomic(place)?;
563         let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
564         let lt = this.binary_op(mir::BinOp::Lt, &old, &rhs)?.to_scalar().to_bool()?;
565 
566         let new_val = if min {
567             if lt { &old } else { &rhs }
568         } else {
569             if lt { &rhs } else { &old }
570         };
571 
572         this.allow_data_races_mut(|this| this.write_immediate(**new_val, &place.into()))?;
573 
574         this.validate_atomic_rmw(place, atomic)?;
575 
576         this.buffered_atomic_rmw(new_val.to_scalar(), place, atomic, old.to_scalar())?;
577 
578         // Return the old value.
579         Ok(old)
580     }
581 
582     /// Perform an atomic compare and exchange at a given memory location.
583     /// On success an atomic RMW operation is performed and on failure
584     /// only an atomic read occurs. If `can_fail_spuriously` is true,
585     /// then we treat it as a "compare_exchange_weak" operation, and
586     /// some portion of the time fail even when the values are actually
587     /// identical.
588     fn atomic_compare_exchange_scalar(
589         &mut self,
590         place: &MPlaceTy<'tcx, Provenance>,
591         expect_old: &ImmTy<'tcx, Provenance>,
592         new: Scalar<Provenance>,
593         success: AtomicRwOrd,
594         fail: AtomicReadOrd,
595         can_fail_spuriously: bool,
596     ) -> InterpResult<'tcx, Immediate<Provenance>> {
597         use rand::Rng as _;
598         let this = self.eval_context_mut();
599         this.atomic_access_check(place)?;
600 
601         this.validate_overlapping_atomic(place)?;
602         // Failure ordering cannot be stronger than success ordering, therefore first attempt
603         // to read with the failure ordering and if successful then try again with the success
604         // read ordering and write in the success case.
605         // Read as immediate for the sake of `binary_op()`
606         let old = this.allow_data_races_mut(|this| this.read_immediate(&(place.into())))?;
607         // `binary_op` will bail if either of them is not a scalar.
608         let eq = this.binary_op(mir::BinOp::Eq, &old, expect_old)?;
609         // If the operation would succeed, but is "weak", fail some portion
610         // of the time, based on `success_rate`.
611         let success_rate = 1.0 - this.machine.cmpxchg_weak_failure_rate;
612         let cmpxchg_success = eq.to_scalar().to_bool()?
613             && if can_fail_spuriously {
614                 this.machine.rng.get_mut().gen_bool(success_rate)
615             } else {
616                 true
617             };
618         let res = Immediate::ScalarPair(old.to_scalar(), Scalar::from_bool(cmpxchg_success));
619 
620         // Update ptr depending on comparison.
621         // if successful, perform a full rw-atomic validation
622         // otherwise treat this as an atomic load with the fail ordering.
623         if cmpxchg_success {
624             this.allow_data_races_mut(|this| this.write_scalar(new, &place.into()))?;
625             this.validate_atomic_rmw(place, success)?;
626             this.buffered_atomic_rmw(new, place, success, old.to_scalar())?;
627         } else {
628             this.validate_atomic_load(place, fail)?;
629             // A failed compare exchange is equivalent to a load, reading from the latest store
630             // in the modification order.
631             // Since `old` is only a value and not the store element, we need to separately
632             // find it in our store buffer and perform load_impl on it.
633             this.perform_read_on_buffered_latest(place, fail, old.to_scalar())?;
634         }
635 
636         // Return the old value.
637         Ok(res)
638     }
639 
640     /// Update the data-race detector for an atomic fence on the current thread.
641     fn atomic_fence(&mut self, atomic: AtomicFenceOrd) -> InterpResult<'tcx> {
642         let this = self.eval_context_mut();
643         let current_span = this.machine.current_span();
644         if let Some(data_race) = &mut this.machine.data_race {
645             data_race.maybe_perform_sync_operation(
646                 &this.machine.threads,
647                 current_span,
648                 |index, mut clocks| {
649                     log::trace!("Atomic fence on {:?} with ordering {:?}", index, atomic);
650 
651                     // Apply data-race detection for the current fences
652                     // this treats AcqRel and SeqCst as the same as an acquire
653                     // and release fence applied in the same timestamp.
654                     if atomic != AtomicFenceOrd::Release {
655                         // Either Acquire | AcqRel | SeqCst
656                         clocks.apply_acquire_fence();
657                     }
658                     if atomic != AtomicFenceOrd::Acquire {
659                         // Either Release | AcqRel | SeqCst
660                         clocks.apply_release_fence();
661                     }
662                     if atomic == AtomicFenceOrd::SeqCst {
663                         data_race.last_sc_fence.borrow_mut().set_at_index(&clocks.clock, index);
664                         clocks.fence_seqcst.join(&data_race.last_sc_fence.borrow());
665                         clocks.write_seqcst.join(&data_race.last_sc_write.borrow());
666                     }
667 
668                     // Increment timestamp in case of release semantics.
669                     Ok(atomic != AtomicFenceOrd::Acquire)
670                 },
671             )
672         } else {
673             Ok(())
674         }
675     }
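
    // A hedged sketch (variable names and code illustrative, not taken from this file) of the
    // fence-based message passing that the fence clocks above model without false positives:
    //
    //     // thread A                          // thread B
    //     DATA = 42;                           while FLAG.load(Relaxed) == 0 {}
    //     fence(Release);                      fence(Acquire);
    //     FLAG.store(1, Relaxed);              let v = DATA; // ordered, no race reported
    //
    // The release fence copies thread A's clock into its `fence_release` clock, the relaxed store
    // publishes that clock into the location's `sync_vector`, and thread B's relaxed load buffers it
    // in `fence_acquire` until the acquire fence joins it into B's clock, establishing the
    // happens-before edge between the two non-atomic accesses to DATA.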
676 
677     /// After all threads are done running, this allows data races to occur for subsequent
678     /// 'administrative' machine accesses (that logically happen outside of the Abstract Machine).
679     fn allow_data_races_all_threads_done(&mut self) {
680         let this = self.eval_context_ref();
681         assert!(this.have_all_terminated());
682         if let Some(data_race) = &this.machine.data_race {
683             let old = data_race.ongoing_action_data_race_free.replace(true);
684             assert!(!old, "cannot nest allow_data_races");
685         }
686     }
687 }
688 
689 /// Vector clock metadata for a logical memory allocation.
690 #[derive(Debug, Clone)]
691 pub struct VClockAlloc {
692     /// Assigning each byte a MemoryCellClocks.
693     alloc_ranges: RefCell<RangeMap<MemoryCellClocks>>,
694 }
695 
696 impl VisitTags for VClockAlloc {
697     fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) {
698         // No tags here.
699     }
700 }
701 
702 impl VClockAlloc {
703     /// Create a new data-race detector for newly allocated memory.
704     pub fn new_allocation(
705         global: &GlobalState,
706         thread_mgr: &ThreadManager<'_, '_>,
707         len: Size,
708         kind: MemoryKind<MiriMemoryKind>,
709         current_span: Span,
710     ) -> VClockAlloc {
711         let (alloc_timestamp, alloc_index) = match kind {
712             // User allocated and stack memory should track allocation.
713             MemoryKind::Machine(
714                 MiriMemoryKind::Rust
715                 | MiriMemoryKind::Miri
716                 | MiriMemoryKind::C
717                 | MiriMemoryKind::WinHeap
718                 | MiriMemoryKind::Mmap,
719             )
720             | MemoryKind::Stack => {
721                 let (alloc_index, clocks) = global.current_thread_state(thread_mgr);
722                 let mut alloc_timestamp = clocks.clock[alloc_index];
723                 alloc_timestamp.span = current_span;
724                 (alloc_timestamp, alloc_index)
725             }
726             // Other global memory should trace races but be allocated at the 0 timestamp.
727             MemoryKind::Machine(
728                 MiriMemoryKind::Global
729                 | MiriMemoryKind::Machine
730                 | MiriMemoryKind::Runtime
731                 | MiriMemoryKind::ExternStatic
732                 | MiriMemoryKind::Tls,
733             )
734             | MemoryKind::CallerLocation => (VTimestamp::ZERO, VectorIdx::MAX_INDEX),
735         };
736         VClockAlloc {
737             alloc_ranges: RefCell::new(RangeMap::new(
738                 len,
739                 MemoryCellClocks::new(alloc_timestamp, alloc_index),
740             )),
741         }
742     }
743 
744     // Find an index, if one exists, where the value
745     // in `l` is greater than the value in `r`.
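    // For example (illustrative values, with plain integers standing in for the clock entries):
    // for l = [3, 5] and r = [4, 2], index 1 is the first where l exceeds r, so Some(1) is returned;
    // for l = [3, 5, 7] and r = [4, 6], the trailing element 7 is non-zero, so Some(2) is returned.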
746     fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
747         log::trace!("Find index where not {:?} <= {:?}", l, r);
748         let l_slice = l.as_slice();
749         let r_slice = r.as_slice();
750         l_slice
751             .iter()
752             .zip(r_slice.iter())
753             .enumerate()
754             .find_map(|(idx, (&l, &r))| if l > r { Some(idx) } else { None })
755             .or_else(|| {
756                 if l_slice.len() > r_slice.len() {
757                     // By invariant, if l_slice is longer
758                     // then one element must be larger.
759                     // This just validates that this is true
760                     // and reports earlier elements first.
761                     let l_remainder_slice = &l_slice[r_slice.len()..];
762                     let idx = l_remainder_slice
763                         .iter()
764                         .enumerate()
765                         .find_map(|(idx, &r)| if r == VTimestamp::ZERO { None } else { Some(idx) })
766                         .expect("Invalid VClock Invariant");
767                     Some(idx + r_slice.len())
768                 } else {
769                     None
770                 }
771             })
772             .map(VectorIdx::new)
773     }
774 
775     /// Report a data-race found in the program.
776     /// This finds the two racing threads and the type
777     /// of data-race that occurred. This will also
778     /// return info about the memory location the data-race
779     /// occurred in.
780     #[cold]
781     #[inline(never)]
782     fn report_data_race<'tcx>(
783         global: &GlobalState,
784         thread_mgr: &ThreadManager<'_, '_>,
785         mem_clocks: &MemoryCellClocks,
786         action: &str,
787         is_atomic: bool,
788         ptr_dbg: Pointer<AllocId>,
789     ) -> InterpResult<'tcx> {
790         let (current_index, current_clocks) = global.current_thread_state(thread_mgr);
791         let write_clock;
792         let (other_action, other_thread, other_clock) = if mem_clocks.write
793             > current_clocks.clock[mem_clocks.write_index]
794         {
795             // Convert the write action into the vector clock it
796             // represents for diagnostic purposes.
797             write_clock = VClock::new_with_index(mem_clocks.write_index, mem_clocks.write);
798             (mem_clocks.write_type.get_descriptor(), mem_clocks.write_index, &write_clock)
799         } else if let Some(idx) = Self::find_gt_index(&mem_clocks.read, &current_clocks.clock) {
800             ("Read", idx, &mem_clocks.read)
801         } else if !is_atomic {
802             if let Some(atomic) = mem_clocks.atomic() {
803                 if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &current_clocks.clock)
804                 {
805                     ("Atomic Store", idx, &atomic.write_vector)
806                 } else if let Some(idx) =
807                     Self::find_gt_index(&atomic.read_vector, &current_clocks.clock)
808                 {
809                     ("Atomic Load", idx, &atomic.read_vector)
810                 } else {
811                     unreachable!(
812                         "Failed to report data-race for non-atomic operation: no race found"
813                     )
814                 }
815             } else {
816                 unreachable!(
817                     "Failed to report data-race for non-atomic operation: no atomic component"
818                 )
819             }
820         } else {
821             unreachable!("Failed to report data-race for atomic operation")
822         };
823 
824         // Load elaborated thread information about the racing thread actions.
825         let current_thread_info = global.print_thread_metadata(thread_mgr, current_index);
826         let other_thread_info = global.print_thread_metadata(thread_mgr, other_thread);
827 
828         // Throw the data-race detection.
829         Err(err_machine_stop!(TerminationInfo::DataRace {
830             ptr: ptr_dbg,
831             op1: RacingOp {
832                 action: other_action.to_string(),
833                 thread_info: other_thread_info,
834                 span: other_clock.as_slice()[other_thread.index()].span_data(),
835             },
836             op2: RacingOp {
837                 action: action.to_string(),
838                 thread_info: current_thread_info,
839                 span: current_clocks.clock.as_slice()[current_index.index()].span_data(),
840             },
841         }))?
842     }
843 
844     /// Detect racing atomic reads and writes (not data races)
845     /// on every byte of the current access range.
846     pub(super) fn race_free_with_atomic(
847         &self,
848         range: AllocRange,
849         global: &GlobalState,
850         thread_mgr: &ThreadManager<'_, '_>,
851     ) -> bool {
852         if global.race_detecting() {
853             let (_, thread_clocks) = global.current_thread_state(thread_mgr);
854             let alloc_ranges = self.alloc_ranges.borrow();
855             for (_, mem_clocks) in alloc_ranges.iter(range.start, range.size) {
856                 if !mem_clocks.race_free_with_atomic(&thread_clocks) {
857                     return false;
858                 }
859             }
860         }
861         true
862     }
863 
864     /// Detect data-races for an unsynchronized read operation; will not perform
865     /// data-race detection if `race_detecting()` is false, either due to no threads
866     /// being created or if it is temporarily disabled during a racy read or write
867     /// operation for which data-race detection is handled separately, for example
868     /// atomic read operations.
869     pub fn read<'tcx>(
870         &self,
871         alloc_id: AllocId,
872         access_range: AllocRange,
873         machine: &MiriMachine<'_, '_>,
874     ) -> InterpResult<'tcx> {
875         let current_span = machine.current_span();
876         let global = machine.data_race.as_ref().unwrap();
877         if global.race_detecting() {
878             let (index, mut thread_clocks) = global.current_thread_state_mut(&machine.threads);
879             let mut alloc_ranges = self.alloc_ranges.borrow_mut();
880             for (mem_clocks_range, mem_clocks) in
881                 alloc_ranges.iter_mut(access_range.start, access_range.size)
882             {
883                 if let Err(DataRace) =
884                     mem_clocks.read_race_detect(&mut thread_clocks, index, current_span)
885                 {
886                     drop(thread_clocks);
887                     // Report data-race.
888                     return Self::report_data_race(
889                         global,
890                         &machine.threads,
891                         mem_clocks,
892                         "Read",
893                         false,
894                         Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
895                     );
896                 }
897             }
898             Ok(())
899         } else {
900             Ok(())
901         }
902     }
903 
904     // Shared code for detecting data-races on unique access to a section of memory
905     fn unique_access<'tcx>(
906         &mut self,
907         alloc_id: AllocId,
908         access_range: AllocRange,
909         write_type: WriteType,
910         machine: &mut MiriMachine<'_, '_>,
911     ) -> InterpResult<'tcx> {
912         let current_span = machine.current_span();
913         let global = machine.data_race.as_mut().unwrap();
914         if global.race_detecting() {
915             let (index, mut thread_clocks) = global.current_thread_state_mut(&machine.threads);
916             for (mem_clocks_range, mem_clocks) in
917                 self.alloc_ranges.get_mut().iter_mut(access_range.start, access_range.size)
918             {
919                 if let Err(DataRace) = mem_clocks.write_race_detect(
920                     &mut thread_clocks,
921                     index,
922                     write_type,
923                     current_span,
924                 ) {
925                     drop(thread_clocks);
926                     // Report data-race
927                     return Self::report_data_race(
928                         global,
929                         &machine.threads,
930                         mem_clocks,
931                         write_type.get_descriptor(),
932                         false,
933                         Pointer::new(alloc_id, Size::from_bytes(mem_clocks_range.start)),
934                     );
935                 }
936             }
937             Ok(())
938         } else {
939             Ok(())
940         }
941     }
942 
943     /// Detect data-races for an unsynchronized write operation; will not perform
944     /// data-race detection if `race_detecting()` is false, either due to no threads
945     /// being created or if it is temporarily disabled during a racy read or write
946     /// operation.
947     pub fn write<'tcx>(
948         &mut self,
949         alloc_id: AllocId,
950         range: AllocRange,
951         machine: &mut MiriMachine<'_, '_>,
952     ) -> InterpResult<'tcx> {
953         self.unique_access(alloc_id, range, WriteType::Write, machine)
954     }
955 
956     /// Detect data-races for an unsynchronized deallocate operation; will not perform
957     /// data-race detection if `race_detecting()` is false, either due to no threads
958     /// being created or if it is temporarily disabled during a racy read or write
959     /// operation.
960     pub fn deallocate<'tcx>(
961         &mut self,
962         alloc_id: AllocId,
963         range: AllocRange,
964         machine: &mut MiriMachine<'_, '_>,
965     ) -> InterpResult<'tcx> {
966         self.unique_access(alloc_id, range, WriteType::Deallocate, machine)
967     }
968 }
969 
970 impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx> {}
971 trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
972     /// Temporarily allow data-races to occur. This should only be used in
973     /// one of these cases:
974     /// - One of the appropriate `validate_atomic` functions will be called
975     ///   to treat a memory access as atomic.
976     /// - The memory being accessed should be treated as internal state that
977     ///   cannot be accessed by the interpreted program.
978     /// - Execution of the interpreted program has halted.
979     #[inline]
980     fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriInterpCx<'mir, 'tcx>) -> R) -> R {
981         let this = self.eval_context_ref();
982         if let Some(data_race) = &this.machine.data_race {
983             let old = data_race.ongoing_action_data_race_free.replace(true);
984             assert!(!old, "cannot nest allow_data_races");
985         }
986         let result = op(this);
987         if let Some(data_race) = &this.machine.data_race {
988             data_race.ongoing_action_data_race_free.set(false);
989         }
990         result
991     }
992 
993     /// Same as `allow_data_races_ref`, this temporarily disables any data-race detection and
994     /// so should only be used for atomic operations or internal state that the program cannot
995     /// access.
996     #[inline]
997     fn allow_data_races_mut<R>(
998         &mut self,
999         op: impl FnOnce(&mut MiriInterpCx<'mir, 'tcx>) -> R,
1000     ) -> R {
1001         let this = self.eval_context_mut();
1002         if let Some(data_race) = &this.machine.data_race {
1003             let old = data_race.ongoing_action_data_race_free.replace(true);
1004             assert!(!old, "cannot nest allow_data_races");
1005         }
1006         let result = op(this);
1007         if let Some(data_race) = &this.machine.data_race {
1008             data_race.ongoing_action_data_race_free.set(false);
1009         }
1010         result
1011     }
1012 
1013     /// Checks that an atomic access is legal at the given place.
1014     fn atomic_access_check(&self, place: &MPlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
1015         let this = self.eval_context_ref();
1016         // Check alignment requirements. Atomics must always be aligned to their size,
1017         // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
1018         // be 8-aligned).
1019         let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1020         this.check_ptr_access_align(
1021             place.ptr,
1022             place.layout.size,
1023             align,
1024             CheckInAllocMsg::MemoryAccessTest,
1025         )?;
1026         // Ensure the allocation is mutable. Even failing (read-only) compare_exchange need mutable
1027         // memory on many targets (i.e., they segfault if that memory is mapped read-only), and
1028         // atomic loads can be implemented via compare_exchange on some targets. There could
1029         // possibly be some very specific exceptions to this, see
1030         // <https://github.com/rust-lang/miri/pull/2464#discussion_r939636130> for details.
1031         // We avoid `get_ptr_alloc` since we do *not* want to run the access hooks -- the actual
1032         // access will happen later.
1033         let (alloc_id, _offset, _prov) =
1034             this.ptr_try_get_alloc_id(place.ptr).expect("there are no zero-sized atomic accesses");
1035         if this.get_alloc_mutability(alloc_id)? == Mutability::Not {
1036             // FIXME: make this prettier, once these messages have separate title/span/help messages.
1037             throw_ub_format!(
1038                 "atomic operations cannot be performed on read-only memory\n\
1039                 many platforms require atomic read-modify-write instructions to be performed on writeable memory, even if the operation fails \
1040                 (and is hence nominally read-only)\n\
1041                 some platforms implement (some) atomic loads via compare-exchange, which means they do not work on read-only memory; \
1042                 it is possible that we could have an exception permitting this for specific kinds of loads\n\
1043                 please report an issue at <https://github.com/rust-lang/miri/issues> if this is a problem for you"
1044             );
1045         }
1046         Ok(())
1047     }
1048 
1049     /// Update the data-race detector for an atomic read occurring at the
1050     /// associated memory-place and on the current thread.
1051     fn validate_atomic_load(
1052         &self,
1053         place: &MPlaceTy<'tcx, Provenance>,
1054         atomic: AtomicReadOrd,
1055     ) -> InterpResult<'tcx> {
1056         let this = self.eval_context_ref();
1057         this.validate_overlapping_atomic(place)?;
1058         this.validate_atomic_op(
1059             place,
1060             atomic,
1061             "Atomic Load",
1062             move |memory, clocks, index, atomic| {
1063                 if atomic == AtomicReadOrd::Relaxed {
1064                     memory.load_relaxed(&mut *clocks, index)
1065                 } else {
1066                     memory.load_acquire(&mut *clocks, index)
1067                 }
1068             },
1069         )
1070     }
1071 
1072     /// Update the data-race detector for an atomic write occurring at the
1073     /// associated memory-place and on the current thread.
1074     fn validate_atomic_store(
1075         &mut self,
1076         place: &MPlaceTy<'tcx, Provenance>,
1077         atomic: AtomicWriteOrd,
1078     ) -> InterpResult<'tcx> {
1079         let this = self.eval_context_mut();
1080         this.validate_overlapping_atomic(place)?;
1081         this.validate_atomic_op(
1082             place,
1083             atomic,
1084             "Atomic Store",
1085             move |memory, clocks, index, atomic| {
1086                 if atomic == AtomicWriteOrd::Relaxed {
1087                     memory.store_relaxed(clocks, index)
1088                 } else {
1089                     memory.store_release(clocks, index)
1090                 }
1091             },
1092         )
1093     }
1094 
1095     /// Update the data-race detector for an atomic read-modify-write occurring
1096     /// at the associated memory place and on the current thread.
1097     fn validate_atomic_rmw(
1098         &mut self,
1099         place: &MPlaceTy<'tcx, Provenance>,
1100         atomic: AtomicRwOrd,
1101     ) -> InterpResult<'tcx> {
1102         use AtomicRwOrd::*;
1103         let acquire = matches!(atomic, Acquire | AcqRel | SeqCst);
1104         let release = matches!(atomic, Release | AcqRel | SeqCst);
1105         let this = self.eval_context_mut();
1106         this.validate_overlapping_atomic(place)?;
1107         this.validate_atomic_op(place, atomic, "Atomic RMW", move |memory, clocks, index, _| {
1108             if acquire {
1109                 memory.load_acquire(clocks, index)?;
1110             } else {
1111                 memory.load_relaxed(clocks, index)?;
1112             }
1113             if release {
1114                 memory.rmw_release(clocks, index)
1115             } else {
1116                 memory.rmw_relaxed(clocks, index)
1117             }
1118         })
1119     }
1120 
1121     /// Generic atomic operation implementation
1122     fn validate_atomic_op<A: Debug + Copy>(
1123         &self,
1124         place: &MPlaceTy<'tcx, Provenance>,
1125         atomic: A,
1126         description: &str,
1127         mut op: impl FnMut(
1128             &mut MemoryCellClocks,
1129             &mut ThreadClockSet,
1130             VectorIdx,
1131             A,
1132         ) -> Result<(), DataRace>,
1133     ) -> InterpResult<'tcx> {
1134         let this = self.eval_context_ref();
1135         if let Some(data_race) = &this.machine.data_race {
1136             if data_race.race_detecting() {
1137                 let size = place.layout.size;
1138                 let (alloc_id, base_offset, _prov) = this.ptr_get_alloc_id(place.ptr)?;
1139                 // Load and log the atomic operation.
1140                 // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
1141                 let alloc_meta = this.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
1142                 log::trace!(
1143                     "Atomic op({}) with ordering {:?} on {:?} (size={})",
1144                     description,
1145                     &atomic,
1146                     place.ptr,
1147                     size.bytes()
1148                 );
1149 
1150                 let current_span = this.machine.current_span();
1151                 // Perform the atomic operation.
1152                 data_race.maybe_perform_sync_operation(
1153                     &this.machine.threads,
1154                     current_span,
1155                     |index, mut thread_clocks| {
1156                         for (mem_clocks_range, mem_clocks) in
1157                             alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
1158                         {
1159                             if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic)
1160                             {
                                mem::drop(thread_clocks);
                                return VClockAlloc::report_data_race(
                                    data_race,
                                    &this.machine.threads,
                                    mem_clocks,
                                    description,
                                    true,
                                    Pointer::new(
                                        alloc_id,
                                        Size::from_bytes(mem_clocks_range.start),
                                    ),
                                )
                                .map(|_| true);
                            }
                        }

                        // This conservatively assumes all operations have release semantics
                        Ok(true)
                    },
                )?;

                // Log changes to atomic memory.
                if log::log_enabled!(log::Level::Trace) {
                    for (_offset, mem_clocks) in
                        alloc_meta.alloc_ranges.borrow().iter(base_offset, size)
                    {
                        log::trace!(
                            "Updated atomic memory({:?}, size={}) to {:#?}",
                            place.ptr,
                            size.bytes(),
                            mem_clocks.atomic_ops
                        );
                    }
                }
            }
        }
        Ok(())
    }
}

/// Extra metadata associated with a thread.
#[derive(Debug, Clone, Default)]
struct ThreadExtraState {
    /// The vector index currently in use by the thread.
    /// This is set to `None` once the vector index has been
    /// re-used by another thread, at which point the value
    /// will never need to be read during data-race reporting.
    vector_index: Option<VectorIdx>,

    /// Thread termination vector clock. This is set on thread
    /// termination and is used when joining on the thread, since
    /// the `vector_index` may already have been re-used by the
    /// time the join operation occurs.
    termination_vector_clock: Option<VClock>,
}

/// Global data-race detection state: tracks which thread each vector
/// index currently represents, and the vector-clocks associated with
/// each of those threads.
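///
/// For illustration (not a doctest): after a vector index has been re-used,
/// `vector_info[idx] == new_thread` and `thread_info[new_thread].vector_index == Some(idx)`,
/// while the thread that previously owned `idx` has its `vector_index` reset to `None`.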
// FIXME: it is probably better to have one large RefCell, than to have so many small ones.
#[derive(Debug, Clone)]
pub struct GlobalState {
    /// Set to true once the first additional thread has launched.
    /// While this is false, execution is single-threaded, so no
    /// data-races are possible; accesses made before this point
    /// happen-before any spawned thread (via the thread-creation
    /// edge) and therefore never need to be recorded.
    multi_threaded: Cell<bool>,

    /// A flag marking that we are currently performing
    /// a data-race-free action (such as an atomic access)
    /// in order to suppress the race detector.
    ongoing_action_data_race_free: Cell<bool>,

    /// Mapping of a vector index to a known set of thread
    /// clocks. This is not keyed by thread id, since a vector
    /// index may be re-used by several threads over time.
    vector_clocks: RefCell<IndexVec<VectorIdx, ThreadClockSet>>,

    /// Mapping of a given vector index to the thread that the
    /// execution is currently representing; this may change
    /// if a vector index is re-assigned to a new thread.
    vector_info: RefCell<IndexVec<VectorIdx, ThreadId>>,

    /// The mapping of a given thread to its associated thread metadata.
    thread_info: RefCell<IndexVec<ThreadId, ThreadExtraState>>,

    /// Potential vector indices that could be re-used on thread creation.
    /// Values are inserted here after a thread has terminated and
    /// been joined with, and hence may potentially become free
    /// for use as the index for a new thread.
    /// Elements in this set may still be required to report data-races,
    /// and can only be re-used once all active vector-clocks have
    /// caught up with the terminated thread's timestamp.
    reuse_candidates: RefCell<FxHashSet<VectorIdx>>,

    /// Threads that have terminated but not yet been joined, and so
    /// cannot become re-use candidates until a join operation occurs.
    /// The associated vector index is moved into `reuse_candidates`
    /// once the join operation occurs.
    terminated_threads: RefCell<FxHashMap<ThreadId, VectorIdx>>,

    /// The timestamp of the last SC fence performed by each thread.
    last_sc_fence: RefCell<VClock>,

    /// The timestamp of the last SC write performed by each thread.
    last_sc_write: RefCell<VClock>,

    /// Track when an outdated (weak memory) load happens.
    pub track_outdated_loads: bool,
}

impl VisitTags for GlobalState {
    fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) {
        // We don't have any tags.
    }
}

impl GlobalState {
    /// Create a new global state, set up with just thread-id=0
    /// advanced to timestamp = 1.
    pub fn new(config: &MiriConfig) -> Self {
        let mut global_state = GlobalState {
            multi_threaded: Cell::new(false),
            ongoing_action_data_race_free: Cell::new(false),
            vector_clocks: RefCell::new(IndexVec::new()),
            vector_info: RefCell::new(IndexVec::new()),
            thread_info: RefCell::new(IndexVec::new()),
            reuse_candidates: RefCell::new(FxHashSet::default()),
            terminated_threads: RefCell::new(FxHashMap::default()),
            last_sc_fence: RefCell::new(VClock::default()),
            last_sc_write: RefCell::new(VClock::default()),
            track_outdated_loads: config.track_outdated_loads,
        };

        // Set up the main thread since it is not explicitly created:
        // it uses vector index and thread-id 0.
        let index = global_state.vector_clocks.get_mut().push(ThreadClockSet::default());
        global_state.vector_info.get_mut().push(ThreadId::new(0));
        global_state
            .thread_info
            .get_mut()
            .push(ThreadExtraState { vector_index: Some(index), termination_vector_clock: None });

        global_state
    }

    // We perform data race detection when there is more than one active thread
    // and we have not temporarily disabled race detection to perform something
    // data-race free.
    fn race_detecting(&self) -> bool {
        self.multi_threaded.get() && !self.ongoing_action_data_race_free.get()
    }

    pub fn ongoing_action_data_race_free(&self) -> bool {
        self.ongoing_action_data_race_free.get()
    }

    // Try to find a vector index that can be re-used by a new thread
    // instead of allocating a new vector index.
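    // For example (illustrative): if candidate index `c` last ticked its own clock
    // to timestamp 5, it can only be re-used once every other vector clock `v`
    // satisfies `v[c] >= 5`, or `v` itself belongs to a terminated (or already
    // re-usable) thread.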
    fn find_vector_index_reuse_candidate(&self) -> Option<VectorIdx> {
        let mut reuse = self.reuse_candidates.borrow_mut();
        let vector_clocks = self.vector_clocks.borrow();
        let vector_info = self.vector_info.borrow();
        let terminated_threads = self.terminated_threads.borrow();
        for &candidate in reuse.iter() {
            let target_timestamp = vector_clocks[candidate].clock[candidate];
            if vector_clocks.iter_enumerated().all(|(clock_idx, clock)| {
                // The candidate's timestamp happens before this clock, and hence this
                // vector cannot report a data-race with the candidate index.
                let no_data_race = clock.clock[candidate] >= target_timestamp;

                // The vector represents a thread that has terminated and hence cannot
                // report a data-race with the candidate index.
                let thread_id = vector_info[clock_idx];
                let vector_terminated =
                    reuse.contains(&clock_idx) || terminated_threads.contains_key(&thread_id);

                // The vector index cannot report a race with the candidate index
                // and hence allows the candidate index to be re-used.
                no_data_race || vector_terminated
            }) {
                // Every vector clock has caught up with the target timestamp or
                // belongs to a terminated thread, therefore this vector index
                // cannot report any more data-races.
                assert!(reuse.remove(&candidate));
                return Some(candidate);
            }
        }
        None
    }

    // Hook for thread creation: enables multi-threaded execution and marks
    // the current thread's timestamp as happening-before the created thread.
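    // For example (illustrative): if the parent writes `x = 1` and then spawns a
    // child, the child's initial clock already contains the parent's timestamp for
    // that write, so a later read of `x` in the child is not reported as a race.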
    #[inline]
    pub fn thread_created(
        &mut self,
        thread_mgr: &ThreadManager<'_, '_>,
        thread: ThreadId,
        current_span: Span,
    ) {
        let current_index = self.current_index(thread_mgr);

        // Enable multi-threaded execution, there are now at least two threads
        // so data-races are now possible.
        self.multi_threaded.set(true);

        // Load and set up the associated thread metadata.
        let mut thread_info = self.thread_info.borrow_mut();
        thread_info.ensure_contains_elem(thread, Default::default);

        // Assign a vector index for the thread, attempting to re-use an old
        // vector index that can no longer report any data-races if possible.
        let created_index = if let Some(reuse_index) = self.find_vector_index_reuse_candidate() {
            // Now re-configure the re-use candidate, increment the clock
            // for the new sync use of the vector.
            let vector_clocks = self.vector_clocks.get_mut();
            vector_clocks[reuse_index].increment_clock(reuse_index, current_span);

            // Locate the old thread the vector was associated with and update
            // it to represent the new thread instead.
            let vector_info = self.vector_info.get_mut();
            let old_thread = vector_info[reuse_index];
            vector_info[reuse_index] = thread;

            // Mark the thread the vector index was associated with as no longer
            // representing a thread index.
            thread_info[old_thread].vector_index = None;

            reuse_index
        } else {
            // No vector re-use candidates available, instead create
            // a new vector index.
            let vector_info = self.vector_info.get_mut();
            vector_info.push(thread)
        };

        log::trace!("Creating thread = {:?} with vector index = {:?}", thread, created_index);

        // Mark the chosen vector index as in use by the thread.
        thread_info[thread].vector_index = Some(created_index);

        // Create a thread clock set if applicable.
        let vector_clocks = self.vector_clocks.get_mut();
        if created_index == vector_clocks.next_index() {
            vector_clocks.push(ThreadClockSet::default());
        }

        // Now load the two clocks and configure the initial state.
        let (current, created) = vector_clocks.pick2_mut(current_index, created_index);

        // Join the created with current, since the current thread's
        // previous actions happen-before the created thread.
        created.join_with(current);

        // Advance both threads after the synchronized operation.
        // Both operations are considered to have release semantics.
        current.increment_clock(current_index, current_span);
        created.increment_clock(created_index, current_span);
    }

    /// Hook on a thread join to update the implicit happens-before relation between the joined
    /// thread (the joinee, the thread that someone waited on) and the current thread (the joiner,
    /// the thread who was waiting).
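    /// For example (illustrative): a value written by the joinee just before it
    /// terminated can be read by the joiner after the join without a race being
    /// reported.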
    #[inline]
    pub fn thread_joined(
        &mut self,
        thread_mgr: &ThreadManager<'_, '_>,
        joiner: ThreadId,
        joinee: ThreadId,
    ) {
        let clocks_vec = self.vector_clocks.get_mut();
        let thread_info = self.thread_info.get_mut();

        // Load the vector clock of the current thread.
        let current_index = thread_info[joiner]
            .vector_index
            .expect("Performed thread join on thread with no assigned vector");
        let current = &mut clocks_vec[current_index];

        // Load the associated vector clock for the terminated thread.
        let join_clock = thread_info[joinee]
            .termination_vector_clock
            .as_ref()
            .expect("Joined with thread but thread has not terminated");

        // The joined thread happens-before the current thread,
        // so update the current vector clock.
        // This is not a release operation, so the clock is not incremented.
        current.clock.join(join_clock);

        // Check the number of live threads; if the value is 1
        // then test for potentially disabling multi-threaded execution.
        if thread_mgr.get_live_thread_count() == 1 {
            // May potentially be able to disable multi-threaded execution.
            let current_clock = &clocks_vec[current_index];
            if clocks_vec
                .iter_enumerated()
                .all(|(idx, clocks)| clocks.clock[idx] <= current_clock.clock[idx])
            {
                // All thread terminations happen-before the current clock,
                // therefore no data-races can be reported until a new thread
                // is created, so disable multi-threaded execution.
                self.multi_threaded.set(false);
            }
        }

        // If the thread is marked as terminated but not joined
        // then move the thread to the re-use set.
        let termination = self.terminated_threads.get_mut();
        if let Some(index) = termination.remove(&joinee) {
            let reuse = self.reuse_candidates.get_mut();
            reuse.insert(index);
        }
    }

    /// On thread termination, the vector clock may be re-used
    /// in the future once all remaining thread-clocks catch
    /// up with the timestamp of the terminated thread.
    /// This advances the clock to a unique termination timestamp
    /// and records the termination vector clock used to join on the thread.
    /// This should be called strictly before any calls to
    /// `thread_joined`.
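    /// Note (illustrative): the termination clock recorded here is exactly what
    /// `thread_joined` later joins into the joiner's clock, so the terminating
    /// thread's final timestamp is included in that happens-before edge.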
    #[inline]
    pub fn thread_terminated(&mut self, thread_mgr: &ThreadManager<'_, '_>, current_span: Span) {
        let current_index = self.current_index(thread_mgr);

        // Increment the clock to a unique termination timestamp.
        let vector_clocks = self.vector_clocks.get_mut();
        let current_clocks = &mut vector_clocks[current_index];
        current_clocks.increment_clock(current_index, current_span);

        // Load the current thread id for the executing vector.
        let vector_info = self.vector_info.get_mut();
        let current_thread = vector_info[current_index];

        // Load the current thread metadata, and move to a terminated
        // vector state, setting up the vector clock that all join operations
        // will use.
        let thread_info = self.thread_info.get_mut();
        let current = &mut thread_info[current_thread];
        current.termination_vector_clock = Some(current_clocks.clock.clone());

        // Add this thread as a candidate for re-use after a thread join
        // occurs.
        let termination = self.terminated_threads.get_mut();
        termination.insert(current_thread, current_index);
    }

    /// Attempt to perform a synchronized operation; this
    /// will perform no operation if multi-threading is
    /// not currently enabled.
    /// Otherwise it calls `op` with the current vector index and thread
    /// clocks and, if `op` returns `Ok(true)` (i.e. it had release semantics),
    /// increments the clock for the current vector afterwards, for data-race
    /// detection across any happens-before edges the operation may create.
    fn maybe_perform_sync_operation<'tcx>(
        &self,
        thread_mgr: &ThreadManager<'_, '_>,
        current_span: Span,
        op: impl FnOnce(VectorIdx, RefMut<'_, ThreadClockSet>) -> InterpResult<'tcx, bool>,
    ) -> InterpResult<'tcx> {
        if self.multi_threaded.get() {
            let (index, clocks) = self.current_thread_state_mut(thread_mgr);
            if op(index, clocks)? {
                let (_, mut clocks) = self.current_thread_state_mut(thread_mgr);
                clocks.increment_clock(index, current_span);
            }
        }
        Ok(())
    }

    /// Internal utility to identify a thread stored internally,
    /// returning a string with the thread's name for better diagnostics.
    fn print_thread_metadata(
        &self,
        thread_mgr: &ThreadManager<'_, '_>,
        vector: VectorIdx,
    ) -> String {
        let thread = self.vector_info.borrow()[vector];
        let thread_name = thread_mgr.get_thread_name(thread);
        format!("thread `{}`", String::from_utf8_lossy(thread_name))
    }

    /// Acquire a lock, express that the previous call of
    /// `validate_lock_release` must happen before this.
    /// As this is an acquire operation, the thread timestamp is not
    /// incremented.
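    ///
    /// A hypothetical lock implementation would pair these calls roughly as
    /// follows (sketch, not the actual Miri sync machinery): the unlock calls
    /// `validate_lock_release(&mut lock_clock, unlocking_thread, span)` and a
    /// later lock on another thread calls
    /// `validate_lock_acquire(&lock_clock, locking_thread)`, which transfers the
    /// unlocking thread's clock into the locking thread and so makes the unlock
    /// happen-before the subsequent lock.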
    pub fn validate_lock_acquire(&self, lock: &VClock, thread: ThreadId) {
        let (_, mut clocks) = self.load_thread_state_mut(thread);
        clocks.clock.join(lock);
    }

    /// Release a lock handle, express that this happens-before
    /// any subsequent calls to `validate_lock_acquire`.
    /// For normal locks this should be equivalent to `validate_lock_release_shared`,
    /// since an acquire operation should have occurred before; however,
    /// for futex & condvar operations this is not the case and this
    /// operation must be used.
    pub fn validate_lock_release(&self, lock: &mut VClock, thread: ThreadId, current_span: Span) {
        let (index, mut clocks) = self.load_thread_state_mut(thread);
        lock.clone_from(&clocks.clock);
        clocks.increment_clock(index, current_span);
    }

    /// Release a lock handle, express that this happens-before
    /// any subsequent calls to `validate_lock_acquire` as well
    /// as any previous calls to this function after any
    /// `validate_lock_release` calls.
    /// For normal locks this should be equivalent to `validate_lock_release`.
    /// This function only exists for joining over the set of concurrent readers
    /// in a read-write lock and should not be used for anything else.
    pub fn validate_lock_release_shared(
        &self,
        lock: &mut VClock,
        thread: ThreadId,
        current_span: Span,
    ) {
        let (index, mut clocks) = self.load_thread_state_mut(thread);
        lock.join(&clocks.clock);
        clocks.increment_clock(index, current_span);
    }

    /// Load the vector index used by the given thread as well as the set of vector clocks
    /// used by the thread.
    #[inline]
    fn load_thread_state_mut(&self, thread: ThreadId) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
        let index = self.thread_info.borrow()[thread]
            .vector_index
            .expect("Loading thread state for thread with no assigned vector");
        let ref_vector = self.vector_clocks.borrow_mut();
        let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
        (index, clocks)
    }

    /// Load the current vector clock in use and the current set of thread clocks
    /// in use for the vector.
    #[inline]
    pub(super) fn current_thread_state(
        &self,
        thread_mgr: &ThreadManager<'_, '_>,
    ) -> (VectorIdx, Ref<'_, ThreadClockSet>) {
        let index = self.current_index(thread_mgr);
        let ref_vector = self.vector_clocks.borrow();
        let clocks = Ref::map(ref_vector, |vec| &vec[index]);
        (index, clocks)
    }

    /// Load the current vector clock in use and the current set of thread clocks
    /// in use for the vector mutably for modification.
    #[inline]
    pub(super) fn current_thread_state_mut(
        &self,
        thread_mgr: &ThreadManager<'_, '_>,
    ) -> (VectorIdx, RefMut<'_, ThreadClockSet>) {
        let index = self.current_index(thread_mgr);
        let ref_vector = self.vector_clocks.borrow_mut();
        let clocks = RefMut::map(ref_vector, |vec| &mut vec[index]);
        (index, clocks)
    }

    /// Return the vector index of the currently active thread;
    /// this should be the same as the data-race active thread.
    #[inline]
    fn current_index(&self, thread_mgr: &ThreadManager<'_, '_>) -> VectorIdx {
        let active_thread_id = thread_mgr.get_active_thread_id();
        self.thread_info.borrow()[active_thread_id]
            .vector_index
            .expect("active thread has no assigned vector")
    }

    // SC ATOMIC STORE rule in the paper.
    pub(super) fn sc_write(&self, thread_mgr: &ThreadManager<'_, '_>) {
        let (index, clocks) = self.current_thread_state(thread_mgr);
        self.last_sc_write.borrow_mut().set_at_index(&clocks.clock, index);
    }

    // SC ATOMIC READ rule in the paper.
    pub(super) fn sc_read(&self, thread_mgr: &ThreadManager<'_, '_>) {
        let (.., mut clocks) = self.current_thread_state_mut(thread_mgr);
        clocks.read_seqcst.join(&self.last_sc_fence.borrow());
    }
}