//! This module defines various operations and types that are implemented in
//! one way for the serial compiler, and another way for the parallel compiler.
//!
//! Operations
//! ----------
//! The parallel versions of operations use Rayon to execute code in parallel,
//! while the serial versions degenerate straightforwardly to serial execution.
//! The operations include `join`, `parallel`, `par_iter`, and `par_for_each`.
//!
//! Types
//! -----
//! The parallel versions of types provide various kinds of synchronization,
//! while the serial compiler versions do not.
//!
//! The following table shows how the types are implemented internally. Except
//! where noted otherwise, the type in column one is defined as a
//! newtype around the type from column two or three.
//!
//! | Type                    | Serial version      | Parallel version                |
//! | ----------------------- | ------------------- | ------------------------------- |
//! | `Lrc<T>`                | `rc::Rc<T>`         | `sync::Arc<T>`                  |
//! | `Weak<T>`               | `rc::Weak<T>`       | `sync::Weak<T>`                 |
//! |                         |                     |                                 |
//! | `AtomicBool`            | `Cell<bool>`        | `atomic::AtomicBool`            |
//! | `AtomicU32`             | `Cell<u32>`         | `atomic::AtomicU32`             |
//! | `AtomicU64`             | `Cell<u64>`         | `atomic::AtomicU64`             |
//! | `AtomicUsize`           | `Cell<usize>`       | `atomic::AtomicUsize`           |
//! |                         |                     |                                 |
//! | `Lock<T>`               | `RefCell<T>`        | `parking_lot::Mutex<T>`         |
//! | `RwLock<T>`             | `RefCell<T>`        | `parking_lot::RwLock<T>`        |
//! | `MTLock<T>`        [^1] | `T`                 | `Lock<T>`                       |
//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>`                 |
//! |                         |                     |                                 |
//! | `ParallelIterator`      | `Iterator`          | `rayon::iter::ParallelIterator` |
//!
//! [^1]: `MTLock` is similar to `Lock`, but the serial version avoids the cost
//! of a `RefCell`. This is appropriate when interior mutability is not
//! required.
//!
//! [^2]: `MTLockRef` is a typedef.
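//!
//! The practical upshot is that downstream code is written once against a
//! single API and gets either behavior depending on how rustc is built. A
//! minimal sketch of the idea (illustrative only, not a doctest):
//!
//! ```ignore (illustrative)
//! use rustc_data_structures::sync::Lock;
//!
//! // `Lock<T>` is a `RefCell<T>` in the serial compiler and a
//! // `parking_lot::Mutex<T>` in the parallel one; callers are the same.
//! let counter = Lock::new(0_u32);
//! *counter.lock() += 1;
//! assert_eq!(*counter.lock(), 1);
//! ```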

pub use crate::marker::*;
use std::collections::HashMap;
use std::hash::{BuildHasher, Hash};
use std::ops::{Deref, DerefMut};
use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};

mod worker_local;
pub use worker_local::{Registry, WorkerLocal};

pub use std::sync::atomic::Ordering;
pub use std::sync::atomic::Ordering::SeqCst;

pub use vec::{AppendOnlyIndexVec, AppendOnlyVec};

mod vec;

mod mode {
    use super::Ordering;
    use std::sync::atomic::AtomicU8;

    const UNINITIALIZED: u8 = 0;
    const DYN_NOT_THREAD_SAFE: u8 = 1;
    const DYN_THREAD_SAFE: u8 = 2;

    static DYN_THREAD_SAFE_MODE: AtomicU8 = AtomicU8::new(UNINITIALIZED);

    // Whether thread safety is enabled (due to running under multiple threads).
    #[inline]
    pub fn is_dyn_thread_safe() -> bool {
        match DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) {
            DYN_NOT_THREAD_SAFE => false,
            DYN_THREAD_SAFE => true,
            _ => panic!("uninitialized dyn_thread_safe mode!"),
        }
    }

    // Only set by the `-Z threads` compile option.
    pub fn set_dyn_thread_safe_mode(mode: bool) {
        let set: u8 = if mode { DYN_THREAD_SAFE } else { DYN_NOT_THREAD_SAFE };
        let previous = DYN_THREAD_SAFE_MODE.compare_exchange(
            UNINITIALIZED,
            set,
            Ordering::Relaxed,
            Ordering::Relaxed,
        );

        // Check that the mode was either uninitialized or was already set to the requested mode.
        assert!(previous.is_ok() || previous == Err(set));
    }
}

pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode};
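
// A minimal sketch of the flag's intended lifecycle, assuming the driver sets
// it exactly once at startup (e.g. from `-Z threads`) before any queries:
//
//     set_dyn_thread_safe_mode(true);            // once, during startup
//     if is_dyn_thread_safe() { /* Rayon path */ } else { /* serial path */ }
//
// Querying before the mode is set panics, and `set_dyn_thread_safe_mode`
// asserts that an already-set mode is never changed to a different one.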

cfg_if! {
    if #[cfg(not(parallel_compiler))] {
        pub unsafe auto trait Send {}
        pub unsafe auto trait Sync {}

        unsafe impl<T> Send for T {}
        unsafe impl<T> Sync for T {}

        use std::ops::Add;

        /// This is a single-threaded variant of `AtomicU64`, `AtomicUsize`, etc.
        /// It has explicit ordering arguments and is only intended for use with
        /// the native atomic types.
        /// You should use this type through the `AtomicU64`, `AtomicUsize`, etc.,
        /// type aliases, as it's not intended to be used separately.
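        ///
        /// A minimal sketch of the shared interface (illustrative; the same
        /// calls compile against the real `std::sync::atomic` types in the
        /// parallel build):
        ///
        /// ```ignore (illustrative)
        /// let flag = AtomicBool::new(false);
        /// flag.store(true, Ordering::Relaxed);
        /// assert!(flag.load(Ordering::Relaxed));
        /// ```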
        #[derive(Debug, Default)]
        pub struct Atomic<T: Copy>(Cell<T>);

        impl<T: Copy> Atomic<T> {
            #[inline]
            pub fn new(v: T) -> Self {
                Atomic(Cell::new(v))
            }

            #[inline]
            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            #[inline]
            pub fn load(&self, _: Ordering) -> T {
                self.0.get()
            }

            #[inline]
            pub fn store(&self, val: T, _: Ordering) {
                self.0.set(val)
            }

            #[inline]
            pub fn swap(&self, val: T, _: Ordering) -> T {
                self.0.replace(val)
            }
        }

        impl Atomic<bool> {
            pub fn fetch_or(&self, val: bool, _: Ordering) -> bool {
                let old = self.0.get();
                self.0.set(val | old);
                old
            }
            pub fn fetch_and(&self, val: bool, _: Ordering) -> bool {
                let old = self.0.get();
                self.0.set(val & old);
                old
            }
        }

        impl<T: Copy + PartialEq> Atomic<T> {
            #[inline]
            pub fn compare_exchange(&self,
                                    current: T,
                                    new: T,
                                    _: Ordering,
                                    _: Ordering)
                                    -> Result<T, T> {
                let read = self.0.get();
                if read == current {
                    self.0.set(new);
                    Ok(read)
                } else {
                    Err(read)
                }
            }
        }

        impl<T: Add<Output=T> + Copy> Atomic<T> {
            #[inline]
            pub fn fetch_add(&self, val: T, _: Ordering) -> T {
                let old = self.0.get();
                self.0.set(old + val);
                old
            }
        }

        pub type AtomicUsize = Atomic<usize>;
        pub type AtomicBool = Atomic<bool>;
        pub type AtomicU32 = Atomic<u32>;
        pub type AtomicU64 = Atomic<u64>;

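        /// The serial analogue of `rayon::join`: both closures simply run in
        /// order on the current thread. A minimal sketch (illustrative only):
        ///
        /// ```ignore (illustrative)
        /// let (a, b) = join(|| 1 + 1, || "two");
        /// assert_eq!(a, 2);
        /// assert_eq!(b, "two");
        /// ```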
        pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
            where A: FnOnce() -> RA,
                  B: FnOnce() -> RB
        {
            (oper_a(), oper_b())
        }

        #[macro_export]
        macro_rules! parallel {
            ($($blocks:block),*) => {
                // We catch panics here, ensuring that all the blocks execute.
                // This makes behavior consistent with the parallel compiler.
                let mut panic = None;
                $(
                    if let Err(p) = ::std::panic::catch_unwind(
                        ::std::panic::AssertUnwindSafe(|| $blocks)
                    ) {
                        if panic.is_none() {
                            panic = Some(p);
                        }
                    }
                )*
                if let Some(panic) = panic {
                    ::std::panic::resume_unwind(panic);
                }
            }
        }

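        /// Serial stand-in for the parallel `par_for_each_in`: it iterates in
        /// order, but still runs every iteration even if one panics, resuming
        /// the first panic at the end to match the parallel behavior.
        ///
        /// A minimal sketch (illustrative only):
        ///
        /// ```ignore (illustrative)
        /// let sum = Lock::new(0);
        /// par_for_each_in(1..=3, |i| *sum.lock() += i);
        /// assert_eq!(sum.into_inner(), 6);
        /// ```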
        pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) {
            // We catch panics here, ensuring that all the loop iterations execute.
            // This makes behavior consistent with the parallel compiler.
            let mut panic = None;
            t.into_iter().for_each(|i| {
                if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
                    if panic.is_none() {
                        panic = Some(p);
                    }
                }
            });
            if let Some(panic) = panic {
                resume_unwind(panic);
            }
        }

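        /// Serial stand-in for the parallel `par_map`: maps each item in order
        /// and collects the results, deferring any panic until every iteration
        /// has run. A minimal sketch (illustrative only):
        ///
        /// ```ignore (illustrative)
        /// let doubled: Vec<_> = par_map(1..=3, |i| i * 2);
        /// assert_eq!(doubled, vec![2, 4, 6]);
        /// ```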
        pub fn par_map<T: IntoIterator, R, C: FromIterator<R>>(
            t: T,
            mut map: impl FnMut(<<T as IntoIterator>::IntoIter as Iterator>::Item) -> R,
        ) -> C {
            // We catch panics here, ensuring that all the loop iterations execute.
            let mut panic = None;
            let r = t.into_iter().filter_map(|i| {
                match catch_unwind(AssertUnwindSafe(|| map(i))) {
                    Ok(r) => Some(r),
                    Err(p) => {
                        if panic.is_none() {
                            panic = Some(p);
                        }
                        None
                    }
                }
            }).collect();
            if let Some(panic) = panic {
                resume_unwind(panic);
            }
            r
        }

        pub use std::rc::Rc as Lrc;
        pub use std::rc::Weak as Weak;
        pub use std::cell::Ref as ReadGuard;
        pub use std::cell::Ref as MappedReadGuard;
        pub use std::cell::RefMut as WriteGuard;
        pub use std::cell::RefMut as MappedWriteGuard;
        pub use std::cell::RefMut as LockGuard;
        pub use std::cell::RefMut as MappedLockGuard;

        pub use std::cell::OnceCell;

        use std::cell::RefCell as InnerRwLock;
        use std::cell::RefCell as InnerLock;

        use std::cell::Cell;

        pub type MTLockRef<'a, T> = &'a mut MTLock<T>;

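        /// In the serial build `MTLock` is a plain newtype, so `lock` is just
        /// a borrow with no `RefCell` bookkeeping. A minimal sketch
        /// (illustrative only):
        ///
        /// ```ignore (illustrative)
        /// let mut shared = MTLock::new(vec![1, 2]);
        /// shared.lock_mut().push(3);
        /// assert_eq!(shared.lock().len(), 3);
        /// ```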
        #[derive(Debug, Default)]
        pub struct MTLock<T>(T);

        impl<T> MTLock<T> {
            #[inline(always)]
            pub fn new(inner: T) -> Self {
                MTLock(inner)
            }

            #[inline(always)]
            pub fn into_inner(self) -> T {
                self.0
            }

            #[inline(always)]
            pub fn get_mut(&mut self) -> &mut T {
                &mut self.0
            }

            #[inline(always)]
            pub fn lock(&self) -> &T {
                &self.0
            }

            #[inline(always)]
            pub fn lock_mut(&mut self) -> &mut T {
                &mut self.0
            }
        }

        // FIXME: Probably a bad idea (in the threaded case)
        impl<T: Clone> Clone for MTLock<T> {
            #[inline]
            fn clone(&self) -> Self {
                MTLock(self.0.clone())
            }
        }
    } else {
        pub use std::marker::Send as Send;
        pub use std::marker::Sync as Sync;

        pub use parking_lot::RwLockReadGuard as ReadGuard;
        pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
        pub use parking_lot::RwLockWriteGuard as WriteGuard;
        pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;

        pub use parking_lot::MutexGuard as LockGuard;
        pub use parking_lot::MappedMutexGuard as MappedLockGuard;

        pub use std::sync::OnceLock as OnceCell;

        pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};

        pub use std::sync::Arc as Lrc;
        pub use std::sync::Weak as Weak;

        pub type MTLockRef<'a, T> = &'a MTLock<T>;

        #[derive(Debug, Default)]
        pub struct MTLock<T>(Lock<T>);

        impl<T> MTLock<T> {
            #[inline(always)]
            pub fn new(inner: T) -> Self {
                MTLock(Lock::new(inner))
            }

            #[inline(always)]
            pub fn into_inner(self) -> T {
                self.0.into_inner()
            }

            #[inline(always)]
            pub fn get_mut(&mut self) -> &mut T {
                self.0.get_mut()
            }

            #[inline(always)]
            pub fn lock(&self) -> LockGuard<'_, T> {
                self.0.lock()
            }

            #[inline(always)]
            pub fn lock_mut(&self) -> LockGuard<'_, T> {
                self.lock()
            }
        }

        use parking_lot::Mutex as InnerLock;
        use parking_lot::RwLock as InnerRwLock;

        use std::thread;

        #[inline]
        pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB)
        where
            A: FnOnce() -> RA + DynSend,
            B: FnOnce() -> RB + DynSend,
        {
            if mode::is_dyn_thread_safe() {
                let oper_a = FromDyn::from(oper_a);
                let oper_b = FromDyn::from(oper_b);
                let (a, b) = rayon::join(
                    move || FromDyn::from(oper_a.into_inner()()),
                    move || FromDyn::from(oper_b.into_inner()()),
                );
                (a.into_inner(), b.into_inner())
            } else {
                (oper_a(), oper_b())
            }
        }

        // This function only works when `mode::is_dyn_thread_safe()` is true.
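        //
        // A hedged usage sketch (`task_a` and `task_b` are hypothetical):
        //
        //     scope(|s| {
        //         s.spawn(move |_| task_a());
        //         task_b()
        //     });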
        pub fn scope<'scope, OP, R>(op: OP) -> R
        where
            OP: FnOnce(&rayon::Scope<'scope>) -> R + DynSend,
            R: DynSend,
        {
            let op = FromDyn::from(op);
            rayon::scope(|s| FromDyn::from(op.into_inner()(s))).into_inner()
        }

        /// Runs a list of blocks in parallel. The first block is executed immediately on
        /// the current thread. Use that for the longest-running block.
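        ///
        /// An illustrative call (the block bodies are placeholders):
        ///
        /// ```ignore (illustrative)
        /// parallel!(
        ///     { long_running_work() }, // executed immediately on this thread
        ///     { small_task_a() },
        ///     { small_task_b() }
        /// );
        /// ```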
        #[macro_export]
        macro_rules! parallel {
            (impl $fblock:block [$($c:expr,)*] [$block:expr $(, $rest:expr)*]) => {
                parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
            };
            (impl $fblock:block [$($blocks:expr,)*] []) => {
                ::rustc_data_structures::sync::scope(|s| {
                    $(let block = rustc_data_structures::sync::FromDyn::from(|| $blocks);
                    s.spawn(move |_| block.into_inner()());)*
                    (|| $fblock)();
                });
            };
            ($fblock:block, $($blocks:block),*) => {
                if rustc_data_structures::sync::is_dyn_thread_safe() {
                    // Reverse the order of the later blocks since Rayon executes them in reverse order
                    // when using a single thread. This ensures the execution order matches that
                    // of a single-threaded rustc.
                    parallel!(impl $fblock [] [$($blocks),*]);
                } else {
                    // We catch panics here, ensuring that all the blocks execute.
                    // This makes behavior consistent with the parallel compiler.
                    let mut panic = None;
                    if let Err(p) = ::std::panic::catch_unwind(
                        ::std::panic::AssertUnwindSafe(|| $fblock)
                    ) {
                        if panic.is_none() {
                            panic = Some(p);
                        }
                    }
                    $(
                        if let Err(p) = ::std::panic::catch_unwind(
                            ::std::panic::AssertUnwindSafe(|| $blocks)
                        ) {
                            if panic.is_none() {
                                panic = Some(p);
                            }
                        }
                    )*
                    if let Some(panic) = panic {
                        ::std::panic::resume_unwind(panic);
                    }
                }
            };
        }

        use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelIterator};

        pub fn par_for_each_in<I, T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>>(
            t: T,
            for_each: impl Fn(I) + DynSync + DynSend
        ) {
            if mode::is_dyn_thread_safe() {
                let for_each = FromDyn::from(for_each);
                let panic: Lock<Option<_>> = Lock::new(None);
                t.into_par_iter().for_each(|i| {
                    if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
                        let mut l = panic.lock();
                        if l.is_none() {
                            *l = Some(p)
                        }
                    }
                });

                if let Some(panic) = panic.into_inner() {
                    resume_unwind(panic);
                }
            } else {
                // We catch panics here, ensuring that all the loop iterations execute.
                // This makes behavior consistent with the parallel compiler.
                let mut panic = None;
                t.into_iter().for_each(|i| {
                    if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
                        if panic.is_none() {
                            panic = Some(p);
                        }
                    }
                });
                if let Some(panic) = panic {
                    resume_unwind(panic);
                }
            }
        }

        pub fn par_map<
            I,
            T: IntoIterator<Item = I> + IntoParallelIterator<Item = I>,
            R: std::marker::Send,
            C: FromIterator<R> + FromParallelIterator<R>
        >(
            t: T,
            map: impl Fn(I) -> R + DynSync + DynSend
        ) -> C {
            if mode::is_dyn_thread_safe() {
                let panic: Lock<Option<_>> = Lock::new(None);
                let map = FromDyn::from(map);
                // We catch panics here, ensuring that all the loop iterations execute.
                let r = t.into_par_iter().filter_map(|i| {
                    match catch_unwind(AssertUnwindSafe(|| map(i))) {
                        Ok(r) => Some(r),
                        Err(p) => {
                            let mut l = panic.lock();
                            if l.is_none() {
                                *l = Some(p);
                            }
                            None
                        },
                    }
                }).collect();

                if let Some(panic) = panic.into_inner() {
                    resume_unwind(panic);
                }
                r
            } else {
                // We catch panics here, ensuring that all the loop iterations execute.
                let mut panic = None;
                let r = t.into_iter().filter_map(|i| {
                    match catch_unwind(AssertUnwindSafe(|| map(i))) {
                        Ok(r) => Some(r),
                        Err(p) => {
                            if panic.is_none() {
                                panic = Some(p);
                            }
                            None
                        }
                    }
                }).collect();
                if let Some(panic) = panic {
                    resume_unwind(panic);
                }
                r
            }
        }

        /// This makes locks panic if they are already held.
        /// It is only useful when you are running in a single thread.
        const ERROR_CHECKING: bool = false;
    }
}

#[derive(Default)]
#[cfg_attr(parallel_compiler, repr(align(64)))]
pub struct CacheAligned<T>(pub T);

pub trait HashMapExt<K, V> {
    /// Same as `HashMap::insert`, but it may panic if there's already an
    /// entry for `key` with a value not equal to `value`.
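    ///
    /// An illustrative use (hypothetical values):
    ///
    /// ```ignore (illustrative)
    /// let mut map = HashMap::new();
    /// map.insert_same("key", 1); // inserts
    /// map.insert_same("key", 1); // fine: same value already present
    /// // map.insert_same("key", 2); // would panic: conflicting value
    /// ```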
    fn insert_same(&mut self, key: K, value: V);
}

impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
    fn insert_same(&mut self, key: K, value: V) {
        self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);
    }
}

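/// A mutual-exclusion wrapper: `RefCell<T>` in the serial compiler,
/// `parking_lot::Mutex<T>` in the parallel one (see the module docs).
///
/// A minimal sketch of the shared API (illustrative only):
///
/// ```ignore (illustrative)
/// let lock = Lock::new(String::new());
/// lock.with_lock(|s| s.push_str("hello"));
/// assert_eq!(lock.lock().as_str(), "hello");
/// ```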
#[derive(Debug)]
pub struct Lock<T>(InnerLock<T>);

impl<T> Lock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        Lock(InnerLock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        self.0.try_lock()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
        self.0.try_borrow_mut().ok()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    #[track_caller]
    pub fn lock(&self) -> LockGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_lock().expect("lock was already held")
        } else {
            self.0.lock()
        }
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    #[track_caller]
    pub fn lock(&self) -> LockGuard<'_, T> {
        self.0.borrow_mut()
    }

    #[inline(always)]
    #[track_caller]
    pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.lock())
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow(&self) -> LockGuard<'_, T> {
        self.lock()
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow_mut(&self) -> LockGuard<'_, T> {
        self.lock()
    }
}

impl<T: Default> Default for Lock<T> {
    #[inline]
    fn default() -> Self {
        Lock::new(T::default())
    }
}

#[derive(Debug, Default)]
pub struct RwLock<T>(InnerRwLock<T>);

impl<T> RwLock<T> {
    #[inline(always)]
    pub fn new(inner: T) -> Self {
        RwLock(InnerRwLock::new(inner))
    }

    #[inline(always)]
    pub fn into_inner(self) -> T {
        self.0.into_inner()
    }

    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        self.0.get_mut()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    #[track_caller]
    pub fn read(&self) -> ReadGuard<'_, T> {
        self.0.borrow()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn read(&self) -> ReadGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_read().expect("lock was already held")
        } else {
            self.0.read()
        }
    }

    #[inline(always)]
    #[track_caller]
    pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
        f(&*self.read())
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_borrow_mut().map_err(|_| ())
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
        self.0.try_write().ok_or(())
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    #[track_caller]
    pub fn write(&self) -> WriteGuard<'_, T> {
        self.0.borrow_mut()
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn write(&self) -> WriteGuard<'_, T> {
        if ERROR_CHECKING {
            self.0.try_write().expect("lock was already held")
        } else {
            self.0.write()
        }
    }

    #[inline(always)]
    #[track_caller]
    pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
        f(&mut *self.write())
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow(&self) -> ReadGuard<'_, T> {
        self.read()
    }

    #[inline(always)]
    #[track_caller]
    pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
        self.write()
    }

    #[cfg(not(parallel_compiler))]
    #[inline(always)]
    pub fn leak(&self) -> &T {
        ReadGuard::leak(self.read())
    }

    #[cfg(parallel_compiler)]
    #[inline(always)]
    pub fn leak(&self) -> &T {
        let guard = self.read();
        let ret = unsafe { &*(&*guard as *const T) };
        std::mem::forget(guard);
        ret
    }
}

// FIXME: Probably a bad idea
impl<T: Clone> Clone for RwLock<T> {
    #[inline]
    fn clone(&self) -> Self {
        RwLock::new(self.borrow().clone())
    }
}

/// A type which only allows its inner value to be used in one thread.
/// It will panic if it is used on multiple threads.
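///
/// An illustrative sketch of the contract (in the parallel build, the owning
/// thread is recorded at construction and checked on every access):
///
/// ```ignore (illustrative)
/// let local = OneThread::new(5);
/// assert_eq!(*local, 5); // fine: accessed from the creating thread
/// // Dereferencing `local` from any other thread would panic in `check`.
/// ```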
#[derive(Debug)]
pub struct OneThread<T> {
    #[cfg(parallel_compiler)]
    thread: thread::ThreadId,
    inner: T,
}

#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Sync for OneThread<T> {}
#[cfg(parallel_compiler)]
unsafe impl<T> std::marker::Send for OneThread<T> {}

impl<T> OneThread<T> {
    #[inline(always)]
    fn check(&self) {
        #[cfg(parallel_compiler)]
        assert_eq!(thread::current().id(), self.thread);
    }

    #[inline(always)]
    pub fn new(inner: T) -> Self {
        OneThread {
            #[cfg(parallel_compiler)]
            thread: thread::current().id(),
            inner,
        }
    }

    #[inline(always)]
    pub fn into_inner(value: Self) -> T {
        value.check();
        value.inner
    }
}

impl<T> Deref for OneThread<T> {
    type Target = T;

    fn deref(&self) -> &T {
        self.check();
        &self.inner
    }
}

impl<T> DerefMut for OneThread<T> {
    fn deref_mut(&mut self) -> &mut T {
        self.check();
        &mut self.inner
    }
}