use core::fmt;
use core::ops::{Deref, DerefMut};

#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
// Starting from Intel's Sandy Bridge, the spatial prefetcher pulls in pairs of 64-byte cache
// lines at a time, so we have to align to 128 bytes rather than 64.
//
// Sources:
// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
pub(crate) struct CachePadded<T> {
    value: T,
}

unsafe impl<T: Send> Send for CachePadded<T> {}
unsafe impl<T: Sync> Sync for CachePadded<T> {}

impl<T> CachePadded<T> {
    pub(crate) fn new(t: T) -> CachePadded<T> {
        CachePadded::<T> { value: t }
    }
}

impl<T> Deref for CachePadded<T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.value
    }
}

impl<T> DerefMut for CachePadded<T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}

impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("CachePadded")
            .field("value", &self.value)
            .finish()
    }
}

impl<T> From<T> for CachePadded<T> {
    fn from(t: T) -> Self {
        CachePadded::new(t)
    }
}
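
A hypothetical usage sketch, not part of this file: the Counters struct and false_sharing_demo function below are illustrative names, and the sketch assumes it lives in the same crate as CachePadded (the type is pub(crate)) and that the toolchain supports std::thread::scope (Rust 1.63+). Each hot counter is wrapped in CachePadded so the two writer threads touch separate cache-line pairs and avoid false sharing; Deref lets the AtomicU64 methods be called directly on the wrapper.

use std::sync::atomic::{AtomicU64, Ordering};
use std::thread;

// Illustrative only: two counters that are updated from different threads.
struct Counters {
    hits: CachePadded<AtomicU64>,
    misses: CachePadded<AtomicU64>,
}

fn false_sharing_demo() {
    let counters = Counters {
        hits: CachePadded::new(AtomicU64::new(0)),
        misses: CachePadded::new(AtomicU64::new(0)),
    };

    // On x86_64 each field is 128-byte aligned, so the two hot counters never
    // share a prefetched pair of 64-byte cache lines.
    thread::scope(|s| {
        s.spawn(|| {
            for _ in 0..1_000_000 {
                // Deref coercion: fetch_add is an AtomicU64 method.
                counters.hits.fetch_add(1, Ordering::Relaxed);
            }
        });
        s.spawn(|| {
            for _ in 0..1_000_000 {
                counters.misses.fetch_add(1, Ordering::Relaxed);
            }
        });
    });

    assert_eq!(counters.hits.load(Ordering::Relaxed), 1_000_000);
    assert_eq!(counters.misses.load(Ordering::Relaxed), 1_000_000);
}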