#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")
#endif
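
/*
 * Illustrative sketch (hypothetical ring descriptor and doorbell register,
 * not from this file): the device-facing use case mentioned above is making
 * a descriptor update visible before the MMIO write that tells the hardware
 * to fetch it:
 *
 *	desc->addr = dma_addr;
 *	desc->len  = len;
 *	wmb();
 *	writel(tail, ring->doorbell);
 */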

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 * bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
		unsigned long size)
{
	unsigned long mask;

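	/*
	 * CMP sets the carry flag when index < size (unsigned borrow);
	 * SBB of a register with itself then turns that borrow into the
	 * all-ones mask, and into 0 when the bounds check fails.
	 */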
	asm volatile ("cmp %1,%2; sbb %0,%0;"
			:"=r" (mask)
			:"g"(size),"r" (index)
			:"cc");
	return mask;
}

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec
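
/*
 * Illustrative sketch (hypothetical 'arr', 'nr' and 'idx', not from this
 * file): callers normally consume the mask through array_index_nospec()
 * from <linux/nospec.h> rather than using it directly:
 *
 *	if (idx < nr) {
 *		idx = array_index_nospec(idx, nr);
 *		val = arr[idx];
 *	}
 *
 * If the bounds check is speculatively mispredicted, the mask clamps the
 * index to 0 instead of letting an out-of-bounds value feed the load.
 */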

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
					   "lfence", X86_FEATURE_LFENCE_RDTSC)
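
/*
 * Hedged usage sketch (hypothetical 'opt' and 'handlers', not from this
 * file): barrier_nospec() can be placed after a validity check when index
 * masking is impractical, so speculation does not run ahead with an
 * unchecked value:
 *
 *	if (opt >= NR_OPTS)
 *		return -EINVAL;
 *	barrier_nospec();
 *	handler = handlers[opt];
 */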

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb()	rmb()
#else
#define dma_rmb()	barrier()
#endif
#define dma_wmb()	barrier()

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	dma_rmb()
#define smp_wmb()	barrier()
#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */

#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

#if defined(CONFIG_X86_PPRO_FENCE)

/*
 * For this option x86 doesn't have a strong TSO memory
 * model and we should fall back to full barriers.
 */

#define smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	smp_mb(); \
	WRITE_ONCE(*p, v); \
} while (0)

#define smp_load_acquire(p) \
({ \
	typeof(*p) ___p1 = READ_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	smp_mb(); \
	___p1; \
})

#else /* regular x86 TSO memory ordering */

#define smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	WRITE_ONCE(*p, v); \
} while (0)

#define smp_load_acquire(p) \
({ \
	typeof(*p) ___p1 = READ_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	___p1; \
})

#endif
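
/*
 * Illustrative sketch (hypothetical 'data' and 'ready' fields, not from
 * this file): the message-passing pattern these helpers support is
 *
 *	writer					reader
 *	------					------
 *	s->data = 42;				if (smp_load_acquire(&s->ready))
 *	smp_store_release(&s->ready, 1);		use(s->data);
 *
 * On the regular TSO path both sides compile to plain MOVs plus a compiler
 * barrier; the CONFIG_X86_PPRO_FENCE variant falls back to smp_mb().
 */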

/* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic()	do { } while (0)
#define smp_mb__after_atomic()	do { } while (0)

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions. WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE. The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are fully serializing instructions themselves and
 * do not require this barrier. This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
	asm volatile("mfence; lfence" : : : "memory");
}
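
/*
 * Illustrative sketch (hypothetical 'tsc_deadline' value, not from this
 * file): a non-serializing MSR write such as the TSC deadline is preceded
 * by the fence so that earlier stores are globally visible before the
 * WRMSR takes effect:
 *
 *	weak_wrmsr_fence();
 *	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc_deadline);
 */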

#endif /* _ASM_X86_BARRIER_H */