/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/barrier.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_BARRIER_H
#define __ASM_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/kasan-checks.h>

#define __nops(n)	".rept " #n "\nnop\n.endr\n"
#define nops(n)		asm volatile(__nops(n))

#define sev()		asm volatile("sev" : : : "memory")
#define wfe()		asm volatile("wfe" : : : "memory")
#define wfi()		asm volatile("wfi" : : : "memory")

#define isb()		asm volatile("isb" : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")

#define psb_csync()	asm volatile("hint #17" : : : "memory")
#define __tsb_csync()	asm volatile("hint #18" : : : "memory")
#define csdb()		asm volatile("hint #20" : : : "memory")

#define spec_bar()	asm volatile(ALTERNATIVE("dsb nsh\nisb\n",		\
						 SB_BARRIER_INSN"nop\n",	\
						 ARM64_HAS_SB))

#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync()						\
	do {							\
		extern struct static_key_false gic_pmr_sync;	\
								\
		if (static_branch_unlikely(&gic_pmr_sync))	\
			dsb(sy);				\
	} while (0)
#else
#define pmr_sync()	do {} while (0)
#endif
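
/*
 * Illustrative sketch only, not used by this header: pmr_sync() is meant
 * to follow a write to ICC_PMR_EL1 when pseudo-NMI priority masking is in
 * use, so that the new priority mask takes effect before subsequent
 * instructions. A hypothetical caller (register accessor names assumed):
 *
 *	write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1);
 *	pmr_sync();
 */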

#define mb()		dsb(sy)
#define rmb()		dsb(ld)
#define wmb()		dsb(st)

#define dma_mb()	dmb(osh)
#define dma_rmb()	dmb(oshld)
#define dma_wmb()	dmb(oshst)
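
/*
 * Illustrative sketch only (hypothetical driver structures): dma_wmb()
 * orders CPU writes to coherent DMA memory before the later write that
 * hands ownership of a descriptor to the device, e.g.:
 *
 *	desc->addr = buf_dma_addr;
 *	desc->len  = buf_len;
 *	dma_wmb();
 *	desc->flags = DESC_OWNED_BY_DEVICE;
 */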

#define tsb_csync()								\
	do {									\
		/*								\
		 * CPUs affected by Arm Erratum 2054223 or 2067961 need		\
		 * another TSB to ensure the trace is flushed. The barriers	\
		 * don't have to be strictly back to back, as long as the	\
		 * CPU is in trace prohibited state.				\
		 */								\
		if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE))	\
			__tsb_csync();						\
		__tsb_csync();							\
	} while (0)

/*
 * Generate a mask for array_index_nospec() that is ~0UL when 0 <= idx < sz
 * and 0 otherwise.
 */
#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long idx,
						    unsigned long sz)
{
	unsigned long mask;

	asm volatile(
	"	cmp	%1, %2\n"
	"	sbc	%0, xzr, xzr\n"
	: "=r" (mask)
	: "r" (idx), "Ir" (sz)
	: "cc");

	csdb();
	return mask;
}
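
/*
 * Illustrative use only, not part of this header: the generic
 * array_index_nospec() helper in <linux/nospec.h> combines this mask with
 * the index, so a speculated out-of-bounds access is clamped to index 0.
 * A hypothetical bounds-checked lookup (names assumed):
 *
 *	if (idx >= ARRAY_SIZE(table))
 *		return -EINVAL;
 *	idx = array_index_nospec(idx, ARRAY_SIZE(table));
 *	return table[idx];
 */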

/*
 * Ensure that reads of the counter are treated the same as memory reads
 * for the purposes of ordering by subsequent memory barriers.
 *
 * This insanity brought to you by speculative system register reads,
 * out-of-order memory accesses, sequence locks and Thomas Gleixner.
 *
 * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
 */
#define arch_counter_enforce_ordering(val) do {				\
	u64 tmp, _val = (val);						\
									\
	asm volatile(							\
	"	eor	%0, %1, %1\n"					\
	"	add	%0, sp, %0\n"					\
	"	ldr	xzr, [%0]"					\
	: "=r" (tmp) : "r" (_val));					\
} while (0)
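
/*
 * Illustrative sketch only, mirroring the counter accessors in
 * <asm/arch_timer.h> (assumed caller): the counter value is fed into a
 * dummy address calculation and load, so any later barrier that orders
 * the load also orders the system register read.
 *
 *	u64 cnt = read_sysreg(cntvct_el0);
 *
 *	arch_counter_enforce_ordering(cnt);
 *	return cnt;
 */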

#define __smp_mb()	dmb(ish)
#define __smp_rmb()	dmb(ishld)
#define __smp_wmb()	dmb(ishst)

#define __smp_store_release(p, v)					\
do {									\
	typeof(p) __p = (p);						\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u =	\
		{ .__val = (__force __unqual_scalar_typeof(*p)) (v) };	\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_write(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("stlrb %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u8 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 2:								\
		asm volatile ("stlrh %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u16 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 4:								\
		asm volatile ("stlr %w1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u32 *)__u.__c)		\
				: "memory");				\
		break;							\
	case 8:								\
		asm volatile ("stlr %1, %0"				\
				: "=Q" (*__p)				\
				: "r" (*(__u64 *)__u.__c)		\
				: "memory");				\
		break;							\
	}								\
} while (0)
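
/*
 * Illustrative writer-side sketch only (hypothetical 'data' and 'ready'
 * variables), using the generic smp_store_release() wrapper built on the
 * helper above: the STLR guarantees the store to 'data' is observable
 * before the store to 'ready'.
 *
 *	data = compute_value();
 *	smp_store_release(&ready, 1);
 */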

#define __smp_load_acquire(p)						\
({									\
	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u;	\
	typeof(p) __p = (p);						\
	compiletime_assert_atomic_type(*p);				\
	kasan_check_read(__p, sizeof(*p));				\
	switch (sizeof(*p)) {						\
	case 1:								\
		asm volatile ("ldarb %w0, %1"				\
			: "=r" (*(__u8 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 2:								\
		asm volatile ("ldarh %w0, %1"				\
			: "=r" (*(__u16 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 4:								\
		asm volatile ("ldar %w0, %1"				\
			: "=r" (*(__u32 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	case 8:								\
		asm volatile ("ldar %0, %1"				\
			: "=r" (*(__u64 *)__u.__c)			\
			: "Q" (*__p) : "memory");			\
		break;							\
	}								\
	(typeof(*p))__u.__val;						\
})
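
/*
 * Illustrative reader-side sketch only, pairing with the writer example
 * above (hypothetical 'data' and 'ready' variables): the LDAR guarantees
 * that if 'ready' is observed as 1, the subsequent read of 'data' sees
 * the writer's store.
 *
 *	if (smp_load_acquire(&ready))
 *		consume(data);
 */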

#define smp_cond_load_relaxed(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = READ_ONCE(*__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})

#define smp_cond_load_acquire(ptr, cond_expr)				\
({									\
	typeof(ptr) __PTR = (ptr);					\
	__unqual_scalar_typeof(*ptr) VAL;				\
	for (;;) {							\
		VAL = smp_load_acquire(__PTR);				\
		if (cond_expr)						\
			break;						\
		__cmpwait_relaxed(__PTR, VAL);				\
	}								\
	(typeof(*ptr))VAL;						\
})
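
/*
 * Illustrative sketch only (hypothetical 'flag' variable): wait for
 * another CPU to publish a non-zero value. Inside the condition, VAL
 * holds the most recently loaded value; __cmpwait_relaxed() lets the CPU
 * idle in WFE between reloads instead of busy-polling.
 *
 *	unsigned long v = smp_cond_load_acquire(&flag, VAL != 0);
 */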

#include <asm-generic/barrier.h>

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_BARRIER_H */