/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section(_ftrace_annotated_branch)	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section(_ftrace_branch)		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
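
/*
 * Illustrative sketch (not part of this header): likely()/unlikely()
 * annotate which way a branch is expected to go, so the compiler can
 * lay out the hot path first. A canonical pattern is treating
 * allocation failure as the cold path:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *	if (unlikely(!buf))
 *		return -ENOMEM;
 *
 * The hints affect only code layout and branch prediction, never
 * semantics.
 */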

/* Optimization barrier */
#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("": : :"memory")
#endif
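
/*
 * Illustrative sketch: because barrier() clobbers "memory", the
 * compiler must assume any memory may have changed across it, so a
 * busy-wait loop re-reads a (hypothetical) flag on every iteration
 * instead of caching it in a register:
 *
 *	while (!done)
 *		barrier();
 */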

#ifndef barrier_data
/*
 * This version exists, e.g., to prevent dead-store elimination on @ptr
 * where gcc and llvm may behave differently when otherwise using
 * normal barrier(): while gcc's behavior gets along with a normal
 * barrier(), llvm needs an explicit input variable to be assumed
 * clobbered. The issue is as follows: while the inline asm might
 * access any memory it wants, the compiler could have fit all of
 * @ptr into registers instead, and since @ptr never escaped
 * from there, it proved that the inline asm wasn't touching any of
 * it. This version works well with both compilers, i.e. we're telling
 * the compiler that the inline asm absolutely may see the contents
 * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
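
/*
 * Illustrative sketch, modelled on memzero_explicit() in lib/string.c:
 * when wiping secrets off the stack, barrier_data() passes the buffer
 * into the asm so the preceding memset() cannot be proven dead and
 * eliminated:
 *
 *	static inline void wipe_secret(void *buf, size_t len)
 *	{
 *		memset(buf, 0, len);
 *		barrier_data(buf);
 *	}
 */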

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(.rodata..c_jump_table)

#ifdef CONFIG_DEBUG_ENTRY
/* Begin/end of an instrumentation safe region */
#define instrumentation_begin() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.instr_begin\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})

/*
 * Because instrumentation_{begin,end}() can nest, objtool validation considers
 * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
 * When the value is greater than 0, we consider instrumentation allowed.
 *
 * There is a problem with code like:
 *
 * noinstr void foo()
 * {
 *	instrumentation_begin();
 *	...
 *	if (cond) {
 *		instrumentation_begin();
 *		...
 *		instrumentation_end();
 *	}
 *	bar();
 *	instrumentation_end();
 * }
 *
 * If instrumentation_end() were an empty label, like all the other
 * annotations, the inner _end(), which is at the end of a conditional block,
 * would land on the instruction after the block.
 *
 * If we then consider the sum of the !cond path, we'll see that the call to
 * bar() happens with a 0-value, even though we meant it to happen with a
 * positive value.
 *
 * To avoid this, have _end() be a NOP instruction; this ensures it will be
 * part of the condition block and does not escape.
 */
#define instrumentation_end() ({					\
	asm volatile("%c0: nop\n\t"					\
		     ".pushsection .discard.instr_end\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#endif /* CONFIG_DEBUG_ENTRY */

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef instrumentation_begin
#define instrumentation_begin()		do { } while (0)
#define instrumentation_end()		do { } while (0)
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
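
/*
 * Illustrative sketch (enum and helpers hypothetical): unreachable()
 * tells the compiler and objtool that control cannot flow past this
 * point, e.g. after an exhaustive switch, avoiding bogus "control
 * reaches end of non-void function" warnings without emitting dead
 * code:
 *
 *	switch (state) {
 *	case STATE_ON:  return handle_on();
 *	case STATE_OFF: return handle_off();
 *	}
 *	unreachable();
 */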

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif
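
/*
 * Illustrative sketch (symbol hypothetical): keep a handler that is
 * referenced only from assembly or found programmatically alive across
 * linker section garbage collection:
 *
 *	void my_vector_stub(void);
 *	KENTRY(my_vector_stub);
 */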

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#define absolute_pointer(val)	RELOC_HIDE((void *)(val), 0)
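
/*
 * Illustrative sketch (address purely illustrative): absolute_pointer()
 * launders a fixed address through RELOC_HIDE() so the compiler cannot
 * apply array-bounds or overflow analysis to the constant pointer:
 *
 *	const void *rom = absolute_pointer(0xf0000);
 */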

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
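
/*
 * Illustrative sketch, in the style of crypto_memneq() in lib/memneq.c:
 * hiding the accumulator after each step keeps the compiler from
 * value-tracking it and short-circuiting a constant-time comparison:
 *
 *	diff |= a[i] ^ b[i];
 *	OPTIMIZER_HIDE_VAR(diff);
 */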

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
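
/*
 * With this fallback, __UNIQUE_ID(foo) used on line 42 expands to
 * __UNIQUE_ID_foo42: unique within a file unless used twice on one
 * line. Compiler-specific headers normally override it with a
 * __COUNTER__-based version that is unique per translation unit.
 */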

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build
 * failure: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits), READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and then one for the macro doing the copy of the
 * variable '__u' allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u =			\
		{ .__val = (__force typeof(x)) (val) };			\
	__write_once_size(&(x), __u.__c, sizeof(x));			\
	__u.__val;							\
})
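
/*
 * Illustrative sketch (structure and flag hypothetical): a lockless
 * flag shared between an interrupt handler and process context. The
 * ONCE accessors stop the compiler from hoisting the load out of the
 * loop or tearing/merging the store:
 *
 *	In the irq handler:
 *		WRITE_ONCE(ctx->ready, 1);
 *
 *	In process context:
 *		while (!READ_ONCE(ctx->ready))
 *			cpu_relax();
 */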

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(.discard.addressable) __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off: the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
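
/*
 * Illustrative sketch (table hypothetical): 32-bit self-relative
 * offsets, where each entry stores "target - &entry", halve the size
 * of pointer tables on 64-bit and need no load-time relocation;
 * offset_to_ptr() recovers the absolute pointer:
 *
 *	extern const int handler_offsets[];
 *	void (*fn)(void) = offset_to_ptr(&handler_offsets[idx]);
 *	fn();
 */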

#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
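
/*
 * Illustrative sketch (struct name hypothetical): used inside a
 * function, this breaks the build when a layout assumption is
 * violated:
 *
 *	compiletime_assert(sizeof(struct foo_hdr) == 64,
 *			   "struct foo_hdr must remain 64 bytes");
 */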

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
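
/*
 * This is exactly how ARRAY_SIZE() in <linux/kernel.h> uses it: the
 * BUILD_BUG_ON_ZERO() term evaluates to 0 for a true array but breaks
 * the build if a pointer is passed by mistake:
 *
 *	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 */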

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#endif /* __LINUX_COMPILER_H */