/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
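
/*
 * Usage sketch (illustrative, not part of the original header):
 * likely()/unlikely() hint the expected truth value of a condition so the
 * compiler can keep the common case on the straight-line path, e.g.:
 *
 *	ptr = kmalloc(size, GFP_KERNEL);
 *	if (unlikely(!ptr))
 *		return -ENOMEM;
 *
 * The hint only affects code layout; it does not change semantics.
 */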

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif
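
/*
 * Usage sketch (illustrative; scrub_buf() is a hypothetical helper):
 * barrier() stops the compiler from reordering or caching memory accesses
 * across it, and barrier_data(ptr) additionally makes the compiler assume
 * the memory behind ptr is still needed, so a preceding store cannot be
 * eliminated as dead:
 *
 *	static inline void scrub_buf(void *buf, size_t len)
 *	{
 *		memset(buf, 0, len);
 *		barrier_data(buf);
 *	}
 */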

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
#define annotate_reachable() ({						\
	asm("%c0:\n\t"							\
	    ".pushsection .discard.reachable\n\t"			\
	    ".long %c0b - .\n\t"					\
	    ".popsection\n\t" : : "i" (__COUNTER__));			\
})
#define annotate_unreachable() ({					\
	asm("%c0:\n\t"							\
	    ".pushsection .discard.unreachable\n\t"			\
	    ".long %c0b - .\n\t"					\
	    ".popsection\n\t" : : "i" (__COUNTER__));			\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
#endif
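
/*
 * Usage sketch (illustrative; the enum and function are hypothetical):
 * unreachable() marks a point that control flow can never reach, which
 * silences "missing return" warnings and lets the compiler discard any
 * code that would follow:
 *
 *	int mode_to_flags(enum mode m)
 *	{
 *		switch (m) {
 *		case MODE_A: return 1;
 *		case MODE_B: return 2;
 *		}
 *		unreachable();
 *	}
 */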

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((section("___kentry" "+" #sym ), used))	\
	= (unsigned long)&sym;
#endif
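
/*
 * Usage sketch (illustrative; early_trap_handler is a hypothetical symbol):
 *
 *	KENTRY(early_trap_handler);
 *
 * keeps the handler alive under --gc-sections even though nothing
 * references it by name, via the generated __kentry_early_trap_handler
 * entry in the "___kentry+early_trap_handler" section.
 */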

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
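
/*
 * Usage sketch (illustrative; names are hypothetical): RELOC_HIDE() adds an
 * offset to a pointer while hiding the arithmetic from the compiler, so it
 * cannot assume the result still points into the original object. The
 * per-CPU accessors use this kind of shift to relocate a pointer by a
 * per-CPU offset:
 *
 *	struct stats *s = RELOC_HIDE(&base_stats, cpu_offset);
 */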

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
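
/*
 * Expansion sketch (illustrative): __UNIQUE_ID(foo) pastes the prefix with
 * the current line number, so on line 42 it becomes __UNIQUE_ID_foo42. A
 * macro can use it to declare a helper identifier without clashing when the
 * macro appears more than once in a file; two expansions on the same line
 * would still collide, hence "not-quite-unique".
 */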

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address conflicts
 * with inlining. Attempting to inline it may cause a build failure.
 *	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There are at
 * least two memcpy()s: one for the __builtin_memcpy() and one for the
 * macro's copy into the '__u' variable allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
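
/*
 * Usage sketch (illustrative; shared_flag and handle_work() are
 * hypothetical): when process context and an interrupt handler share a
 * plain variable without a lock, accessing it through READ_ONCE() and
 * WRITE_ONCE() makes each access happen exactly once, without tearing,
 * merging or refetching:
 *
 *	// process context
 *	WRITE_ONCE(shared_flag, 1);
 *
 *	// interrupt handler
 *	if (READ_ONCE(shared_flag))
 *		handle_work();
 */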

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifndef __optimize
# define __optimize(level)
#endif

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
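
/*
 * Usage sketch (illustrative; struct wire_hdr is hypothetical): the
 * condition must be a compile-time constant, and a false value becomes a
 * build error carrying the message:
 *
 *	compiletime_assert(sizeof(struct wire_hdr) == 16,
 *			   "wire_hdr must stay 16 bytes");
 *
 * BUILD_BUG_ON() and friends are layered on this same mechanism.
 */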

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
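
/*
 * Usage sketch (illustrative; 'stop' is a hypothetical flag): ACCESS_ONCE()
 * forces the load to be redone on every loop iteration instead of being
 * hoisted out by the compiler:
 *
 *	while (!ACCESS_ONCE(stop))
 *		cpu_relax();
 *
 * New code should prefer READ_ONCE()/WRITE_ONCE() for the same effect.
 */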

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 *
 * The seemingly unused variable ___typecheck_p validates that @p is
 * indeed a pointer type by using a pointer to typeof(*p) as the type.
 * Taking a pointer to typeof(*p) again is needed in case p is void *.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	typeof(*(p)) *___typecheck_p __maybe_unused; \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})
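
/*
 * Usage sketch (illustrative; global_cfg and struct cfg are hypothetical):
 * the writer publishes a fully initialised object with smp_store_release(),
 * and readers load it with lockless_dereference() so that later accesses
 * through the pointer are ordered after the load:
 *
 *	// writer
 *	smp_store_release(&global_cfg, new_cfg);
 *
 *	// reader
 *	struct cfg *c = lockless_dereference(global_cfg);
 *	if (c)
 *		use(c->value);
 */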

#endif /* __LINUX_COMPILER_H */