#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __kernel	__attribute__((address_space(0)))
# define __safe		__attribute__((safe))
# define __force	__attribute__((force))
# define __nocast	__attribute__((nocast))
# define __iomem	__attribute__((noderef, address_space(2)))
# define __must_hold(x)	__attribute__((context(x,1,1)))
# define __acquires(x)	__attribute__((context(x,0,1)))
# define __releases(x)	__attribute__((context(x,1,0)))
# define __acquire(x)	__context__(x,1)
# define __release(x)	__context__(x,-1)
# define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu	__attribute__((noderef, address_space(3)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu		__attribute__((noderef, address_space(4)))
#else /* CONFIG_SPARSE_RCU_POINTER */
# define __rcu
#endif /* CONFIG_SPARSE_RCU_POINTER */
# define __private	__attribute__((noderef))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
# define __user
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif /* __CHECKER__ */
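
/*
 * A minimal sketch of how the sparse annotations above are typically
 * used; the function names here are illustrative, not part of this
 * header:
 *
 *	// 'dst' may not be dereferenced directly; sparse warns if it is.
 *	long demo_copy_out(void __user *dst, const void *src, size_t n);
 *
 *	// Tell sparse that the lock is held on return / dropped on return.
 *	void demo_lock(spinlock_t *l) __acquires(l);
 *	void demo_unlock(spinlock_t *l) __releases(l);
 */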

/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif

/*
 * The Intel compiler defines __GNUC__, so here we override implementations
 * coming from the header files included above.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/*
 * The Clang compiler defines __GNUC__, so here we override implementations
 * coming from the header files included above.
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for the kernel build go
 * below this comment. Actual compiler/compiler-version-specific
 * implementations come from the header files above.
 */

struct ftrace_branch_data {
	const char *func;
	const char *file;
	unsigned line;
	union {
		struct {
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		unsigned long miss_hit[2];
	};
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect) ({					\
			int ______r;					\
			static struct ftrace_branch_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
			______r = likely_notrace(x);			\
			ftrace_likely_update(&______f, ______r, expect); \
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
#  define unlikely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
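
/*
 * An illustrative example of the likely()/unlikely() hints: annotating
 * the expected direction of a branch lets the compiler lay out the
 * common path first. 'do_common_case' is a placeholder, not a real
 * kernel function.
 *
 *	if (unlikely(err))		// error paths are rarely taken
 *		return err;
 *	if (likely(ptr))		// pointer is almost always valid
 *		do_common_case(ptr);
 */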

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((section("___kentry" "+" #sym ), used))	\
	= (unsigned long)&sym;
#endif
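
/*
 * An illustrative use of KENTRY (the symbol name is hypothetical):
 * keep a handler alive even though nothing references it by name.
 *
 *	extern void demo_vector_entry(void);	// referenced only from asm
 *	KENTRY(demo_vector_entry)		// emits a __used pointer to it
 */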

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
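
/*
 * For instance, __UNIQUE_ID(foo) on line 42 of a file expands to the
 * identifier __UNIQUE_ID_foo42. The double __PASTE is what forces
 * __LINE__ to expand before the tokens are glued; pasting directly
 * would yield the literal token __UNIQUE_ID_foo__LINE__.
 */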

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining. Attempting to inline it may cause a build failure.
 * 	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#endif

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE, these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There are at
 * least two memcpy()s: one for the __builtin_memcpy() and one for the
 * macro's copy into the stack variable '__u'.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
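
/*
 * A minimal sketch of the intended usage (variable names are
 * illustrative): a flag shared between process context and an interrupt
 * handler, with no ordering required beyond the accesses themselves.
 *
 *	// writer (e.g. process context)
 *	WRITE_ONCE(shared_flag, 1);
 *
 *	// reader (e.g. irq handler): reloaded on every iteration,
 *	// never fused or optimized away by the compiler.
 *	while (!READ_ONCE(shared_flag))
 *		cpu_relax();
 */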

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 * 		int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated		/* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

#ifndef __malloc
#define __malloc
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file.  As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >=3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used			/* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused		/* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused	/* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead, for documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__	/* unimplemented */
#endif

#ifndef __latent_entropy
# define __latent_entropy
#endif

/*
 * Tell gcc if a function is cold. The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif

#ifndef __visible
#define __visible
#endif

#ifndef __norecordmcount
#define __norecordmcount
#endif

#ifndef __nocfi
#define __nocfi
#endif

/*
 * Assume alignment of return value.
 */
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif


/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
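
/*
 * Illustrative uses of the two helpers above (BUILD_BUG_ON() comes from
 * include/linux/bug.h, not this header; the surrounding code is
 * hypothetical):
 *
 *	// Build-time check that two variables share a type:
 *	BUILD_BUG_ON(!__same_type(a, b));
 *
 *	// True for char/short/int/long sized types, i.e. types whose
 *	// plain loads and stores can be assumed single-copy atomic:
 *	if (__native_word(x))
 *		...
 */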

#ifndef __optimize
# define __optimize(level)
#endif

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
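
/*
 * For example (hypothetical struct): fail the build, rather than the
 * boot, if a structure outgrows its reserved slot. The condition must
 * be a compile-time constant.
 *
 *	compiletime_assert(sizeof(struct demo_hdr) <= 64,
 *			   "struct demo_hdr must fit in 64 bytes");
 */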

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
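
/*
 * Legacy usage sketch, scalars only; prefer READ_ONCE()/WRITE_ONCE().
 * 'shared_counter' is an illustrative name, not defined here.
 *
 *	tmp = ACCESS_ONCE(shared_counter);	// forced single read
 *	ACCESS_ONCE(shared_counter) = tmp + 1;	// forced single write
 */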

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 *
 * The seemingly unused variable ___typecheck_p validates that @p is
 * indeed a pointer type by using a pointer to typeof(*p) as the type.
 * Taking a pointer to typeof(*p) again is needed in case p is void *.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	typeof(*(p)) *___typecheck_p __maybe_unused; \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})
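
/*
 * Usage sketch; the refcounted 'global_cfg' pointer and its type are
 * hypothetical:
 *
 *	struct demo_cfg *cfg = lockless_dereference(global_cfg);
 *	if (cfg)
 *		use(cfg->value);	// ordered after the pointer load
 */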

/* Ignore/forbid kprobes attach on very low-level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes	__attribute__((__section__(".kprobes.text")))
# define nokprobe_inline	__always_inline
#else
# define __kprobes
# define nokprobe_inline	inline
#endif
#endif /* __LINUX_COMPILER_H */