/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H

/*
 * Jump label support
 *
 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 *
 * DEPRECATED API:
 *
 * The direct use of 'struct static_key' is now DEPRECATED. In addition,
 * static_key_{true,false}() are also DEPRECATED. I.e. DO NOT use the following:
 *
 * struct static_key false = STATIC_KEY_INIT_FALSE;
 * struct static_key true = STATIC_KEY_INIT_TRUE;
 * static_key_true()
 * static_key_false()
 *
 * The updated API replacements are:
 *
 * DEFINE_STATIC_KEY_TRUE(key);
 * DEFINE_STATIC_KEY_FALSE(key);
 * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
 * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
 * static_branch_likely()
 * static_branch_unlikely()
 *
 * Jump labels provide an interface to generate dynamic branches using
 * self-modifying code. Assuming toolchain and architecture support, if we
 * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
 * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
 * (which defaults to false - and the true block is placed out of line).
 * Similarly, we can define an initially true key via
 * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
 * "if (static_branch_unlikely(&key))", in which case we will generate an
 * unconditional branch to the out-of-line true branch. Keys that are
 * initially true or false can be used in both static_branch_unlikely()
 * and static_branch_likely() statements.
 *
 * At runtime we can change the branch target by setting the key
 * to true via a call to static_branch_enable(), or false using
 * static_branch_disable(). If the direction of the branch is switched by
 * these calls then we modify the branch target at run time via a
 * no-op -> jump or jump -> no-op conversion. For example, for an
 * initially false key that is used in an "if (static_branch_unlikely(&key))"
 * statement, setting the key to true requires us to patch in a jump
 * to the out-of-line true branch.
 *
 * In addition to static_branch_{enable,disable}, we can also reference count
 * the key or branch direction via static_branch_{inc,dec}. Thus,
 * static_branch_inc() can be thought of as a 'make more true' and
 * static_branch_dec() as a 'make more false'.
 *
 * Since this relies on modifying code, the branch modifying functions
 * must be considered absolute slow paths (machine wide synchronization etc.).
 * OTOH, since the affected branches are unconditional, their runtime overhead
 * will be absolutely minimal, esp. in the default (off) case where the total
 * effect is a single NOP of appropriate size. The on case will patch in a jump
 * to the out-of-line block.
 *
 * When the control is directly exposed to userspace, it is prudent to delay the
 * decrement to avoid high frequency code modifications which can (and do)
 * cause significant performance degradation. Struct static_key_deferred and
 * static_key_slow_dec_deferred() provide for this.
 *
 * Lacking toolchain and/or architecture support, static keys fall back to a
 * simple conditional branch.
 *
 * Additional babbling in: Documentation/staging/static-keys.rst
 */
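
/*
 * Illustrative usage sketch (not part of this header's API surface). The key
 * and function names below are hypothetical; only the DEFINE_STATIC_KEY_*()
 * and static_branch_*() macros documented above are real.
 *
 *	DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	void my_hot_path(void)
 *	{
 *		if (static_branch_unlikely(&my_feature_key)) {
 *			// out-of-line slow path, only reached once the key
 *			// has been enabled at run time
 *			do_extra_accounting();	// hypothetical helper
 *		}
 *	}
 *
 *	void my_feature_setup(bool on)
 *	{
 *		if (on)
 *			static_branch_enable(&my_feature_key);	// patch NOP -> JMP
 *		else
 *			static_branch_disable(&my_feature_key);	// patch JMP -> NOP
 *	}
 */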

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>

extern bool static_key_initialized;

#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized,		      \
				    "%s(): static key '%pS' used before call to jump_label_init()", \
				    __func__, (key))

#ifdef CONFIG_JUMP_LABEL

struct static_key {
	atomic_t enabled;
/*
 * Note:
 *   To make anonymous unions work with old compilers, the static
 *   initialization of them requires brackets. This creates a dependency
 *   on the order of the struct with the initializers. If any fields
 *   are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
 *   to be modified.
 *
 * bit 0 => 1 if key is initially true
 *	    0 if initially false
 * bit 1 => 1 if points to struct static_key_mod
 *	    0 if points to struct jump_entry
 */
	union {
		unsigned long type;
		struct jump_entry *entries;
		struct static_key_mod *next;
	};
};
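
/*
 * Illustrative sketch only (the real decoding lives in kernel/jump_label.c):
 * the two low bits described above are folded into the pointer stored in
 * ::type, so a consumer could in principle split them apart like this
 * (the JUMP_TYPE_* masks are defined further down in this header):
 *
 *	bool initially_true = key->type & JUMP_TYPE_TRUE;
 *	bool linked         = key->type & JUMP_TYPE_LINKED;
 *	struct jump_entry *entries =
 *		(struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
 */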

#else
struct static_key {
	atomic_t enabled;
};
#endif	/* CONFIG_JUMP_LABEL */
#endif /* __ASSEMBLY__ */

#if defined(CONFIG_JUMP_LABEL) && !defined(BUILD_FIPS140_KO)
#include <asm/jump_label.h>

#ifndef __ASSEMBLY__
#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE

struct jump_entry {
	s32 code;
	s32 target;
	long key;	// key may be far away from the core kernel under KASLR
};

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return (unsigned long)&entry->code + entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return (unsigned long)&entry->target + entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	long offset = entry->key & ~3L;

	return (struct static_key *)((unsigned long)&entry->key + offset);
}

#else

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~3UL);
}

#endif

static inline bool jump_entry_is_branch(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

static inline bool jump_entry_is_init(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 2UL;
}

static inline void jump_entry_set_init(struct jump_entry *entry)
{
	entry->key |= 2;
}
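
/*
 * Illustrative sketch only: core code such as kernel/jump_label.c walks the
 * __jump_table section (its start/stop symbols are declared below under
 * CONFIG_JUMP_LABEL) with the accessors above, roughly along these lines:
 *
 *	struct jump_entry *iter;
 *
 *	for (iter = __start___jump_table; iter < __stop___jump_table; iter++) {
 *		struct static_key *key = jump_entry_key(iter);
 *		bool branch = jump_entry_is_branch(iter);
 *
 *		// jump_entry_code(iter) is the patch site address,
 *		// jump_entry_target(iter) the out-of-line destination.
 *	}
 */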

#endif
#endif

#ifndef __ASSEMBLY__

enum jump_label_type {
	JUMP_LABEL_NOP = 0,
	JUMP_LABEL_JMP,
};

struct module;

#ifdef BUILD_FIPS140_KO

#include <linux/atomic.h>

static inline int static_key_count(struct static_key *key)
{
	return atomic_read(&key->enabled);
}

static __always_inline bool static_key_false(struct static_key *key)
{
	if (unlikely(static_key_count(key) > 0))
		return true;
	return false;
}

static __always_inline bool static_key_true(struct static_key *key)
{
	if (likely(static_key_count(key) > 0))
		return true;
	return false;
}

#elif defined(CONFIG_JUMP_LABEL)

#define JUMP_TYPE_FALSE		0UL
#define JUMP_TYPE_TRUE		1UL
#define JUMP_TYPE_LINKED	2UL
#define JUMP_TYPE_MASK		3UL

static __always_inline bool static_key_false(struct static_key *key)
{
	return arch_static_branch(key, false);
}

static __always_inline bool static_key_true(struct static_key *key)
{
	return !arch_static_branch(key, true);
}

extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];

extern void jump_label_init(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
				      enum jump_label_type type);
extern void arch_jump_label_transform_static(struct jump_entry *entry,
					     enum jump_label_type type);
extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
					    enum jump_label_type type);
extern void arch_jump_label_transform_apply(void);
extern int jump_label_text_reserved(void *start, void *end);
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void static_key_slow_inc_cpuslocked(struct static_key *key);
extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);

/*
 * We should be using ATOMIC_INIT() for initializing .enabled, but
 * the inclusion of atomic.h is problematic for inclusion of jump_label.h
 * in 'low-level' headers. Thus, we are initializing .enabled with a
 * raw value, but have added a BUILD_BUG_ON() to catch any issues in
 * jump_label_init(); see kernel/jump_label.c.
 */
#define STATIC_KEY_INIT_TRUE					\
	{ .enabled = { 1 },					\
	  { .entries = (void *)JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE					\
	{ .enabled = { 0 },					\
	  { .entries = (void *)JUMP_TYPE_FALSE } }

#else  /* !CONFIG_JUMP_LABEL */

#include <linux/atomic.h>
#include <linux/bug.h>

static __always_inline int static_key_count(struct static_key *key)
{
	return arch_atomic_read(&key->enabled);
}

static __always_inline void jump_label_init(void)
{
	static_key_initialized = true;
}

static __always_inline bool static_key_false(struct static_key *key)
{
	if (unlikely(static_key_count(key) > 0))
		return true;
	return false;
}

static __always_inline bool static_key_true(struct static_key *key)
{
	if (likely(static_key_count(key) > 0))
		return true;
	return false;
}

static inline void static_key_slow_inc(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	atomic_inc(&key->enabled);
}

static inline void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	atomic_dec(&key->enabled);
}

#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)

static inline int jump_label_text_reserved(void *start, void *end)
{
	return 0;
}

static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}

static inline int jump_label_apply_nops(struct module *mod)
{
	return 0;
}

static inline void static_key_enable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}
	atomic_set(&key->enabled, 1);
}

static inline void static_key_disable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}
	atomic_set(&key->enabled, 0);
}

#define static_key_enable_cpuslocked(k)		static_key_enable((k))
#define static_key_disable_cpuslocked(k)	static_key_disable((k))

#define STATIC_KEY_INIT_TRUE	{ .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE	{ .enabled = ATOMIC_INIT(0) }

#endif	/* CONFIG_JUMP_LABEL */

#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled

/* -------------------------------------------------------------------------- */

/*
 * Two type wrappers around static_key, such that we can use compile time
 * type differentiation to emit the right code.
 *
 * All the below code is macros in order to play type games.
 */

struct static_key_true {
	struct static_key key;
};

struct static_key_false {
	struct static_key key;
};

#define STATIC_KEY_TRUE_INIT  (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE,  }
#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }

#define DEFINE_STATIC_KEY_TRUE(name)	\
	struct static_key_true name = STATIC_KEY_TRUE_INIT

#define DEFINE_STATIC_KEY_TRUE_RO(name)	\
	struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT

#define DECLARE_STATIC_KEY_TRUE(name)	\
	extern struct static_key_true name

#define DEFINE_STATIC_KEY_FALSE(name)	\
	struct static_key_false name = STATIC_KEY_FALSE_INIT

#define DEFINE_STATIC_KEY_FALSE_RO(name)	\
	struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT

#define DECLARE_STATIC_KEY_FALSE(name)	\
	extern struct static_key_false name

#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count)		\
	struct static_key_true name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_TRUE_INIT,	\
	}

#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count)		\
	struct static_key_false name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_FALSE_INIT,	\
	}

extern bool ____wrong_branch_error(void);

#define static_key_enabled(x)							\
({										\
	if (!__builtin_types_compatible_p(typeof(*x), struct static_key) &&	\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_false))	\
		____wrong_branch_error();					\
	static_key_count((struct static_key *)x) > 0;				\
})
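
/*
 * Illustrative sketch only: static_key_enabled() accepts any of the three key
 * flavours, so with a hypothetical key defined elsewhere one could write:
 *
 *	DEFINE_STATIC_KEY_FALSE(my_feature_key);
 *
 *	if (static_key_enabled(&my_feature_key))
 *		pr_info("feature currently enabled\n");
 *
 * Note this reads the current count; it does not generate a patched branch
 * the way static_branch_{likely,unlikely}() below do.
 */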

#if defined(CONFIG_JUMP_LABEL) && !defined(BUILD_FIPS140_KO)

/*
 * Combine the right initial value (type) with the right branch order
 * to generate the desired result.
 *
 *
 * type\branch|	likely (1)	      |	unlikely (0)
 * -----------+-----------------------+------------------
 *            |                       |
 *  true (1)  |	   ...		      |	   ...
 *            |    NOP		      |	   JMP L
 *            |    <br-stmts>	      |	1: ...
 *            |	L: ...		      |
 *            |			      |
 *            |			      |	L: <br-stmts>
 *            |			      |	   jmp 1b
 *            |                       |
 * -----------+-----------------------+------------------
 *            |                       |
 *  false (0) |	   ...		      |	   ...
 *            |    JMP L	      |	   NOP
 *            |    <br-stmts>	      |	1: ...
 *            |	L: ...		      |
 *            |			      |
 *            |			      |	L: <br-stmts>
 *            |			      |	   jmp 1b
 *            |                       |
 * -----------+-----------------------+------------------
 *
 * The initial value is encoded in the LSB of static_key::entries,
 * type: 0 = false, 1 = true.
 *
 * The branch type is encoded in the LSB of jump_entry::key,
 * branch: 0 = unlikely, 1 = likely.
 *
 * This gives the following logic table:
 *
 *	enabled	type	branch	  instruction
 * -----------------------------+-----------
 *	0	0	0	| NOP
 *	0	0	1	| JMP
 *	0	1	0	| NOP
 *	0	1	1	| JMP
 *
 *	1	0	0	| JMP
 *	1	0	1	| NOP
 *	1	1	0	| JMP
 *	1	1	1	| NOP
 *
 * Which gives the following functions:
 *
 *   dynamic: instruction = enabled ^ branch
 *   static:  instruction = type ^ branch
 *
 * See jump_label_type() / jump_label_init_type().
 */

#define static_branch_likely(x)							\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = !arch_static_branch(&(x)->key, true);			\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = !arch_static_branch_jump(&(x)->key, true);		\
	else									\
		branch = ____wrong_branch_error();				\
	likely(branch);								\
})

#define static_branch_unlikely(x)						\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = arch_static_branch_jump(&(x)->key, false);		\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = arch_static_branch(&(x)->key, false);			\
	else									\
		branch = ____wrong_branch_error();				\
	unlikely(branch);							\
})

#else /* !CONFIG_JUMP_LABEL */

#define static_branch_likely(x)		likely(static_key_enabled(&(x)->key))
#define static_branch_unlikely(x)	unlikely(static_key_enabled(&(x)->key))

#endif /* CONFIG_JUMP_LABEL */

/*
 * Advanced usage; refcount, branch is enabled when: count != 0
 */

#define static_branch_inc(x)		static_key_slow_inc(&(x)->key)
#define static_branch_dec(x)		static_key_slow_dec(&(x)->key)
#define static_branch_inc_cpuslocked(x)	static_key_slow_inc_cpuslocked(&(x)->key)
#define static_branch_dec_cpuslocked(x)	static_key_slow_dec_cpuslocked(&(x)->key)
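
/*
 * Illustrative sketch only, with hypothetical names: the refcounted form is
 * useful when several independent users may want the branch enabled at once.
 *
 *	DEFINE_STATIC_KEY_FALSE(my_tracing_key);
 *
 *	void my_user_register(void)
 *	{
 *		static_branch_inc(&my_tracing_key);	// 0 -> 1 enables the branch
 *	}
 *
 *	void my_user_unregister(void)
 *	{
 *		static_branch_dec(&my_tracing_key);	// 1 -> 0 disables it again
 *	}
 */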

/*
 * Normal usage; boolean enable/disable.
 */

#define static_branch_enable(x)			static_key_enable(&(x)->key)
#define static_branch_disable(x)		static_key_disable(&(x)->key)
#define static_branch_enable_cpuslocked(x)	static_key_enable_cpuslocked(&(x)->key)
#define static_branch_disable_cpuslocked(x)	static_key_disable_cpuslocked(&(x)->key)

#endif /* __ASSEMBLY__ */

#endif	/* _LINUX_JUMP_LABEL_H */