/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H

/*
 * Jump label support
 *
 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 *
 * DEPRECATED API:
 *
 * The use of 'struct static_key' directly is now DEPRECATED. In addition,
 * static_key_{true,false}() is also DEPRECATED, i.e. DO NOT use the following:
 *
 * struct static_key false = STATIC_KEY_INIT_FALSE;
 * struct static_key true = STATIC_KEY_INIT_TRUE;
 * static_key_true()
 * static_key_false()
 *
 * The updated API replacements are:
 *
 * DEFINE_STATIC_KEY_TRUE(key);
 * DEFINE_STATIC_KEY_FALSE(key);
 * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
 * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
 * static_branch_likely()
 * static_branch_unlikely()
 *
 * Jump labels provide an interface to generate dynamic branches using
 * self-modifying code. Assuming toolchain and architecture support, if we
 * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
 * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
 * (which defaults to false - and the true block is placed out of line).
 * Similarly, we can define an initially true key via
 * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
 * "if (static_branch_unlikely(&key))" statement, in which case we will
 * generate an unconditional branch to the out-of-line true branch. Keys that
 * are initially true or false can be used in both static_branch_unlikely()
 * and static_branch_likely() statements.
 *
 * At runtime we can change the branch target by setting the key
 * to true via a call to static_branch_enable(), or false using
 * static_branch_disable(). If the direction of the branch is switched by
 * these calls then we run-time modify the branch target via a
 * no-op -> jump or jump -> no-op conversion. For example, for an
 * initially false key that is used in an "if (static_branch_unlikely(&key))"
 * statement, setting the key to true requires us to patch in a jump
 * to the out-of-line true branch.
 *
 * In addition to static_branch_{enable,disable}, we can also reference count
 * the key or branch direction via static_branch_{inc,dec}. Thus,
 * static_branch_inc() can be thought of as a 'make more true' and
 * static_branch_dec() as a 'make more false'.
 *
 * Since this relies on modifying code, the branch modifying functions
 * must be considered absolute slow paths (machine wide synchronization etc.).
 * OTOH, since the affected branches are unconditional, their runtime overhead
 * will be absolutely minimal, esp. in the default (off) case where the total
 * effect is a single NOP of appropriate size. The on case will patch in a jump
 * to the out-of-line block.
 *
 * When the control is directly exposed to userspace, it is prudent to delay
 * the decrement to avoid high frequency code modifications which can (and do)
 * cause significant performance degradation. Struct static_key_deferred and
 * static_key_slow_dec_deferred() provide for this.
 *
 * Lacking toolchain and/or architecture support, static keys fall back to a
 * simple conditional branch.
 *
 * Additional babbling in: Documentation/static-keys.txt
 */
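
/*
 * A minimal usage sketch (illustrative only; "sample_key", fast_path() and
 * do_rare_work() are hypothetical names, not part of this header):
 *
 *	DEFINE_STATIC_KEY_FALSE(sample_key);
 *
 *	void fast_path(void)
 *	{
 *		if (static_branch_unlikely(&sample_key))
 *			do_rare_work();		// out of line until enabled
 *	}
 *
 *	// In a slow path, flip the branch; the NOP is patched into a JMP:
 *	static_branch_enable(&sample_key);
 *	// ... and back again:
 *	static_branch_disable(&sample_key);
 */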

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>

extern bool static_key_initialized;

#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized,		      \
				    "%s(): static key '%pS' used before call to jump_label_init()", \
				    __func__, (key))

#ifdef CONFIG_JUMP_LABEL

struct static_key {
	atomic_t enabled;
/*
 * Note:
 *   To make anonymous unions work with old compilers, the static
 *   initialization of them requires brackets. This creates a dependency
 *   on the order of the struct with the initializers. If any fields
 *   are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
 *   to be modified.
 *
 * bit 0 => 1 if key is initially true
 *	    0 if initially false
 * bit 1 => 1 if points to struct static_key_mod
 *	    0 if points to struct jump_entry
 */
	union {
		unsigned long type;
		struct jump_entry *entries;
		struct static_key_mod *next;
	};
};

#else
struct static_key {
	atomic_t enabled;
};
#endif	/* CONFIG_JUMP_LABEL */
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_JUMP_LABEL
#include <asm/jump_label.h>

#ifndef __ASSEMBLY__
#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE

struct jump_entry {
	s32 code;
	s32 target;
	long key;	// key may be far away from the core kernel under KASLR
};

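/*
 * With HAVE_ARCH_JUMP_LABEL_RELATIVE, code, target and key are stored as
 * signed offsets relative to the address of the field itself (as the
 * accessors below compute), which keeps each entry small and independent
 * of where the kernel image is finally placed.
 */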
static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return (unsigned long)&entry->code + entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return (unsigned long)&entry->target + entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	long offset = entry->key & ~3L;

	return (struct static_key *)((unsigned long)&entry->key + offset);
}

#else

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~3UL);
}

#endif

static inline bool jump_entry_is_branch(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

static inline bool jump_entry_is_init(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 2UL;
}

static inline void jump_entry_set_init(struct jump_entry *entry)
{
	entry->key |= 2;
}

#endif
#endif

#ifndef __ASSEMBLY__

enum jump_label_type {
	JUMP_LABEL_NOP = 0,
	JUMP_LABEL_JMP,
};

struct module;

#ifdef CONFIG_JUMP_LABEL

#define JUMP_TYPE_FALSE		0UL
#define JUMP_TYPE_TRUE		1UL
#define JUMP_TYPE_LINKED	2UL
#define JUMP_TYPE_MASK		3UL

static __always_inline bool static_key_false(struct static_key *key)
{
	return arch_static_branch(key, false);
}

static __always_inline bool static_key_true(struct static_key *key)
{
	return !arch_static_branch(key, true);
}

extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];

extern void jump_label_init(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
				      enum jump_label_type type);
extern void arch_jump_label_transform_static(struct jump_entry *entry,
					     enum jump_label_type type);
extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
					    enum jump_label_type type);
extern void arch_jump_label_transform_apply(void);
extern int jump_label_text_reserved(void *start, void *end);
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void static_key_slow_inc_cpuslocked(struct static_key *key);
extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);

/*
 * We should be using ATOMIC_INIT() for initializing .enabled, but
 * the inclusion of atomic.h is problematic for inclusion of jump_label.h
 * in 'low-level' headers. Thus, we are initializing .enabled with a
 * raw value, but have added a BUILD_BUG_ON() in jump_label_init() to catch
 * any issues; see kernel/jump_label.c.
 */
#define STATIC_KEY_INIT_TRUE					\
	{ .enabled = { 1 },					\
	  { .entries = (void *)JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE					\
	{ .enabled = { 0 },					\
	  { .entries = (void *)JUMP_TYPE_FALSE } }

#else /* !CONFIG_JUMP_LABEL */

#include <linux/atomic.h>
#include <linux/bug.h>

static inline int static_key_count(struct static_key *key)
{
	return atomic_read(&key->enabled);
}

static __always_inline void jump_label_init(void)
{
	static_key_initialized = true;
}

static __always_inline bool static_key_false(struct static_key *key)
{
	if (unlikely(static_key_count(key) > 0))
		return true;
	return false;
}

static __always_inline bool static_key_true(struct static_key *key)
{
	if (likely(static_key_count(key) > 0))
		return true;
	return false;
}

static inline void static_key_slow_inc(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	atomic_inc(&key->enabled);
}

static inline void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	atomic_dec(&key->enabled);
}

#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)

static inline int jump_label_text_reserved(void *start, void *end)
{
	return 0;
}

static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}

static inline int jump_label_apply_nops(struct module *mod)
{
	return 0;
}

static inline void static_key_enable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}
	atomic_set(&key->enabled, 1);
}

static inline void static_key_disable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}
	atomic_set(&key->enabled, 0);
}

#define static_key_enable_cpuslocked(k)		static_key_enable((k))
#define static_key_disable_cpuslocked(k)	static_key_disable((k))

#define STATIC_KEY_INIT_TRUE	{ .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE	{ .enabled = ATOMIC_INIT(0) }

#endif	/* CONFIG_JUMP_LABEL */

#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled

/* -------------------------------------------------------------------------- */

/*
 * Two type wrappers around static_key, such that we can use compile time
 * type differentiation to emit the right code.
 *
 * All of the code below is implemented as macros in order to play type games.
 */

struct static_key_true {
	struct static_key key;
};

struct static_key_false {
	struct static_key key;
};

#define STATIC_KEY_TRUE_INIT  (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE,  }
#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }

#define DEFINE_STATIC_KEY_TRUE(name)	\
	struct static_key_true name = STATIC_KEY_TRUE_INIT

#define DEFINE_STATIC_KEY_TRUE_RO(name)	\
	struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT

#define DECLARE_STATIC_KEY_TRUE(name)	\
	extern struct static_key_true name

#define DEFINE_STATIC_KEY_FALSE(name)	\
	struct static_key_false name = STATIC_KEY_FALSE_INIT

#define DEFINE_STATIC_KEY_FALSE_RO(name)	\
	struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT

#define DECLARE_STATIC_KEY_FALSE(name)	\
	extern struct static_key_false name

#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count)		\
	struct static_key_true name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_TRUE_INIT,	\
	}

#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count)		\
	struct static_key_false name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_FALSE_INIT,	\
	}
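
/*
 * Illustrative sketch of the array form (hypothetical names, not part of
 * this header): "count" independently patched keys in a single array.
 *
 *	DEFINE_STATIC_KEY_ARRAY_FALSE(sample_keys, 8);
 *
 *	if (static_branch_unlikely(&sample_keys[idx]))
 *		do_rare_work(idx);
 */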

extern bool ____wrong_branch_error(void);

#define static_key_enabled(x)							\
({										\
	if (!__builtin_types_compatible_p(typeof(*x), struct static_key) &&	\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_false))	\
		____wrong_branch_error();					\
	static_key_count((struct static_key *)x) > 0;				\
})

#ifdef CONFIG_JUMP_LABEL

/*
 * Combine the right initial value (type) with the right branch order
 * to generate the desired result.
 *
 *
 * type\branch|	likely (1)	      |	unlikely (0)
 * -----------+-----------------------+------------------
 *            |                       |
 *  true (1)  |	   ...		      |	   ...
 *            |    NOP		      |	   JMP L
 *            |    <br-stmts>	      |	1: ...
 *            |	L: ...		      |
 *            |			      |
 *            |			      |	L: <br-stmts>
 *            |			      |	   jmp 1b
 *            |			      |
 * -----------+-----------------------+------------------
 *            |                       |
 * false (0)  |	   ...		      |	   ...
 *            |    JMP L	      |	   NOP
 *            |    <br-stmts>	      |	1: ...
 *            |	L: ...		      |
 *            |			      |
 *            |			      |	L: <br-stmts>
 *            |			      |	   jmp 1b
 *            |			      |
 * -----------+-----------------------+------------------
 *
 * The initial value is encoded in the LSB of static_key::entries,
 * type: 0 = false, 1 = true.
 *
 * The branch type is encoded in the LSB of jump_entry::key,
 * branch: 0 = unlikely, 1 = likely.
 *
 * This gives the following logic table:
 *
 *	enabled	type	branch	  instruction
 * -----------------------------+-----------
 *	0	0	0	| NOP
 *	0	0	1	| JMP
 *	0	1	0	| NOP
 *	0	1	1	| JMP
 *
 *	1	0	0	| JMP
 *	1	0	1	| NOP
 *	1	1	0	| JMP
 *	1	1	1	| NOP
 *
 * Which gives the following functions:
 *
 *   dynamic: instruction = enabled ^ branch
 *   static:  instruction = type ^ branch
 *
 * See jump_label_type() / jump_label_init_type().
 */
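
/*
 * Worked example of the rules above (illustration only): a key defined with
 * DEFINE_STATIC_KEY_FALSE (type = 0) used in static_branch_likely()
 * (branch = 1) starts out as type ^ branch = 1, i.e. a JMP to the
 * out-of-line block; enabling the key gives enabled ^ branch = 0 and the
 * JMP is patched into a NOP, so the likely path runs straight through.
 */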

#define static_branch_likely(x)							\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = !arch_static_branch(&(x)->key, true);			\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = !arch_static_branch_jump(&(x)->key, true);		\
	else									\
		branch = ____wrong_branch_error();				\
	likely(branch);								\
})

#define static_branch_unlikely(x)						\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = arch_static_branch_jump(&(x)->key, false);		\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = arch_static_branch(&(x)->key, false);			\
	else									\
		branch = ____wrong_branch_error();				\
	unlikely(branch);							\
})

#else /* !CONFIG_JUMP_LABEL */

#define static_branch_likely(x)		likely(static_key_enabled(&(x)->key))
#define static_branch_unlikely(x)	unlikely(static_key_enabled(&(x)->key))

#endif /* CONFIG_JUMP_LABEL */

/*
 * Advanced usage; refcount, branch is enabled when: count != 0
 */

#define static_branch_inc(x)		static_key_slow_inc(&(x)->key)
#define static_branch_dec(x)		static_key_slow_dec(&(x)->key)
#define static_branch_inc_cpuslocked(x)	static_key_slow_inc_cpuslocked(&(x)->key)
#define static_branch_dec_cpuslocked(x)	static_key_slow_dec_cpuslocked(&(x)->key)
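
/*
 * Refcounted usage sketch (hypothetical names, not part of this header):
 * several users can keep the branch enabled; it only returns to its default
 * once every static_branch_inc() has been paired with a static_branch_dec().
 *
 *	DEFINE_STATIC_KEY_FALSE(sample_key);
 *
 *	static_branch_inc(&sample_key);		// count 0 -> 1: branch enabled
 *	static_branch_inc(&sample_key);		// count 1 -> 2: no code change
 *	static_branch_dec(&sample_key);		// count 2 -> 1: still enabled
 *	static_branch_dec(&sample_key);		// count 1 -> 0: branch disabled
 */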

/*
 * Normal usage; boolean enable/disable.
 */

#define static_branch_enable(x)			static_key_enable(&(x)->key)
#define static_branch_disable(x)		static_key_disable(&(x)->key)
#define static_branch_enable_cpuslocked(x)	static_key_enable_cpuslocked(&(x)->key)
#define static_branch_disable_cpuslocked(x)	static_key_disable_cpuslocked(&(x)->key)
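
/*
 * Sketch of the _cpuslocked variants (hypothetical call site): they are
 * intended for callers that already hold the CPU hotplug read lock, e.g.
 * via cpus_read_lock(), so the key code must not try to take it again.
 *
 *	cpus_read_lock();
 *	static_branch_enable_cpuslocked(&sample_key);
 *	cpus_read_unlock();
 */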

#endif	/* __ASSEMBLY__ */

#endif	/* _LINUX_JUMP_LABEL_H */