/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_JUMP_LABEL_H
#define _LINUX_JUMP_LABEL_H

/*
 * Jump label support
 *
 * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra
 *
 * DEPRECATED API:
 *
 * The use of 'struct static_key' directly is now DEPRECATED. In addition,
 * static_key_{true,false}() is also DEPRECATED. I.e. DO NOT use the following:
 *
 * struct static_key false = STATIC_KEY_INIT_FALSE;
 * struct static_key true = STATIC_KEY_INIT_TRUE;
 * static_key_true()
 * static_key_false()
 *
 * The updated API replacements are:
 *
 * DEFINE_STATIC_KEY_TRUE(key);
 * DEFINE_STATIC_KEY_FALSE(key);
 * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
 * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
 * static_branch_likely()
 * static_branch_unlikely()
 *
 * Jump labels provide an interface to generate dynamic branches using
 * self-modifying code. Assuming toolchain and architecture support, if we
 * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)",
 * an "if (static_branch_unlikely(&key))" statement is an unconditional branch
 * (which defaults to false - and the true block is placed out of line).
 * Similarly, we can define an initially true key via
 * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same
 * "if (static_branch_unlikely(&key))", in which case we will generate an
 * unconditional branch to the out-of-line true branch. Keys that are
 * initially true or false can be used in both static_branch_unlikely()
 * and static_branch_likely() statements.
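 *
 * A minimal usage sketch (illustrative only; "foo_feature" and the callees
 * are made-up names, not part of this API):
 *
 *	DEFINE_STATIC_KEY_FALSE(foo_feature);
 *
 *	void foo(void)
 *	{
 *		if (static_branch_unlikely(&foo_feature))
 *			do_rare_thing();	// out of line until enabled
 *		else
 *			do_common_thing();	// straight-line fast path
 *	}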
 *
 * At runtime we can change the branch target by setting the key
 * to true via a call to static_branch_enable(), or false using
 * static_branch_disable(). If the direction of the branch is switched by
 * these calls then we run-time modify the branch target via a
 * no-op -> jump or jump -> no-op conversion. For example, for an
 * initially false key that is used in an "if (static_branch_unlikely(&key))"
 * statement, setting the key to true requires us to patch in a jump
 * to the out-of-line true branch.
 *
 * In addition to static_branch_{enable,disable}, we can also reference count
 * the key or branch direction via static_branch_{inc,dec}. Thus,
 * static_branch_inc() can be thought of as a 'make more true' and
 * static_branch_dec() as a 'make more false'.
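 *
 * Continuing the sketch above (again purely illustrative):
 *
 *	static_branch_enable(&foo_feature);	// patch NOP -> JMP
 *	static_branch_disable(&foo_feature);	// patch JMP -> NOP
 *
 *	static_branch_inc(&foo_feature);	// refcounted 'make more true'
 *	static_branch_dec(&foo_feature);	// refcounted 'make more false'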
 *
 * Since this relies on modifying code, the branch modifying functions
 * must be considered absolute slow paths (machine wide synchronization etc.).
 * OTOH, since the affected branches are unconditional, their runtime overhead
 * will be absolutely minimal, esp. in the default (off) case where the total
 * effect is a single NOP of appropriate size. The on case will patch in a jump
 * to the out-of-line block.
 *
 * When the control is directly exposed to userspace, it is prudent to delay the
 * decrement to avoid high frequency code modifications which can (and do)
 * cause significant performance degradation. Struct static_key_deferred and
 * static_key_slow_dec_deferred() provide for this.
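 *
 * A rough sketch of the deferred pattern (see
 * include/linux/jump_label_ratelimit.h for the actual API; "foo_events" is
 * a hypothetical key name):
 *
 *	static struct static_key_deferred foo_events;
 *
 *	jump_label_rate_limit(&foo_events, HZ);		// batch up decrements
 *	...
 *	static_key_slow_dec_deferred(&foo_events);	// deferred 'make more false'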
 *
 * Lacking toolchain and/or architecture support, static keys fall back to a
 * simple conditional branch.
 *
 * Additional babbling in: Documentation/staging/static-keys.rst
 */

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>

extern bool static_key_initialized;

#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized,		      \
				       "%s(): static key '%pS' used before call to jump_label_init()", \
				       __func__, (key))

struct static_key {
	atomic_t enabled;
#ifdef CONFIG_JUMP_LABEL
/*
 * Note:
 *   To make anonymous unions work with old compilers, the static
 *   initialization of them requires brackets. This creates a dependency
 *   on the order of the struct with the initializers. If any fields
 *   are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need
 *   to be modified.
 *
 * bit 0 => 1 if key is initially true
 *	    0 if initially false
 * bit 1 => 1 if points to struct static_key_mod
 *	    0 if points to struct jump_entry
 */
	union {
		unsigned long type;
		struct jump_entry *entries;
		struct static_key_mod *next;
	};
#endif	/* CONFIG_JUMP_LABEL */
};
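
/*
 * Illustrative sketch only (the real decoding lives in kernel/jump_label.c):
 * with CONFIG_JUMP_LABEL, bit 0 of ->type carries the initial value and
 * bit 1 says whether ->next (set) or ->entries (clear) is the live union
 * member, e.g. using the JUMP_TYPE_* masks defined further down:
 *
 *	bool initially_true = key->type & JUMP_TYPE_TRUE;
 *	bool linked	    = key->type & JUMP_TYPE_LINKED;
 *	struct jump_entry *entries =
 *		(struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
 */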

#endif /* __ASSEMBLY__ */

#if defined(CONFIG_JUMP_LABEL) && !defined(BUILD_FIPS140_KO)
#include <asm/jump_label.h>

#ifndef __ASSEMBLY__
#ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE

struct jump_entry {
	s32 code;
	s32 target;
	long key;	// key may be far away from the core kernel under KASLR
};

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return (unsigned long)&entry->code + entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return (unsigned long)&entry->target + entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	long offset = entry->key & ~3L;

	return (struct static_key *)((unsigned long)&entry->key + offset);
}

#else

static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
	return entry->code;
}

static inline unsigned long jump_entry_target(const struct jump_entry *entry)
{
	return entry->target;
}

static inline struct static_key *jump_entry_key(const struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~3UL);
}

#endif

static inline bool jump_entry_is_branch(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

static inline bool jump_entry_is_init(const struct jump_entry *entry)
{
	return (unsigned long)entry->key & 2UL;
}

static inline void jump_entry_set_init(struct jump_entry *entry, bool set)
{
	if (set)
		entry->key |= 2;
	else
		entry->key &= ~2;
}

static inline int jump_entry_size(struct jump_entry *entry)
{
#ifdef JUMP_LABEL_NOP_SIZE
	return JUMP_LABEL_NOP_SIZE;
#else
	return arch_jump_entry_size(entry);
#endif
}

#endif
#endif

#ifndef __ASSEMBLY__

enum jump_label_type {
	JUMP_LABEL_NOP = 0,
	JUMP_LABEL_JMP,
};

struct module;

#ifdef BUILD_FIPS140_KO

#include <linux/atomic.h>

static inline int static_key_count(struct static_key *key)
{
	return raw_atomic_read(&key->enabled);
}

static __always_inline bool static_key_false(struct static_key *key)
{
	if (unlikely(static_key_count(key) > 0))
		return true;
	return false;
}

static __always_inline bool static_key_true(struct static_key *key)
{
	if (likely(static_key_count(key) > 0))
		return true;
	return false;
}

#elif defined(CONFIG_JUMP_LABEL)

#define JUMP_TYPE_FALSE		0UL
#define JUMP_TYPE_TRUE		1UL
#define JUMP_TYPE_LINKED	2UL
#define JUMP_TYPE_MASK		3UL

static __always_inline bool static_key_false(struct static_key *key)
{
	return arch_static_branch(key, false);
}

static __always_inline bool static_key_true(struct static_key *key)
{
	return !arch_static_branch(key, true);
}

extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];

extern void jump_label_init(void);
extern void jump_label_lock(void);
extern void jump_label_unlock(void);
extern void arch_jump_label_transform(struct jump_entry *entry,
				      enum jump_label_type type);
extern bool arch_jump_label_transform_queue(struct jump_entry *entry,
					    enum jump_label_type type);
extern void arch_jump_label_transform_apply(void);
extern int jump_label_text_reserved(void *start, void *end);
extern bool static_key_slow_inc(struct static_key *key);
extern bool static_key_fast_inc_not_disabled(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern bool static_key_slow_inc_cpuslocked(struct static_key *key);
extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);
extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);

/*
 * We should be using ATOMIC_INIT() for initializing .enabled, but
 * the inclusion of atomic.h is problematic for inclusion of jump_label.h
 * in 'low-level' headers. Thus, we are initializing .enabled with a
 * raw value, but have added a BUILD_BUG_ON() to catch any issues in
 * jump_label_init(); see kernel/jump_label.c.
 */
#define STATIC_KEY_INIT_TRUE					\
	{ .enabled = { 1 },					\
	  { .type = JUMP_TYPE_TRUE } }
#define STATIC_KEY_INIT_FALSE					\
	{ .enabled = { 0 },					\
	  { .type = JUMP_TYPE_FALSE } }

#else /* !CONFIG_JUMP_LABEL */

#include <linux/atomic.h>
#include <linux/bug.h>

static __always_inline int static_key_count(struct static_key *key)
{
	return raw_atomic_read(&key->enabled);
}

static __always_inline void jump_label_init(void)
{
	static_key_initialized = true;
}

static __always_inline bool static_key_false(struct static_key *key)
{
	if (unlikely_notrace(static_key_count(key) > 0))
		return true;
	return false;
}

static __always_inline bool static_key_true(struct static_key *key)
{
	if (likely_notrace(static_key_count(key) > 0))
		return true;
	return false;
}

static inline bool static_key_fast_inc_not_disabled(struct static_key *key)
{
	int v;

	STATIC_KEY_CHECK_USE(key);
	/*
	 * Prevent key->enabled getting negative to follow the same semantics
	 * as for CONFIG_JUMP_LABEL=y, see kernel/jump_label.c comment.
	 */
	v = atomic_read(&key->enabled);
	do {
		if (v < 0 || (v + 1) < 0)
			return false;
	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
	return true;
}
#define static_key_slow_inc(key)	static_key_fast_inc_not_disabled(key)

static inline void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	atomic_dec(&key->enabled);
}

#define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key)
#define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key)

static inline int jump_label_text_reserved(void *start, void *end)
{
	return 0;
}

static inline void jump_label_lock(void) {}
static inline void jump_label_unlock(void) {}

static inline void static_key_enable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}
	atomic_set(&key->enabled, 1);
}

static inline void static_key_disable(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}
	atomic_set(&key->enabled, 0);
}

#define static_key_enable_cpuslocked(k)		static_key_enable((k))
#define static_key_disable_cpuslocked(k)	static_key_disable((k))

#define STATIC_KEY_INIT_TRUE	{ .enabled = ATOMIC_INIT(1) }
#define STATIC_KEY_INIT_FALSE	{ .enabled = ATOMIC_INIT(0) }

#endif	/* CONFIG_JUMP_LABEL */

#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
#define jump_label_enabled static_key_enabled

/* -------------------------------------------------------------------------- */

/*
 * Two type wrappers around static_key, such that we can use compile time
 * type differentiation to emit the right code.
 *
 * All the below code is macros in order to play type games.
 */

struct static_key_true {
	struct static_key key;
};

struct static_key_false {
	struct static_key key;
};

#define STATIC_KEY_TRUE_INIT  (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE,  }
#define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, }

#define DEFINE_STATIC_KEY_TRUE(name)	\
	struct static_key_true name = STATIC_KEY_TRUE_INIT

#define DEFINE_STATIC_KEY_TRUE_RO(name)	\
	struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT

#define DECLARE_STATIC_KEY_TRUE(name)	\
	extern struct static_key_true name

#define DEFINE_STATIC_KEY_FALSE(name)	\
	struct static_key_false name = STATIC_KEY_FALSE_INIT

#define DEFINE_STATIC_KEY_FALSE_RO(name)	\
	struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT

#define DECLARE_STATIC_KEY_FALSE(name)	\
	extern struct static_key_false name

#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count)		\
	struct static_key_true name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_TRUE_INIT,	\
	}

#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count)		\
	struct static_key_false name[count] = {			\
		[0 ... (count) - 1] = STATIC_KEY_FALSE_INIT,	\
	}

#define _DEFINE_STATIC_KEY_1(name)	DEFINE_STATIC_KEY_TRUE(name)
#define _DEFINE_STATIC_KEY_0(name)	DEFINE_STATIC_KEY_FALSE(name)
#define DEFINE_STATIC_KEY_MAYBE(cfg, name)			\
	__PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name)

#define _DEFINE_STATIC_KEY_RO_1(name)	DEFINE_STATIC_KEY_TRUE_RO(name)
#define _DEFINE_STATIC_KEY_RO_0(name)	DEFINE_STATIC_KEY_FALSE_RO(name)
#define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name)			\
	__PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name)

#define _DECLARE_STATIC_KEY_1(name)	DECLARE_STATIC_KEY_TRUE(name)
#define _DECLARE_STATIC_KEY_0(name)	DECLARE_STATIC_KEY_FALSE(name)
#define DECLARE_STATIC_KEY_MAYBE(cfg, name)			\
	__PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name)

extern bool ____wrong_branch_error(void);

#define static_key_enabled(x)							\
({										\
	if (!__builtin_types_compatible_p(typeof(*x), struct static_key) &&	\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\
	    !__builtin_types_compatible_p(typeof(*x), struct static_key_false))	\
		____wrong_branch_error();					\
	static_key_count((struct static_key *)x) > 0;				\
})

#if defined(CONFIG_JUMP_LABEL) && !defined(BUILD_FIPS140_KO)

/*
 * Combine the right initial value (type) with the right branch order
 * to generate the desired result.
 *
 *
 * type\branch |     likely (1)        |    unlikely (0)
 * ------------+-----------------------+------------------
 *             |                       |
 *  true (1)   |     ...               |     ...
 *             |     NOP               |     JMP L
 *             |     <br-stmts>        |  1: ...
 *             |  L: ...               |
 *             |                       |
 *             |                       |  L: <br-stmts>
 *             |                       |     jmp 1b
 *             |                       |
 * ------------+-----------------------+------------------
 *             |                       |
 *  false (0)  |     ...               |     ...
 *             |     JMP L             |     NOP
 *             |     <br-stmts>        |  1: ...
 *             |  L: ...               |
 *             |                       |
 *             |                       |  L: <br-stmts>
 *             |                       |     jmp 1b
 *             |                       |
 * ------------+-----------------------+------------------
 *
 * The initial value is encoded in the LSB of static_key::entries,
 * type: 0 = false, 1 = true.
 *
 * The branch type is encoded in the LSB of jump_entry::key,
 * branch: 0 = unlikely, 1 = likely.
 *
 * This gives the following logic table:
 *
 *	enabled	type	branch	  instruction
 * -----------------------------+-----------
 *	0	0	0	| NOP
 *	0	0	1	| JMP
 *	0	1	0	| NOP
 *	0	1	1	| JMP
 *
 *	1	0	0	| JMP
 *	1	0	1	| NOP
 *	1	1	0	| JMP
 *	1	1	1	| NOP
 *
 * Which gives the following functions:
 *
 *   dynamic: instruction = enabled ^ branch
 *   static:  instruction = type ^ branch
 *
 * See jump_label_type() / jump_label_init_type().
 */
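
/*
 * Worked example of the table above (illustrative only): an initially false
 * key (type = 0) used in static_branch_unlikely() (branch = 0) compiles to a
 * NOP while disabled (0 ^ 0 = 0) and is patched to a JMP once enabled
 * (1 ^ 0 = 1); the same key used in static_branch_likely() (branch = 1)
 * starts out as a JMP (0 ^ 1 = 1) and becomes a NOP when the key is enabled
 * (1 ^ 1 = 0).
 */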

#define static_branch_likely(x)							\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = !arch_static_branch(&(x)->key, true);			\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = !arch_static_branch_jump(&(x)->key, true);		\
	else									\
		branch = ____wrong_branch_error();				\
	likely_notrace(branch);							\
})

#define static_branch_unlikely(x)						\
({										\
	bool branch;								\
	if (__builtin_types_compatible_p(typeof(*x), struct static_key_true))	\
		branch = arch_static_branch_jump(&(x)->key, false);		\
	else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \
		branch = arch_static_branch(&(x)->key, false);			\
	else									\
		branch = ____wrong_branch_error();				\
	unlikely_notrace(branch);						\
})

#else /* !CONFIG_JUMP_LABEL */

#define static_branch_likely(x)		likely_notrace(static_key_enabled(&(x)->key))
#define static_branch_unlikely(x)	unlikely_notrace(static_key_enabled(&(x)->key))

#endif /* CONFIG_JUMP_LABEL */

#define static_branch_maybe(config, x)					\
	(IS_ENABLED(config) ? static_branch_likely(x)			\
			    : static_branch_unlikely(x))
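
/*
 * For instance (a sketch with hypothetical names, not part of this header):
 *
 *	DEFINE_STATIC_KEY_MAYBE(CONFIG_FOO_DEFAULT_ON, foo_key);
 *	...
 *	if (static_branch_maybe(CONFIG_FOO_DEFAULT_ON, &foo_key))
 *		do_foo();
 *
 * picks the likely/unlikely variant that matches the key's build-time default.
 */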

/*
 * Advanced usage; refcount, branch is enabled when: count != 0
 */

#define static_branch_inc(x)		static_key_slow_inc(&(x)->key)
#define static_branch_dec(x)		static_key_slow_dec(&(x)->key)
#define static_branch_inc_cpuslocked(x)	static_key_slow_inc_cpuslocked(&(x)->key)
#define static_branch_dec_cpuslocked(x)	static_key_slow_dec_cpuslocked(&(x)->key)

/*
 * Normal usage; boolean enable/disable.
 */

#define static_branch_enable(x)			static_key_enable(&(x)->key)
#define static_branch_disable(x)		static_key_disable(&(x)->key)
#define static_branch_enable_cpuslocked(x)	static_key_enable_cpuslocked(&(x)->key)
#define static_branch_disable_cpuslocked(x)	static_key_disable_cpuslocked(&(x)->key)

#endif /* __ASSEMBLY__ */

#endif	/* _LINUX_JUMP_LABEL_H */