// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	/*
	 * Entries are sorted by key.
	 */
	if (jump_entry_key(jea) < jump_entry_key(jeb))
		return -1;

	if (jump_entry_key(jea) > jump_entry_key(jeb))
		return 1;

	/*
	 * In batching mode, entries that share a key must also be sorted
	 * by code address within the already key-sorted list, enabling a
	 * bsearch of the vector.
	 */
	if (jump_entry_code(jea) < jump_entry_code(jeb))
		return -1;

	if (jump_entry_code(jea) > jump_entry_code(jeb))
		return 1;

	return 0;
}

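/*
 * With CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE, the code/target/key fields
 * are offsets relative to the entry itself rather than absolute addresses,
 * so a plain memory swap would corrupt them. Rebias each field by 'delta',
 * the distance between the two slots, so the offsets keep resolving to the
 * same absolute addresses after the move.
 */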
static void jump_label_swap(void *a, void *b, int size)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct jump_entry *jea = a;
	struct jump_entry *jeb = b;
	struct jump_entry tmp = *jea;

	jea->code = jeb->code - delta;
	jea->target = jeb->target - delta;
	jea->key = jeb->key - delta;

	jeb->code = tmp.code + delta;
	jeb->target = tmp.target + delta;
	jeb->key = tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;
	void *swapfn = NULL;

	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
		swapfn = jump_label_swap;

	size = (((unsigned long)stop - (unsigned long)start)
		/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case, it's
 * OK to have it be a function here. Similarly, for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
	/*
	 * -1 means the first static_key_slow_inc() is in progress.
	 * static_key_enabled() must return true, so return 1 here.
	 */
	int n = atomic_read(&key->enabled);

	return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process. At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update(). Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * Ensure that if the above cmpxchg loop observes our positive
		 * value, it must also observe all the text changes.
		 */
		atomic_set_release(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
	cpus_read_lock();
	static_key_slow_inc_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) > 0) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
		return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		/*
		 * See static_key_slow_inc().
		 */
		atomic_set_release(&key->enabled, 1);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
	cpus_read_lock();
	static_key_enable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	lockdep_assert_cpus_held();

	if (atomic_read(&key->enabled) != 1) {
		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
		return;
	}

	jump_label_lock();
	if (atomic_cmpxchg(&key->enabled, 1, 0))
		jump_label_update(key);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
	cpus_read_lock();
	static_key_disable_cpuslocked(key);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);
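
/*
 * Usage sketch (illustrative only; 'rare_feature' and do_rare_thing()
 * are made-up names):
 *
 *	static DEFINE_STATIC_KEY_FALSE(rare_feature);
 *
 *	void hot_path(void)
 *	{
 *		if (static_branch_unlikely(&rare_feature))
 *			do_rare_thing();
 *	}
 *
 * Boolean updaters static_branch_enable()/static_branch_disable() force
 * the state; reference-counted updaters static_branch_inc()/
 * static_branch_dec() nest, and only the 0 <-> 1 transitions rewrite the
 * branch sites via the slow paths above.
 */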

static bool static_key_slow_try_dec(struct static_key *key)
{
	int val;

	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
	if (val == 1)
		return false;

	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	WARN(val < 0, "jump label: negative count!\n");
	return true;
}

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	if (static_key_slow_try_dec(key))
		return;

	jump_label_lock();
	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
	jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key)
{
	cpus_read_lock();
	__static_key_slow_dec_cpuslocked(key);
	cpus_read_unlock();
}

void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
	STATIC_KEY_CHECK_USE(key);
	__static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
				    struct delayed_work *work,
				    unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	if (static_key_slow_try_dec(key))
		return;

	schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
	STATIC_KEY_CHECK_USE(key);
	flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
	unsigned long rl)
{
	STATIC_KEY_CHECK_USE(key);
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
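
/*
 * Usage sketch for rate-limited keys (illustrative; 'my_key' is a
 * placeholder). Deferring the final decrement batches the expensive text
 * patching when a key is toggled at high frequency:
 *
 *	static struct static_key_deferred my_key;
 *
 *	jump_label_rate_limit(&my_key, HZ);	// once, at init time
 *	static_key_slow_inc(&my_key.key);	// enable immediately
 *	static_key_slow_dec_deferred(&my_key);	// final dec delayed by HZ
 *
 * static_key_deferred_flush() waits for a pending deferred decrement,
 * e.g. before the key's owner goes away.
 */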

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (jump_entry_code(entry) <= (unsigned long)end &&
	    jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end, bool init)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (init || !jump_entry_is_init(iter)) {
			if (addr_conflict(iter, start, end))
				return 1;
		}
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
	return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
	key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
	key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; the accessor
 * functions below preserve these bits.
 */
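/*
 * For reference, the low-bit encoding (defined in <linux/jump_label.h>):
 * JUMP_TYPE_FALSE = 0, JUMP_TYPE_TRUE = 1 (initial branch direction),
 * JUMP_TYPE_LINKED = 2 (pointer is a static_key_mod list rather than an
 * entry table), JUMP_TYPE_MASK = 3. This works because both pointed-to
 * structures are at least 4-byte aligned, leaving the low two bits free.
 */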
static void static_key_set_entries(struct static_key *key,
				   struct jump_entry *entries)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->entries = entries;
	key->type |= type;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
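
/*
 * The XOR above expands to (with JUMP_LABEL_NOP = 0, JUMP_LABEL_JMP = 1):
 *
 *	enabled  branch  ->  type
 *	   0        0        NOP
 *	   0        1        JMP
 *	   1        0        JMP
 *	   1        1        NOP
 *
 * The branch bit records the compile-time layout of the site, so toggling
 * the key always means flipping the instruction relative to that layout.
 */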

static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
	/*
	 * Cannot update code that was in an init text area.
	 */
	if (!init && jump_entry_is_init(entry))
		return false;

	if (!kernel_text_address(jump_entry_code(entry))) {
		/*
		 * This skips patching built-in __exit, which
		 * is part of init_section_contains() but is
		 * not part of kernel_text_address().
		 *
		 * Skipping built-in __exit is fine since it
		 * will never be executed.
		 */
		WARN_ONCE(!jump_entry_is_init(entry),
			  "can't patch jump_label at %pS",
			  (void *)jump_entry_code(entry));
		return false;
	}

	return true;
}

#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		if (jump_label_can_update(entry, init))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}
#else
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop,
				bool init)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {

		if (!jump_label_can_update(entry, init))
			continue;

		if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
			/*
			 * Queue is full: Apply the current queue and try again.
			 */
			arch_jump_label_transform_apply();
			BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
		}
	}
	arch_jump_label_transform_apply();
}
#endif
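
/*
 * Rationale for the batching variant: live text patching is expensive,
 * as each apply typically has to serialize all CPUs (e.g. a core-sync
 * IPI on x86). Queueing every entry of a key and applying once amortizes
 * that cost over the whole update instead of paying it per branch site.
 */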

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	/*
	 * Since we are initializing the static_key.enabled field with
	 * the 'raw' int values (to avoid pulling in atomic.h) in
	 * jump_label.h, let's make sure that is safe. There are only two
	 * cases to check since we initialize to 0 or 1.
	 */
	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

	if (static_key_initialized)
		return;

	cpus_read_lock();
	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		if (init_section_contains((void *)jump_entry_code(iter), 1))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		static_key_set_entries(key, iter);
	}
	static_key_initialized = true;
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_is_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};
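
/*
 * Shape of a linked key's chain (newest module first; the tail node holds
 * the defining translation unit's own entries, with ->mod NULL when the
 * key is built in):
 *
 *	key->next -> { modB } -> { modA } -> { builtin/defining mod } -> NULL
 */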

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
	WARN_ON_ONCE(!static_key_linked(key));
	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
			       struct static_key_mod *mod)
{
	unsigned long type;

	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
	type = key->type & JUMP_TYPE_MASK;
	key->next = mod;
	key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;
	int ret;

	preempt_disable();
	mod = __module_text_address((unsigned long)start);
	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
	if (!try_module_get(mod))
		mod = NULL;
	preempt_enable();

	if (!mod)
		return 0;

	ret = __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end, mod->state == MODULE_STATE_COMING);

	module_put(mod);

	return ret;
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = static_key_mod(key); mod; mod = mod->next) {
		struct jump_entry *stop;
		struct module *m;

		/*
		 * NULL if the static_key is defined in a module
		 * that does not use it
		 */
		if (!mod->entries)
			continue;

		m = mod->mod;
		if (!m)
			stop = __stop___jump_table;
		else
			stop = m->jump_entries + m->num_jump_entries;
		__jump_label_update(key, mod->entries, stop,
				    m && m->state == MODULE_STATE_COMING);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_branch_static(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, *jlm2;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		if (within_module_init(jump_entry_code(iter), mod))
			jump_entry_set_init(iter);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module((unsigned long)key, mod)) {
			static_key_set_entries(key, iter);
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		if (!static_key_linked(key)) {
			jlm2 = kzalloc(sizeof(struct static_key_mod),
				       GFP_KERNEL);
			if (!jlm2) {
				kfree(jlm);
				return -ENOMEM;
			}
			preempt_disable();
			jlm2->mod = __module_address((unsigned long)key);
			preempt_enable();
			jlm2->entries = static_key_entries(key);
			jlm2->next = NULL;
			static_key_set_mod(key, jlm2);
			static_key_set_linked(key);
		}
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = static_key_mod(key);
		static_key_set_mod(key, jlm);
		static_key_set_linked(key);

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop, true);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module((unsigned long)key, mod))
			continue;

		/* No memory during module load */
		if (WARN_ON(!static_key_linked(key)))
			continue;

		prev = &key->next;
		jlm = static_key_mod(key);

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		/* No memory during module load */
		if (WARN_ON(!jlm))
			continue;

		if (prev == &key->next)
			static_key_set_mod(key, jlm->next);
		else
			*prev = jlm->next;

		kfree(jlm);

		jlm = static_key_mod(key);
		/* if only one entry is left, fold it back into the static_key */
		if (jlm->next == NULL) {
			static_key_set_entries(key, jlm->entries);
			static_key_clear_linked(key);
			kfree(jlm);
		}
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	cpus_read_lock();
	jump_label_lock();

	switch (val) {
	case MODULE_STATE_COMING:
		ret = jump_label_add_module(mod);
		if (ret) {
			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
			jump_label_del_module(mod);
		}
		break;
	case MODULE_STATE_GOING:
		jump_label_del_module(mod);
		break;
	}

	jump_label_unlock();
	cpus_read_unlock();

	return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	bool init = system_state < SYSTEM_RUNNING;
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end, init);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry;
#ifdef CONFIG_MODULES
	struct module *mod;

	if (static_key_linked(key)) {
		__jump_label_mod_update(key);
		return;
	}

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	entry = static_key_entries(key);
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop,
				    system_state < SYSTEM_RUNNING);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */