/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

void static_key_slow_inc(struct static_key *key)
{
	int v, v1;

	STATIC_KEY_CHECK_USE();

	/*
	 * Careful if we get concurrent static_key_slow_inc() calls;
	 * later calls must wait for the first one to _finish_ the
	 * jump_label_update() process.  At the same time, however,
	 * the jump_label_update() call below wants to see
	 * static_key_enabled(&key) for jumps to be updated properly.
	 *
	 * So give a special meaning to negative key->enabled: it sends
	 * static_key_slow_inc() down the slow path, and it is non-zero
	 * so it counts as "enabled" in jump_label_update().  Note that
	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
	 */
	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
		if (likely(v1 == v))
			return;
	}

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		atomic_set(&key->enabled, -1);
		jump_label_update(key);
		atomic_set(&key->enabled, 1);
	} else {
		atomic_inc(&key->enabled);
	}
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
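
/*
 * Illustrative walk-through of the negative key->enabled protocol above
 * (editorial sketch, not part of the original file). Two CPUs race to do
 * the first enable of a key whose enabled count is 0:
 *
 *	CPU0                               CPU1
 *	static_key_slow_inc()              static_key_slow_inc()
 *	  v == 0, take slow path             v == 0, take slow path
 *	  jump_label_lock()                  blocks on jump_label_mutex
 *	  enabled: 0 -> -1
 *	  jump_label_update(key)
 *	  enabled: -1 -> 1
 *	  jump_label_unlock()                acquires the mutex
 *	                                     enabled != 0, atomic_inc -> 2
 *
 * While CPU0 is patching, enabled == -1 is non-zero, so (per the comment
 * above) the key already counts as "enabled" in jump_label_update(),
 * which is what makes the branch directions come out right.
 */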

static void __static_key_slow_dec(struct static_key *key,
		unsigned long rate_limit, struct delayed_work *work)
{
	/*
	 * The negative count check is valid even when a negative
	 * key->enabled is in use by static_key_slow_inc(); a
	 * __static_key_slow_dec() before the first static_key_slow_inc()
	 * returns is unbalanced, because all other static_key_slow_inc()
	 * instances block while the update is in progress.
	 */
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		jump_label_update(key);
	}
	jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE();
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
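
/*
 * Example usage of the deferred API (editorial sketch, not part of the
 * original file; "my_key" is a made-up name). A caller that toggles a
 * key at high frequency can coalesce the expensive disable-side patching
 * into at most one update per timeout period:
 *
 *	static struct static_key_deferred my_key;
 *
 *	jump_label_rate_limit(&my_key, HZ);	// batch decs over ~1 second
 *	static_key_slow_inc(&my_key.key);	// enable: patches immediately
 *	static_key_slow_dec_deferred(&my_key);	// disable: patches via worker
 */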

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}
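
/*
 * Worked example of the overlap test above (editorial sketch;
 * JUMP_LABEL_NOP_SIZE assumed to be 5 here, as on x86): a patch site at
 * 0x1000 occupies [0x1000, 0x1005). The range start=0x1004, end=0x1100
 * conflicts, since 0x1000 <= 0x1100 and 0x1005 > 0x1004; with
 * start=0x1005 it does not, because 0x1005 > 0x1005 is false.
 */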

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
	return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
	return (unsigned long)key->entries & JUMP_TYPE_MASK;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
	return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
	return (unsigned long)entry->key & 1UL;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool enabled = static_key_enabled(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return enabled ^ branch;
}
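
/*
 * Worked truth table for the XOR above (editorial restatement of the
 * table in linux/jump_label.h). "branch" is the low bit of entry->key:
 * 1 for a site compiled via static_branch_likely(), 0 for
 * static_branch_unlikely():
 *
 *	enabled  branch    enabled ^ branch
 *	   0       0       0 = JUMP_LABEL_NOP  (skip the unlikely block)
 *	   0       1       1 = JUMP_LABEL_JMP  (jump over the likely block)
 *	   1       0       1 = JUMP_LABEL_JMP  (jump into the unlikely block)
 *	   1       1       0 = JUMP_LABEL_NOP  (fall through to likely block)
 */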

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop)
{
	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
		/*
		 * entry->code set to 0 invalidates module init text sections;
		 * kernel_text_address() verifies we are not in core kernel
		 * init code, see jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, jump_label_type(entry));
	}
}

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		/* rewrite NOPs */
		if (jump_label_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		/*
		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
		 */
		*((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	static_key_initialized = true;
	jump_label_unlock();
}
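
/*
 * Illustration (editorial, not part of the original file): after the
 * sort in jump_label_init(), the core-kernel table is grouped by key,
 * e.g.:
 *
 *	entry[0].key = &key_A	<- key_A->entries points here
 *	entry[1].key = &key_A	   (modulo the low type bit it preserves)
 *	entry[2].key = &key_B	<- key_B->entries points here
 *	...
 *
 * This grouping is what lets __jump_label_update() stop at the first
 * entry whose key differs from the one being updated.
 */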

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
	struct static_key *key = jump_entry_key(entry);
	bool type = static_key_type(key);
	bool branch = jump_entry_branch(entry);

	/* See the comment in linux/jump_label.h */
	return type ^ branch;
}

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};
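
/*
 * Illustrative layout (editorial sketch, not literal kernel state): a
 * key defined in the core kernel but also used by two modules, modA
 * loaded before modB, ends up with:
 *
 *	key->entries -> this key's entries in the core kernel table
 *	key->next    -> { mod = modB, entries = modB's entries }
 *	                    .next -> { mod = modA, entries = modA's entries }
 *	                                 .next -> NULL
 *
 * jump_label_add_module() pushes at the head, so the most recently
 * loaded module comes first.
 */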

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	mod = __module_text_address((unsigned long)start);
	if (!mod)
		return 0;

	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
	struct static_key_mod *mod;

	for (mod = key->next; mod; mod = mod->next) {
		struct module *m = mod->mod;

		__jump_label_update(key, mod->entries,
				    m->jump_entries + m->num_jump_entries);
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with the arch-specific nop
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * goes live, patch its jump label sites via arch_jump_label_transform_static(),
 * so the arch specific jump label code can pick the best nop encoding.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		/* Only write NOPs for arch_static_branch(). */
		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = jump_entry_key(iter);
		if (iterk == key)
			continue;

		key = iterk;
		if (within_module(iter->key, mod)) {
			/*
			 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
			 */
			*((unsigned long *)&key->entries) += (unsigned long)iter;
			key->next = NULL;
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = key->next;
		key->next = jlm;

		/* Only update if we've changed from our initial state */
		if (jump_label_type(iter) != jump_label_init_type(iter))
			__jump_label_update(key, iter, iter_stop);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (jump_entry_key(iter) == key)
			continue;

		key = jump_entry_key(iter);

		if (within_module(iter->key, mod))
			continue;

		prev = &key->next;
		jlm = key->next;

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		if (jlm) {
			*prev = jlm->next;
			kfree(jlm);
		}
	}
}

static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret)
			jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}

struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks whether any text address between @start and @end overlaps one
 * of the jump label patch sites. Code that wants to modify kernel text
 * should first verify that it does not overlap with any of the jump
 * label addresses. Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

static void jump_label_update(struct static_key *key)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry = static_key_entries(key);
#ifdef CONFIG_MODULES
	struct module *mod;

	__jump_label_mod_update(key);

	preempt_disable();
	mod = __module_address((unsigned long)key);
	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
	preempt_enable();
#endif
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */