/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
					/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key, int enable);

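/*
 * static_key_slow_inc()/static_key_slow_dec() reference-count a key.  The
 * first increment takes jump_label_mutex and patches every branch site for
 * the key away from its compile-time default; later increments only bump
 * the count.  A minimal usage sketch (my_key and do_unlikely_work() are
 * illustrative, not part of this file):
 *
 *	static struct static_key my_key = STATIC_KEY_INIT_FALSE;
 *
 *	if (static_key_false(&my_key))
 *		do_unlikely_work();
 *
 *	static_key_slow_inc(&my_key);	 (patch sites to take the branch)
 *	static_key_slow_dec(&my_key);	 (patch back once the count drops to 0)
 */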
void static_key_slow_inc(struct static_key *key)
{
	if (atomic_inc_not_zero(&key->enabled))
		return;

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		if (!jump_label_get_branch_default(key))
			jump_label_update(key, JUMP_LABEL_ENABLE);
		else
			jump_label_update(key, JUMP_LABEL_DISABLE);
	}
	atomic_inc(&key->enabled);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

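/*
 * Common slow-path decrement.  When the count drops to zero the branch
 * sites are patched back toward the key's default.  With a non-zero
 * rate_limit the count is temporarily re-raised and the actual update is
 * deferred to the supplied delayed work.
 */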
static void __static_key_slow_dec(struct static_key *key,
		unsigned long rate_limit, struct delayed_work *work)
{
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		if (!jump_label_get_branch_default(key))
			jump_label_update(key, JUMP_LABEL_DISABLE);
		else
			jump_label_update(key, JUMP_LABEL_ENABLE);
	}
	jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

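/*
 * Record the deferral timeout for a static_key_deferred and wire its
 * delayed work up to jump_label_update_timeout(); after this,
 * static_key_slow_dec_deferred() postpones the actual disable by @rl jiffies.
 */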
void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);

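/*
 * True if the JUMP_LABEL_NOP_SIZE bytes at entry->code overlap the
 * [start, end] text range being checked.
 */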
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

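/*
 * Patch every entry in [entry, stop) that still belongs to @key.  The
 * jump table is sorted by key, so the walk ends at the first entry for a
 * different key.
 */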
static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop, int enable)
{
	for (; (entry < stop) &&
	      (entry->key == (jump_label_t)(unsigned long)key);
	      entry++) {
		/*
		 * An entry->code of 0 marks an invalidated module init-text
		 * site; kernel_text_address() additionally verifies we are
		 * not in core kernel init code.  See
		 * jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, enable);
	}
}

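/*
 * Combine the key's compile-time default branch with its current enabled
 * state: a default-false key that is enabled, or a default-true key that
 * is disabled, needs its sites patched to the jump (JUMP_LABEL_ENABLE);
 * otherwise they stay at (or return to) the nop.
 */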
static enum jump_label_type jump_label_type(struct static_key *key)
{
	bool true_branch = jump_label_get_branch_default(key);
	bool state = static_key_enabled(key);

	if ((!true_branch && state) || (true_branch && !state))
		return JUMP_LABEL_ENABLE;

	return JUMP_LABEL_DISABLE;
}

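/*
 * Boot-time setup for the core kernel's jump entries: sort the
 * __jump_table section, point each key at its first entry and patch every
 * site to match the key's initial state.
 */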
void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = (struct static_key *)(unsigned long)iter->key;
		arch_jump_label_transform_static(iter, jump_label_type(iterk));
		if (iterk == key)
			continue;

		key = iterk;
		/*
		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
		 */
		*((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	jump_label_unlock();
}

#ifdef CONFIG_MODULES

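/*
 * A static_key may be used by modules other than the one (or the core
 * kernel image) that defines it.  Each such user module contributes a
 * static_key_mod, chained off key->next, naming the module and its first
 * jump_entry for the key.
 */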
struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	mod = __module_text_address((unsigned long)start);
	if (!mod)
		return 0;

	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

	return __jump_label_text_reserved(mod->jump_entries,
				mod->jump_entries + mod->num_jump_entries,
				start, end);
}

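/*
 * Propagate an update to all modules that use @key, walking the
 * static_key_mod list hanging off key->next.
 */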
static void __jump_label_mod_update(struct static_key *key, int enable)
{
	struct static_key_mod *mod = key->next;

	while (mod) {
		struct module *m = mod->mod;

		__jump_label_update(key, mod->entries,
				    m->jump_entries + m->num_jump_entries,
				    enable);
		mod = mod->next;
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these sites with arch_get_jump_label_nop(), which is
 * provided by the arch-specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
	}
}

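/*
 * MODULE_STATE_COMING: sort the module's jump table, attach entries for
 * keys the module defines itself, register a static_key_mod for keys
 * owned elsewhere, and immediately patch any site whose key currently
 * requires the jump rather than the nop.
 */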
static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = (struct static_key *)(unsigned long)iter->key;
		if (iterk == key)
			continue;

		key = iterk;
		if (__module_address(iter->key) == mod) {
			/*
			 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
			 */
			*((unsigned long *)&key->entries) += (unsigned long)iter;
			key->next = NULL;
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = key->next;
		key->next = jlm;

		if (jump_label_type(key) == JUMP_LABEL_ENABLE)
			__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
	}

	return 0;
}

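/*
 * MODULE_STATE_GOING: unlink and free the static_key_mod records this
 * module added to keys it does not own; entries for keys the module
 * defines disappear along with the module itself.
 */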
static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (iter->key == (jump_label_t)(unsigned long)key)
			continue;

		key = (struct static_key *)(unsigned long)iter->key;

		if (__module_address(iter->key) == mod)
			continue;

		prev = &key->next;
		jlm = key->next;

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		if (jlm) {
			*prev = jlm->next;
			kfree(jlm);
		}
	}
}

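/*
 * MODULE_STATE_LIVE: the module's __init text is about to be freed, so
 * zero the code address of any jump_entry that pointed into it; later
 * updates skip such entries (see the check in __jump_label_update()).
 */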
static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret)
			jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}

struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks whether the text address range between @start and @end overlaps
 * any of the jump label patch addresses. Code that wants to modify kernel
 * text should first verify that it does not overlap with any of the jump
 * label addresses. Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

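/*
 * Patch all branch sites for @key: the entries starting at
 * jump_label_get_entries(key) in the defining image, plus (via
 * __jump_label_mod_update()) the entries of every other module that uses
 * the key.
 */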
static void jump_label_update(struct static_key *key, int enable)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry = jump_label_get_entries(key);

#ifdef CONFIG_MODULES
	struct module *mod = __module_address((unsigned long)key);

	__jump_label_mod_update(key, enable);

	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
#endif
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop, enable);
}

#endif