// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

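/* Allocate one page of executable memory for a trampoline image. The page
 * stays writable (see the comment below) so it can be re-generated in place
 * as programs attach and detach.
 */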
void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep image as writable. The alternative is to keep flipping ro/rw
	 * every time a new program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	return image;
}

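/* Publish the image's address range to the BPF ksym infrastructure and
 * notify perf, so profilers can symbolize trampoline addresses.
 * bpf_image_ksym_del() below does the reverse on teardown.
 */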
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

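/* Find an existing trampoline for @key or create a new one. Serialized by
 * trampoline_mutex. Returns with a reference held, or NULL on allocation
 * failure.
 */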
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

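/* Returns 1 if @ip is an ftrace-managed patch site, 0 if it is not, and
 * -EFAULT if ftrace reports a patch site at a different address than
 * expected.
 */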
static int is_ftrace_location(void *ip)
{
	long addr;

	addr = ftrace_location((long)ip);
	if (!addr)
		return 0;
	if (WARN_ON_ONCE(addr != (long)ip))
		return -EFAULT;
	return 1;
}

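/* The helpers below install, update and remove the call into the trampoline
 * at the target function. ftrace-managed sites go through the ftrace
 * direct-call API so they cooperate with other ftrace users; everything else
 * is patched directly via bpf_arch_text_poke().
 */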
static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	ret = is_ftrace_location(ip);
	if (ret < 0)
		return ret;
	tr->func.ftrace_managed = ret;

	if (tr->func.ftrace_managed)
		ret = register_ftrace_direct((long)ip, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
	return ret;
}

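/* Snapshot the progs currently attached to @tr into a kcalloc'ed array
 * indexed by trampoline kind. *total is set to the overall prog count.
 * The caller must kfree() the returned array.
 */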
static struct bpf_tramp_progs *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
{
	const struct bpf_prog_aux *aux;
	struct bpf_tramp_progs *tprogs;
	struct bpf_prog **progs;
	int kind;

	*total = 0;
	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tprogs[kind].nr_progs = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		progs = tprogs[kind].progs;

		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist)
			*progs++ = aux->prog;
	}
	return tprogs;
}

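/* Final teardown step, deferred to a workqueue so it runs in process
 * context: drop the ksym, free the image page and its memory charge, and
 * release the percpu_ref.
 */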
static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_image_ksym_del(&im->ksym);
	bpf_jit_free_exec(im->image);
	bpf_jit_uncharge_modmem(1);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);
}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
		percpu_ref_kill(&im->pcref);
	else
		/* the case of fentry trampoline */
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	/* The trampoline image that calls the original function is using:
	 * rcu_read_lock_trace to protect sleepable bpf progs
	 * rcu_read_lock to protect normal bpf progs
	 * percpu_ref to protect trampoline itself
	 * rcu tasks to protect trampoline asm not covered by percpu_ref
	 * (the few asm insns before __bpf_tramp_enter and
	 *  after __bpf_tramp_exit)
	 *
	 * The trampoline is unreachable before bpf_tramp_image_put().
	 *
	 * First, patch the trampoline to avoid calling into fexit progs.
	 * The progs will be freed even if the original function is still
	 * executing or sleeping.
	 * In case of CONFIG_PREEMPTION=y use call_rcu_tasks() to wait for
	 * the first few asm instructions to execute and call into
	 * __bpf_tramp_enter->percpu_ref_get.
	 * Then use percpu_ref_kill to wait for the trampoline and the original
	 * function to finish.
	 * Then use call_rcu_tasks() to make sure the few asm insns in
	 * the trampoline epilogue are done as well.
	 *
	 * In the !PREEMPTION case a task that got interrupted in the first
	 * asm insns won't go through an RCU quiescent state which the
	 * percpu_ref_kill will be waiting for. Hence the first
	 * call_rcu_tasks() is not necessary.
	 */
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		WARN_ON(err);
		if (IS_ENABLED(CONFIG_PREEMPTION))
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		else
			percpu_ref_kill(&im->pcref);
		return;
	}

	/* The trampoline without fexit and fmod_ret progs doesn't call the
	 * original function and doesn't use percpu_ref.
	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * Then use call_rcu_tasks() to wait for the rest of trampoline asm
	 * and normal progs.
	 */
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}

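/* Allocate a bpf_tramp_image: the struct itself, the executable page for
 * the generated code, the percpu_ref guarding it, and a ksym named
 * "bpf_trampoline_<key>_<idx>" so the image shows up in kallsyms.
 */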
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(1);
	if (err)
		goto out_free_im;

	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec_page();
	if (!image)
		goto out_uncharge;

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
	bpf_image_ksym_add(image, ksym);
	return im;

out_free_image:
	bpf_jit_free_exec(im->image);
out_uncharge:
	bpf_jit_uncharge_modmem(1);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

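/* Regenerate the trampoline from the progs currently attached to @tr and
 * repoint the patch site from the old image to the new one. When the last
 * prog is removed, the patch site is unregistered and the current image is
 * released.
 */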
static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
	struct bpf_tramp_image *im;
	struct bpf_tramp_progs *tprogs;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	int err, total;

	tprogs = bpf_trampoline_get_progs(tr, &total);
	if (IS_ERR(tprogs))
		return PTR_ERR(tprogs);

	if (total == 0) {
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		tr->selector = 0;
		goto out;
	}

	im = bpf_tramp_image_alloc(tr->key, tr->selector);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, flags, tprogs,
					  tr->func.addr);
	if (err < 0)
		goto out;

	WARN_ON(tr->cur_image && tr->selector == 0);
	WARN_ON(!tr->cur_image && tr->selector);
	if (tr->cur_image)
		/* progs already running at this address */
		err = modify_fentry(tr, tr->cur_image->image, im->image);
	else
		/* first time registering */
		err = register_fentry(tr, im->image);
	if (err)
		goto out;
	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
	tr->selector++;
out:
	kfree(tprogs);
	return err;
}

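/* Map a prog's expected attach type to a trampoline slot. An LSM hook is
 * folded into fexit or fmod_ret depending on the hook's return type;
 * anything else is an extension prog that replaces the target.
 */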
static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void, so we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

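/* Attach @prog to @tr under tr->mutex. Extension progs replace the whole
 * target function and are mutually exclusive with fentry/fexit/fmod_ret
 * progs on the same trampoline.
 */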
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err = 0;
	int cnt = 0, i;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (tr->extension_prog) {
		/* Cannot attach fentry/fexit if extension prog is attached.
		 * Cannot overwrite extension prog either.
		 */
		err = -EBUSY;
		goto out;
	}

	for (i = 0; i < BPF_TRAMP_MAX; i++)
		cnt += tr->progs_cnt[i];

	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt) {
			err = -EBUSY;
			goto out;
		}
		tr->extension_prog = prog;
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					 prog->bpf_func);
		goto out;
	}
	if (cnt >= BPF_MAX_TRAMP_PROGS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
	hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr);
	if (err) {
		hlist_del(&prog->aux->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	mutex_unlock(&tr->mutex);
	return err;
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		goto out;
	}
	hlist_del(&prog->aux->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(tr);
out:
	mutex_unlock(&tr->mutex);
	return err;
}

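/* Look up or create the trampoline for @key, filling in the target's
 * function model and address on first use. A simplified sketch of a typical
 * caller (the real attach path lives elsewhere and handles more errors):
 *
 *	tr = bpf_trampoline_get(key, &tgt_info);
 *	if (!tr)
 *		return -ENOMEM;
 *	err = bpf_trampoline_link_prog(prog, tr);
 *	if (err)
 *		bpf_trampoline_put(tr);
 */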
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

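/* Drop a reference to @tr and free it when the last one goes away. All
 * progs must have been unlinked by then; the WARN_ON_ONCEs below catch
 * callers that get this wrong.
 */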
void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	int i;

	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));

	for (i = 0; i < BPF_TRAMP_MAX; i++)
		if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
			goto out;

	/* This code will be executed even when the last bpf_tramp_image
	 * is alive. All progs are detached from the trampoline and the
	 * trampoline image is patched with jmp into epilogue to skip
	 * fexit progs. The fentry-only trampoline will be freed via
	 * multiple rcu callbacks.
	 */
	hlist_del(&tr->hlist);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

/* The logic is similar to BPF_PROG_RUN, but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 */
u64 notrace __bpf_prog_enter(void)
	__acquires(RCU)
{
	u64 start = 0;

	rcu_read_lock();
	migrate_disable();
	if (static_branch_unlikely(&bpf_stats_enabled_key))
		start = sched_clock();
	return start;
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
	__releases(RCU)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter
	     * and disabled in __bpf_prog_exit.
	     * And vice versa.
	     * Hence check that 'start' is not zero.
	     */
	    start) {
		stats = this_cpu_ptr(prog->aux->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->cnt++;
		stats->nsecs += sched_clock() - start;
		u64_stats_update_end(&stats->syncp);
	}
	migrate_enable();
	rcu_read_unlock();
}

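/* Sleepable progs may fault, so they cannot run under rcu_read_lock().
 * They are protected by the tasks-trace RCU flavor instead; might_fault()
 * documents (and, with debugging enabled, checks) that faulting is allowed
 * here.
 */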
void notrace __bpf_prog_enter_sleepable(void)
{
	rcu_read_lock_trace();
	might_fault();
}

void notrace __bpf_prog_exit_sleepable(void)
{
	rcu_read_unlock_trace();
}

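/* Called from the generated trampoline code around the call to the original
 * function; holding the percpu_ref keeps the image alive while the original
 * function and any fexit progs run.
 */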
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

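/* Weak stub: architectures that support trampolines provide their own
 * implementation. On everything else, attaching fentry/fexit progs fails
 * with -ENOTSUPP.
 */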
int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_progs *tprogs,
			    void *orig_call)
{
	return -ENOTSUPP;
}

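/* Initialize the hash table buckets once at boot. */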
static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);