// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/static_call.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf_lsm.h>
#include <linux/delay.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex);

static int bpf_tramp_ftrace_ops_func(struct ftrace_ops *ops, enum ftrace_ops_cmd cmd)
{
	struct bpf_trampoline *tr = ops->private;
	int ret = 0;

	if (cmd == FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF) {
		/* This is called inside register_ftrace_direct(), so
		 * tr->mutex is already locked.
		 */
		lockdep_assert_held_once(&tr->mutex);

		/* Instead of updating the trampoline here, we propagate
		 * -EAGAIN to register_ftrace_direct(). Then we can
		 * retry register_ftrace_direct() after updating the
		 * trampoline.
		 */
		if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
		    !(tr->flags & BPF_TRAMP_F_ORIG_STACK)) {
			if (WARN_ON_ONCE(tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY))
				return -EBUSY;

			tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;
			return -EAGAIN;
		}

		return 0;
	}

	/* The normal locking order is
	 *    tr->mutex => direct_mutex (ftrace.c) => ftrace_lock (ftrace.c)
	 *
	 * The following two commands are called from
	 *
	 *   prepare_direct_functions_for_ipmodify
	 *   cleanup_direct_functions_after_ipmodify
	 *
	 * In both cases, direct_mutex is already locked. Use
	 * mutex_trylock(&tr->mutex) to avoid deadlock in a race condition
	 * (something else is making changes to this same trampoline).
	 */
	if (!mutex_trylock(&tr->mutex)) {
		/* sleep 1 ms to make sure whatever is holding tr->mutex makes
		 * some progress.
		 */
		msleep(1);
		return -EAGAIN;
	}

	switch (cmd) {
	case FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER:
		tr->flags |= BPF_TRAMP_F_SHARE_IPMODIFY;

		if ((tr->flags & BPF_TRAMP_F_CALL_ORIG) &&
		    !(tr->flags & BPF_TRAMP_F_ORIG_STACK))
			ret = bpf_trampoline_update(tr, false /* lock_direct_mutex */);
		break;
	case FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER:
		tr->flags &= ~BPF_TRAMP_F_SHARE_IPMODIFY;

		if (tr->flags & BPF_TRAMP_F_ORIG_STACK)
			ret = bpf_trampoline_update(tr, false /* lock_direct_mutex */);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&tr->mutex);
	return ret;
}
#endif
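
/*
 * Illustrative sketch (not part of the build): the trylock-and-retry
 * pattern used above, reduced to its essentials. Everything here is
 * hypothetical exposition; take_locks_safely() and the 'work' callback
 * are not real kernel symbols.
 */
#if 0
static int take_locks_safely(struct mutex *outer, struct mutex *inner_held,
			     int (*work)(void))
{
	int ret;

	/* 'inner_held' is already locked by the caller, so blocking on
	 * 'outer' here could deadlock against a thread taking the locks
	 * in the normal outer => inner order. Trylock instead and let
	 * the caller retry on -EAGAIN.
	 */
	if (!mutex_trylock(outer)) {
		msleep(1);	/* give the current owner time to progress */
		return -EAGAIN;
	}
	ret = work();
	mutex_unlock(outer);
	return ret;
}
#endif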

bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	enum bpf_attach_type eatype = prog->expected_attach_type;
	enum bpf_prog_type ptype = prog->type;

	return (ptype == BPF_PROG_TYPE_TRACING &&
		(eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
		 eatype == BPF_MODIFY_RETURN)) ||
		(ptype == BPF_PROG_TYPE_LSM && eatype == BPF_LSM_MAC);
}

void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	tr->fops = kzalloc(sizeof(struct ftrace_ops), GFP_KERNEL);
	if (!tr->fops) {
		kfree(tr);
		tr = NULL;
		goto out;
	}
	tr->fops->private = tr;
	tr->fops->ops_func = bpf_tramp_ftrace_ops_func;
#endif

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct(tr->fops, (long)old_addr, false);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr,
			 bool lock_direct_mutex)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed) {
		if (lock_direct_mutex)
			ret = modify_ftrace_direct(tr->fops, (long)new_addr);
		else
			ret = modify_ftrace_direct_nolock(tr->fops, (long)new_addr);
	} else {
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	}
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	unsigned long faddr;
	int ret;

	faddr = ftrace_location((unsigned long)ip);
	if (faddr) {
		if (!tr->fops)
			return -ENOTSUPP;
		tr->func.ftrace_managed = true;
	}

	if (tr->func.ftrace_managed) {
		ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
		ret = register_ftrace_direct(tr->fops, (long)new_addr);
	} else {
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
	}

	return ret;
}
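
/*
 * Illustrative sketch (not part of the build): how the three helpers
 * above pair up over a trampoline's lifetime. 'old_image'/'new_image'
 * are hypothetical names for the current and freshly JITed images, and
 * fentry_lifecycle_example() is exposition only.
 */
#if 0
static int fentry_lifecycle_example(struct bpf_trampoline *tr,
				    void *old_image, void *new_image)
{
	int err;

	/* first attachment: patch the target's fentry nop, or install a
	 * direct ftrace call if the target is ftrace-managed
	 */
	err = register_fentry(tr, old_image);
	if (err)
		return err;

	/* prog set changed: atomically redirect callers to the new image */
	err = modify_fentry(tr, old_image, new_image, true);
	if (err)
		return err;

	/* last prog detached: restore the original call site */
	return unregister_fentry(tr, new_image);
}
#endif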

static struct bpf_tramp_links *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
{
	struct bpf_tramp_link *link;
	struct bpf_tramp_links *tlinks;
	struct bpf_tramp_link **links;
	int kind;

	*total = 0;
	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tlinks[kind].nr_links = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		links = tlinks[kind].links;

		hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
			*ip_arg |= link->link.prog->call_get_func_ip;
			*links++ = link;
		}
	}
	return tlinks;
}

static void bpf_tramp_image_free(struct bpf_tramp_image *im)
{
	bpf_image_ksym_del(&im->ksym);
	bpf_jit_free_exec(im->image);
	bpf_jit_uncharge_modmem(PAGE_SIZE);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_tramp_image_free(im);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);
}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
		percpu_ref_kill(&im->pcref);
	else
		/* the case of fentry trampoline */
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	/* The trampoline image that calls the original function is using:
	 * rcu_read_lock_trace to protect sleepable bpf progs
	 * rcu_read_lock to protect normal bpf progs
	 * percpu_ref to protect trampoline itself
	 * rcu tasks to protect trampoline asm not covered by percpu_ref
	 * (which are a few asm insns before __bpf_tramp_enter and
	 *  after __bpf_tramp_exit)
	 *
	 * The trampoline is unreachable before bpf_tramp_image_put().
	 *
	 * First, patch the trampoline to avoid calling into fexit progs.
	 * The progs will be freed even if the original function is still
	 * executing or sleeping.
	 * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait on the
	 * first few asm instructions to execute and call into
	 * __bpf_tramp_enter->percpu_ref_get.
	 * Then use percpu_ref_kill to wait for the trampoline and the original
	 * function to finish.
	 * Then use call_rcu_tasks() to make sure the few asm insns in
	 * the trampoline epilogue are done as well.
	 *
	 * In the !PREEMPT case a task that got interrupted in the first asm
	 * insns won't go through an RCU quiescent state which
	 * percpu_ref_kill will be waiting for. Hence the first
	 * call_rcu_tasks() is not necessary.
	 */
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		WARN_ON(err);
		if (IS_ENABLED(CONFIG_PREEMPTION))
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		else
			percpu_ref_kill(&im->pcref);
		return;
	}

	/* The trampoline without fexit and fmod_ret progs doesn't call the
	 * original function and doesn't use percpu_ref.
	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * Then use call_rcu_tasks() to wait for the rest of trampoline asm
	 * and normal progs.
	 */
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}
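
/*
 * Illustrative sketch (not part of the build): the percpu_ref plus
 * deferred-free pattern the teardown above chains together, reduced to
 * a generic object. All names here are hypothetical.
 */
#if 0
struct obj {
	struct percpu_ref ref;
	struct rcu_head rcu;
	struct work_struct work;
};

static void obj_free_work(struct work_struct *work)
{
	struct obj *o = container_of(work, struct obj, work);

	percpu_ref_exit(&o->ref);
	kfree(o);
}

static void obj_free_rcu(struct rcu_head *rcu)
{
	struct obj *o = container_of(rcu, struct obj, rcu);

	/* heavier teardown can't run in RCU callback context; bounce to
	 * a workqueue like bpf_tramp_image does
	 */
	INIT_WORK(&o->work, obj_free_work);
	schedule_work(&o->work);
}

static void obj_release(struct percpu_ref *ref)
{
	struct obj *o = container_of(ref, struct obj, ref);

	/* all ref holders are gone; wait one more grace period for code
	 * running outside the ref-protected region, then free
	 */
	call_rcu_tasks(&o->rcu, obj_free_rcu);
}
#endif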

static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(PAGE_SIZE);
	if (err)
		goto out_free_im;

	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		goto out_uncharge;
	set_vm_flush_reset_perms(image);

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", key);
	bpf_image_ksym_add(image, ksym);
	return im;

out_free_image:
	bpf_jit_free_exec(im->image);
out_uncharge:
	bpf_jit_uncharge_modmem(PAGE_SIZE);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mutex)
{
	struct bpf_tramp_image *im;
	struct bpf_tramp_links *tlinks;
	u32 orig_flags = tr->flags;
	bool ip_arg = false;
	int err, total;

	tlinks = bpf_trampoline_get_progs(tr, &total, &ip_arg);
	if (IS_ERR(tlinks))
		return PTR_ERR(tlinks);

	if (total == 0) {
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		goto out;
	}

	im = bpf_tramp_image_alloc(tr->key);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	/* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
	tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);

	if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
	    tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
		/* NOTE: BPF_TRAMP_F_RESTORE_REGS and BPF_TRAMP_F_SKIP_FRAME
		 * should not be set together.
		 */
		tr->flags |= BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
	} else {
		tr->flags |= BPF_TRAMP_F_RESTORE_REGS;
	}

	if (ip_arg)
		tr->flags |= BPF_TRAMP_F_IP_ARG;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
again:
	if ((tr->flags & BPF_TRAMP_F_SHARE_IPMODIFY) &&
	    (tr->flags & BPF_TRAMP_F_CALL_ORIG))
		tr->flags |= BPF_TRAMP_F_ORIG_STACK;
#endif

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, tr->flags, tlinks,
					  tr->func.addr);
	if (err < 0)
		goto out_free;

	set_memory_rox((long)im->image, 1);

	WARN_ON(tr->cur_image && total == 0);
	if (tr->cur_image)
		/* progs already running at this address */
		err = modify_fentry(tr, tr->cur_image->image, im->image, lock_direct_mutex);
	else
		/* first time registering */
		err = register_fentry(tr, im->image);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	if (err == -EAGAIN) {
		/* -EAGAIN from bpf_tramp_ftrace_ops_func. Now
		 * BPF_TRAMP_F_SHARE_IPMODIFY is set, so we can generate the
		 * trampoline again, and retry the registration.
		 */
		/* reset fops->func and fops->trampoline for re-register */
		tr->fops->func = NULL;
		tr->fops->trampoline = 0;

		/* reset im->image memory attr for arch_prepare_bpf_trampoline */
		set_memory_nx((long)im->image, 1);
		set_memory_rw((long)im->image, 1);
		goto again;
	}
#endif
	if (err)
		goto out_free;

	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
out:
	/* If any error happens, restore previous flags */
	if (err)
		tr->flags = orig_flags;
	kfree(tlinks);
	return err;

out_free:
	bpf_tramp_image_free(im);
	goto out;
}

static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void, so we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

static int bpf_freplace_check_tgt_prog(struct bpf_prog *tgt_prog)
{
	struct bpf_prog_aux *aux = tgt_prog->aux;

	guard(mutex)(&aux->ext_mutex);
	if (aux->prog_array_member_cnt)
		/* Program extensions cannot extend the target prog when the
		 * target prog has been added to a prog_array map as a tail
		 * callee. This prevents a potential infinite loop like:
		 * tgt prog entry -> tgt prog subprog -> freplace prog entry
		 * --tailcall-> tgt prog entry.
		 */
		return -EBUSY;

	aux->is_extended = true;
	return 0;
}

static int __bpf_trampoline_link_prog(struct bpf_tramp_link *link,
				      struct bpf_trampoline *tr,
				      struct bpf_prog *tgt_prog)
{
	enum bpf_tramp_prog_type kind;
	struct bpf_tramp_link *link_exiting;
	int err = 0;
	int cnt = 0, i;

	kind = bpf_attach_type_to_tramp(link->link.prog);
	if (tr->extension_prog)
		/* cannot attach fentry/fexit if an extension prog is
		 * attached. cannot overwrite the extension prog either.
		 */
		return -EBUSY;

	for (i = 0; i < BPF_TRAMP_MAX; i++)
		cnt += tr->progs_cnt[i];

	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt)
			return -EBUSY;
		err = bpf_freplace_check_tgt_prog(tgt_prog);
		if (err)
			return err;
		tr->extension_prog = link->link.prog;
		return bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					  link->link.prog->bpf_func);
	}
	if (cnt >= BPF_MAX_TRAMP_LINKS)
		return -E2BIG;
	if (!hlist_unhashed(&link->tramp_hlist))
		/* prog already linked */
		return -EBUSY;
	hlist_for_each_entry(link_exiting, &tr->progs_hlist[kind], tramp_hlist) {
		if (link_exiting->link.prog != link->link.prog)
			continue;
		/* prog already linked */
		return -EBUSY;
	}

	hlist_add_head(&link->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr, true /* lock_direct_mutex */);
	if (err) {
		hlist_del_init(&link->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
	return err;
}

int bpf_trampoline_link_prog(struct bpf_tramp_link *link,
			     struct bpf_trampoline *tr,
			     struct bpf_prog *tgt_prog)
{
	int err;

	mutex_lock(&tr->mutex);
	err = __bpf_trampoline_link_prog(link, tr, tgt_prog);
	mutex_unlock(&tr->mutex);
	return err;
}

static int __bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
					struct bpf_trampoline *tr,
					struct bpf_prog *tgt_prog)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(link->link.prog);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		guard(mutex)(&tgt_prog->aux->ext_mutex);
		tgt_prog->aux->is_extended = false;
		return err;
	}
	hlist_del_init(&link->tramp_hlist);
	tr->progs_cnt[kind]--;
	return bpf_trampoline_update(tr, true /* lock_direct_mutex */);
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
			       struct bpf_trampoline *tr,
			       struct bpf_prog *tgt_prog)
{
	int err;

	mutex_lock(&tr->mutex);
	err = __bpf_trampoline_unlink_prog(link, tr, tgt_prog);
	mutex_unlock(&tr->mutex);
	return err;
}

#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
static void bpf_shim_tramp_link_release(struct bpf_link *link)
{
	struct bpf_shim_tramp_link *shim_link =
		container_of(link, struct bpf_shim_tramp_link, link.link);

	/* paired with 'shim_link->trampoline = tr' in bpf_trampoline_link_cgroup_shim */
	if (!shim_link->trampoline)
		return;

	WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link, shim_link->trampoline, NULL));
	bpf_trampoline_put(shim_link->trampoline);
}

static void bpf_shim_tramp_link_dealloc(struct bpf_link *link)
{
	struct bpf_shim_tramp_link *shim_link =
		container_of(link, struct bpf_shim_tramp_link, link.link);

	kfree(shim_link);
}

static const struct bpf_link_ops bpf_shim_tramp_link_lops = {
	.release = bpf_shim_tramp_link_release,
	.dealloc = bpf_shim_tramp_link_dealloc,
};

static struct bpf_shim_tramp_link *cgroup_shim_alloc(const struct bpf_prog *prog,
						     bpf_func_t bpf_func,
						     int cgroup_atype)
{
	struct bpf_shim_tramp_link *shim_link = NULL;
	struct bpf_prog *p;

	shim_link = kzalloc(sizeof(*shim_link), GFP_USER);
	if (!shim_link)
		return NULL;

	p = bpf_prog_alloc(1, 0);
	if (!p) {
		kfree(shim_link);
		return NULL;
	}

	p->jited = false;
	p->bpf_func = bpf_func;

	p->aux->cgroup_atype = cgroup_atype;
	p->aux->attach_func_proto = prog->aux->attach_func_proto;
	p->aux->attach_btf_id = prog->aux->attach_btf_id;
	p->aux->attach_btf = prog->aux->attach_btf;
	btf_get(p->aux->attach_btf);
	p->type = BPF_PROG_TYPE_LSM;
	p->expected_attach_type = BPF_LSM_MAC;
	bpf_prog_inc(p);
	bpf_link_init(&shim_link->link.link, BPF_LINK_TYPE_UNSPEC,
		      &bpf_shim_tramp_link_lops, p);
	bpf_cgroup_atype_get(p->aux->attach_btf_id, cgroup_atype);

	return shim_link;
}

static struct bpf_shim_tramp_link *cgroup_shim_find(struct bpf_trampoline *tr,
						    bpf_func_t bpf_func)
{
	struct bpf_tramp_link *link;
	int kind;

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		hlist_for_each_entry(link, &tr->progs_hlist[kind], tramp_hlist) {
			struct bpf_prog *p = link->link.prog;

			if (p->bpf_func == bpf_func)
				return container_of(link, struct bpf_shim_tramp_link, link);
		}
	}

	return NULL;
}

int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
				    int cgroup_atype)
{
	struct bpf_shim_tramp_link *shim_link = NULL;
	struct bpf_attach_target_info tgt_info = {};
	struct bpf_trampoline *tr;
	bpf_func_t bpf_func;
	u64 key;
	int err;

	err = bpf_check_attach_target(NULL, prog, NULL,
				      prog->aux->attach_btf_id,
				      &tgt_info);
	if (err)
		return err;

	key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
					 prog->aux->attach_btf_id);

	bpf_lsm_find_cgroup_shim(prog, &bpf_func);
	tr = bpf_trampoline_get(key, &tgt_info);
	if (!tr)
		return -ENOMEM;

	mutex_lock(&tr->mutex);

	shim_link = cgroup_shim_find(tr, bpf_func);
	if (shim_link) {
		/* Reusing an existing shim attached by another program. */
		bpf_link_inc(&shim_link->link.link);

		mutex_unlock(&tr->mutex);
		bpf_trampoline_put(tr); /* bpf_trampoline_get above */
		return 0;
	}

	/* Allocate and install a new shim. */

	shim_link = cgroup_shim_alloc(prog, bpf_func, cgroup_atype);
	if (!shim_link) {
		err = -ENOMEM;
		goto err;
	}

	err = __bpf_trampoline_link_prog(&shim_link->link, tr, NULL);
	if (err)
		goto err;

	shim_link->trampoline = tr;
	/* note, we're still holding tr refcnt from above */

	mutex_unlock(&tr->mutex);

	return 0;
err:
	mutex_unlock(&tr->mutex);

	if (shim_link)
		bpf_link_put(&shim_link->link.link);

	/* have to release tr while _not_ holding its mutex */
	bpf_trampoline_put(tr); /* bpf_trampoline_get above */

	return err;
}

void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
{
	struct bpf_shim_tramp_link *shim_link = NULL;
	struct bpf_trampoline *tr;
	bpf_func_t bpf_func;
	u64 key;

	key = bpf_trampoline_compute_key(NULL, prog->aux->attach_btf,
					 prog->aux->attach_btf_id);

	bpf_lsm_find_cgroup_shim(prog, &bpf_func);
	tr = bpf_trampoline_lookup(key);
	if (WARN_ON_ONCE(!tr))
		return;

	mutex_lock(&tr->mutex);
	shim_link = cgroup_shim_find(tr, bpf_func);
	mutex_unlock(&tr->mutex);

	if (shim_link)
		bpf_link_put(&shim_link->link.link);

	bpf_trampoline_put(tr); /* bpf_trampoline_lookup above */
}
#endif

struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	int i;

	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));

	for (i = 0; i < BPF_TRAMP_MAX; i++)
		if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[i])))
			goto out;

	/* This code will be executed even when the last bpf_tramp_image
	 * is alive. All progs are detached from the trampoline and the
	 * trampoline image is patched with jmp into epilogue to skip
	 * fexit progs. The fentry-only trampoline will be freed via
	 * multiple rcu callbacks.
	 */
	hlist_del(&tr->hlist);
	if (tr->fops) {
		ftrace_free_filter(tr->fops);
		kfree(tr->fops);
	}
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}
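
/*
 * Illustrative sketch (not part of the build): the typical get/link/put
 * flow a caller drives for a fentry/fexit attachment. Error handling is
 * trimmed, and attach_example() with its NULL tgt_prog is hypothetical
 * exposition, not the syscall layer's actual code.
 */
#if 0
static int attach_example(struct bpf_tramp_link *link, u64 key,
			  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;
	int err;

	tr = bpf_trampoline_get(key, tgt_info);	/* lookup or create */
	if (!tr)
		return -ENOMEM;

	/* JITs a new image for the prog set and patches the target */
	err = bpf_trampoline_link_prog(link, tr, NULL);
	if (err)
		bpf_trampoline_put(tr);		/* drop the ref on failure */
	return err;
}
#endif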

#define NO_START_TIME 1
static __always_inline u64 notrace bpf_prog_start_time(void)
{
	u64 start = NO_START_TIME;

	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		start = sched_clock();
		if (unlikely(!start))
			start = NO_START_TIME;
	}
	return start;
}

/* The logic is similar to bpf_prog_run(), but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 *
 * __bpf_prog_enter returns:
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 *     This is the start time.
 */
static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		bpf_prog_inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}
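
/*
 * Illustrative sketch (not part of the build): the enter/run/exit
 * sequence a JITed trampoline emits around each fentry prog, per the
 * comment above. run_one_prog() is a hypothetical helper; real
 * trampolines open-code this in generated assembly.
 */
#if 0
static void run_one_prog(struct bpf_prog *prog, void *ctx)
{
	struct bpf_tramp_run_ctx run_ctx = {};
	u64 start;

	start = __bpf_prog_enter_recur(prog, &run_ctx);
	if (start)	/* 0 means recursion was detected: skip the prog */
		prog->bpf_func(ctx, prog->insnsi);
	/* exit is always called: it undoes the enter-side state and,
	 * when start > NO_START_TIME, records the runtime
	 */
	__bpf_prog_exit_recur(prog, start, &run_ctx);
}
#endif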

static void notrace update_prog_stats(struct bpf_prog *prog,
				      u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter*
	     * and disabled in __bpf_prog_exit*.
	     * And vice versa.
	     * Hence check that 'start' is valid.
	     */
	    start > NO_START_TIME) {
		unsigned long flags;

		stats = this_cpu_ptr(prog->stats);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->cnt);
		u64_stats_add(&stats->nsecs, sched_clock() - start);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}
}

static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
					  struct bpf_tramp_run_ctx *run_ctx)
	__releases(RCU)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock();
}

static u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
					       struct bpf_tramp_run_ctx *run_ctx)
	__acquires(RCU)
{
	/* Runtime stats are exported via actual BPF_LSM_CGROUP
	 * programs, not the shims.
	 */
	rcu_read_lock();
	migrate_disable();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	return NO_START_TIME;
}

static void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
					       struct bpf_tramp_run_ctx *run_ctx)
	__releases(RCU)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	migrate_enable();
	rcu_read_unlock();
}

u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
					     struct bpf_tramp_run_ctx *run_ctx)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		bpf_prog_inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
					     struct bpf_tramp_run_ctx *run_ctx)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock_trace();
}

static u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog,
					      struct bpf_tramp_run_ctx *run_ctx)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	return bpf_prog_start_time();
}

static void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
					      struct bpf_tramp_run_ctx *run_ctx)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	migrate_enable();
	rcu_read_unlock_trace();
}

static u64 notrace __bpf_prog_enter(struct bpf_prog *prog,
				    struct bpf_tramp_run_ctx *run_ctx)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);

	return bpf_prog_start_time();
}

static void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start,
				    struct bpf_tramp_run_ctx *run_ctx)
	__releases(RCU)
{
	bpf_reset_run_ctx(run_ctx->saved_run_ctx);

	update_prog_stats(prog, start);
	migrate_enable();
	rcu_read_unlock();
}

void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog)
{
	bool sleepable = prog->aux->sleepable;

	if (bpf_prog_check_recur(prog))
		return sleepable ? __bpf_prog_enter_sleepable_recur :
			__bpf_prog_enter_recur;

	if (resolve_prog_type(prog) == BPF_PROG_TYPE_LSM &&
	    prog->expected_attach_type == BPF_LSM_CGROUP)
		return __bpf_prog_enter_lsm_cgroup;

	return sleepable ? __bpf_prog_enter_sleepable : __bpf_prog_enter;
}

bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog)
{
	bool sleepable = prog->aux->sleepable;

	if (bpf_prog_check_recur(prog))
		return sleepable ? __bpf_prog_exit_sleepable_recur :
			__bpf_prog_exit_recur;

	if (resolve_prog_type(prog) == BPF_PROG_TYPE_LSM &&
	    prog->expected_attach_type == BPF_LSM_CGROUP)
		return __bpf_prog_exit_lsm_cgroup;

	return sleepable ? __bpf_prog_exit_sleepable : __bpf_prog_exit;
}

int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_links *tlinks,
			    void *orig_call)
{
	return -ENOTSUPP;
}

static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);