1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/btf.h>
6 #include <linux/bpf-cgroup.h>
7 #include <linux/cgroup.h>
8 #include <linux/rcupdate.h>
9 #include <linux/random.h>
10 #include <linux/smp.h>
11 #include <linux/topology.h>
12 #include <linux/ktime.h>
13 #include <linux/sched.h>
14 #include <linux/uidgid.h>
15 #include <linux/filter.h>
16 #include <linux/ctype.h>
17 #include <linux/jiffies.h>
18 #include <linux/pid_namespace.h>
19 #include <linux/poison.h>
20 #include <linux/proc_ns.h>
21 #include <linux/sched/task.h>
22 #include <linux/security.h>
23 #include <linux/btf_ids.h>
24 #include <linux/bpf_mem_alloc.h>
25 #include <linux/kasan.h>
26 
27 #include "../../lib/kstrtox.h"
28 
29 /* If kernel subsystem is allowing eBPF programs to call this function,
30  * inside its own verifier_ops->get_func_proto() callback it should return
31  * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments
32  *
33  * Different map implementations will rely on rcu in map methods
34  * lookup/update/delete, therefore eBPF programs must run under rcu lock
35  * if program is allowed to access maps, so check rcu_read_lock_held() or
36  * rcu_read_lock_trace_held() in all three functions.
37  */
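/* Illustrative sketch of such a get_func_proto() callback ("my_subsys" is a
 * hypothetical subsystem name, not something defined in this file):
 *
 *	static const struct bpf_func_proto *
 *	my_subsys_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		default:
 *			return NULL;
 *		}
 *	}
 */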
38 BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
39 {
40 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
41 		     !rcu_read_lock_bh_held());
42 	return (unsigned long) map->ops->map_lookup_elem(map, key);
43 }
44 
45 const struct bpf_func_proto bpf_map_lookup_elem_proto = {
46 	.func		= bpf_map_lookup_elem,
47 	.gpl_only	= false,
48 	.pkt_access	= true,
49 	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
50 	.arg1_type	= ARG_CONST_MAP_PTR,
51 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
52 };
53 
54 BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
55 	   void *, value, u64, flags)
56 {
57 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
58 		     !rcu_read_lock_bh_held());
59 	return map->ops->map_update_elem(map, key, value, flags);
60 }
61 
62 const struct bpf_func_proto bpf_map_update_elem_proto = {
63 	.func		= bpf_map_update_elem,
64 	.gpl_only	= false,
65 	.pkt_access	= true,
66 	.ret_type	= RET_INTEGER,
67 	.arg1_type	= ARG_CONST_MAP_PTR,
68 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
69 	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
70 	.arg4_type	= ARG_ANYTHING,
71 };
72 
73 BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
74 {
75 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
76 		     !rcu_read_lock_bh_held());
77 	return map->ops->map_delete_elem(map, key);
78 }
79 
80 const struct bpf_func_proto bpf_map_delete_elem_proto = {
81 	.func		= bpf_map_delete_elem,
82 	.gpl_only	= false,
83 	.pkt_access	= true,
84 	.ret_type	= RET_INTEGER,
85 	.arg1_type	= ARG_CONST_MAP_PTR,
86 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
87 };
88 
89 BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
90 {
91 	return map->ops->map_push_elem(map, value, flags);
92 }
93 
94 const struct bpf_func_proto bpf_map_push_elem_proto = {
95 	.func		= bpf_map_push_elem,
96 	.gpl_only	= false,
97 	.pkt_access	= true,
98 	.ret_type	= RET_INTEGER,
99 	.arg1_type	= ARG_CONST_MAP_PTR,
100 	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
101 	.arg3_type	= ARG_ANYTHING,
102 };
103 
104 BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
105 {
106 	return map->ops->map_pop_elem(map, value);
107 }
108 
109 const struct bpf_func_proto bpf_map_pop_elem_proto = {
110 	.func		= bpf_map_pop_elem,
111 	.gpl_only	= false,
112 	.ret_type	= RET_INTEGER,
113 	.arg1_type	= ARG_CONST_MAP_PTR,
114 	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
115 };
116 
117 BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
118 {
119 	return map->ops->map_peek_elem(map, value);
120 }
121 
122 const struct bpf_func_proto bpf_map_peek_elem_proto = {
123 	.func		= bpf_map_peek_elem,
124 	.gpl_only	= false,
125 	.ret_type	= RET_INTEGER,
126 	.arg1_type	= ARG_CONST_MAP_PTR,
127 	.arg2_type	= ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
128 };
129 
130 BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
131 {
132 	WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
133 		     !rcu_read_lock_bh_held());
134 	return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu);
135 }
136 
137 const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = {
138 	.func		= bpf_map_lookup_percpu_elem,
139 	.gpl_only	= false,
140 	.pkt_access	= true,
141 	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
142 	.arg1_type	= ARG_CONST_MAP_PTR,
143 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
144 	.arg3_type	= ARG_ANYTHING,
145 };
146 
147 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
148 	.func		= bpf_user_rnd_u32,
149 	.gpl_only	= false,
150 	.ret_type	= RET_INTEGER,
151 };
152 
153 BPF_CALL_0(bpf_get_smp_processor_id)
154 {
155 	return smp_processor_id();
156 }
157 
158 const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
159 	.func		= bpf_get_smp_processor_id,
160 	.gpl_only	= false,
161 	.ret_type	= RET_INTEGER,
162 	.allow_fastcall	= true,
163 };
164 
165 BPF_CALL_0(bpf_get_numa_node_id)
166 {
167 	return numa_node_id();
168 }
169 
170 const struct bpf_func_proto bpf_get_numa_node_id_proto = {
171 	.func		= bpf_get_numa_node_id,
172 	.gpl_only	= false,
173 	.ret_type	= RET_INTEGER,
174 };
175 
176 BPF_CALL_0(bpf_ktime_get_ns)
177 {
178 	/* NMI safe access to clock monotonic */
179 	return ktime_get_mono_fast_ns();
180 }
181 
182 const struct bpf_func_proto bpf_ktime_get_ns_proto = {
183 	.func		= bpf_ktime_get_ns,
184 	.gpl_only	= false,
185 	.ret_type	= RET_INTEGER,
186 };
187 
188 BPF_CALL_0(bpf_ktime_get_boot_ns)
189 {
190 	/* NMI safe access to clock boottime */
191 	return ktime_get_boot_fast_ns();
192 }
193 
194 const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
195 	.func		= bpf_ktime_get_boot_ns,
196 	.gpl_only	= false,
197 	.ret_type	= RET_INTEGER,
198 };
199 
200 BPF_CALL_0(bpf_ktime_get_coarse_ns)
201 {
202 	return ktime_get_coarse_ns();
203 }
204 
205 const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = {
206 	.func		= bpf_ktime_get_coarse_ns,
207 	.gpl_only	= false,
208 	.ret_type	= RET_INTEGER,
209 };
210 
211 BPF_CALL_0(bpf_ktime_get_tai_ns)
212 {
213 	/* NMI safe access to clock tai */
214 	return ktime_get_tai_fast_ns();
215 }
216 
217 const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = {
218 	.func		= bpf_ktime_get_tai_ns,
219 	.gpl_only	= false,
220 	.ret_type	= RET_INTEGER,
221 };
222 
223 BPF_CALL_0(bpf_get_current_pid_tgid)
224 {
225 	struct task_struct *task = current;
226 
227 	if (unlikely(!task))
228 		return -EINVAL;
229 
230 	return (u64) task->tgid << 32 | task->pid;
231 }
232 
233 const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
234 	.func		= bpf_get_current_pid_tgid,
235 	.gpl_only	= false,
236 	.ret_type	= RET_INTEGER,
237 };
238 
239 BPF_CALL_0(bpf_get_current_uid_gid)
240 {
241 	struct task_struct *task = current;
242 	kuid_t uid;
243 	kgid_t gid;
244 
245 	if (unlikely(!task))
246 		return -EINVAL;
247 
248 	current_uid_gid(&uid, &gid);
249 	return (u64) from_kgid(&init_user_ns, gid) << 32 |
250 		     from_kuid(&init_user_ns, uid);
251 }
252 
253 const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
254 	.func		= bpf_get_current_uid_gid,
255 	.gpl_only	= false,
256 	.ret_type	= RET_INTEGER,
257 };
258 
259 BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
260 {
261 	struct task_struct *task = current;
262 
263 	if (unlikely(!task))
264 		goto err_clear;
265 
266 	/* Verifier guarantees that size > 0 */
267 	strscpy_pad(buf, task->comm, size);
268 	return 0;
269 err_clear:
270 	memset(buf, 0, size);
271 	return -EINVAL;
272 }
273 
274 const struct bpf_func_proto bpf_get_current_comm_proto = {
275 	.func		= bpf_get_current_comm,
276 	.gpl_only	= false,
277 	.ret_type	= RET_INTEGER,
278 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
279 	.arg2_type	= ARG_CONST_SIZE,
280 };
281 
282 #if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)
283 
284 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
285 {
286 	arch_spinlock_t *l = (void *)lock;
287 	union {
288 		__u32 val;
289 		arch_spinlock_t lock;
290 	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };
291 
292 	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
293 	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
294 	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
295 	preempt_disable();
296 	arch_spin_lock(l);
297 }
298 
299 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
300 {
301 	arch_spinlock_t *l = (void *)lock;
302 
303 	arch_spin_unlock(l);
304 	preempt_enable();
305 }
306 
307 #else
308 
309 static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
310 {
311 	atomic_t *l = (void *)lock;
312 
313 	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
314 	do {
315 		atomic_cond_read_relaxed(l, !VAL);
316 	} while (atomic_xchg(l, 1));
317 }
318 
319 static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
320 {
321 	atomic_t *l = (void *)lock;
322 
323 	atomic_set_release(l, 0);
324 }
325 
326 #endif
327 
328 static DEFINE_PER_CPU(unsigned long, irqsave_flags);
329 
330 static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
331 {
332 	unsigned long flags;
333 
334 	local_irq_save(flags);
335 	__bpf_spin_lock(lock);
336 	__this_cpu_write(irqsave_flags, flags);
337 }
338 
339 NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
340 {
341 	__bpf_spin_lock_irqsave(lock);
342 	return 0;
343 }
344 
345 const struct bpf_func_proto bpf_spin_lock_proto = {
346 	.func		= bpf_spin_lock,
347 	.gpl_only	= false,
348 	.ret_type	= RET_VOID,
349 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
350 	.arg1_btf_id    = BPF_PTR_POISON,
351 };
352 
353 static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
354 {
355 	unsigned long flags;
356 
357 	flags = __this_cpu_read(irqsave_flags);
358 	__bpf_spin_unlock(lock);
359 	local_irq_restore(flags);
360 }
361 
362 NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
363 {
364 	__bpf_spin_unlock_irqrestore(lock);
365 	return 0;
366 }
367 
368 const struct bpf_func_proto bpf_spin_unlock_proto = {
369 	.func		= bpf_spin_unlock,
370 	.gpl_only	= false,
371 	.ret_type	= RET_VOID,
372 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
373 	.arg1_btf_id    = BPF_PTR_POISON,
374 };
375 
376 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
377 			   bool lock_src)
378 {
379 	struct bpf_spin_lock *lock;
380 
381 	if (lock_src)
382 		lock = src + map->record->spin_lock_off;
383 	else
384 		lock = dst + map->record->spin_lock_off;
385 	preempt_disable();
386 	__bpf_spin_lock_irqsave(lock);
387 	copy_map_value(map, dst, src);
388 	__bpf_spin_unlock_irqrestore(lock);
389 	preempt_enable();
390 }
391 
392 BPF_CALL_0(bpf_jiffies64)
393 {
394 	return get_jiffies_64();
395 }
396 
397 const struct bpf_func_proto bpf_jiffies64_proto = {
398 	.func		= bpf_jiffies64,
399 	.gpl_only	= false,
400 	.ret_type	= RET_INTEGER,
401 };
402 
403 #ifdef CONFIG_CGROUPS
404 BPF_CALL_0(bpf_get_current_cgroup_id)
405 {
406 	struct cgroup *cgrp;
407 	u64 cgrp_id;
408 
409 	rcu_read_lock();
410 	cgrp = task_dfl_cgroup(current);
411 	cgrp_id = cgroup_id(cgrp);
412 	rcu_read_unlock();
413 
414 	return cgrp_id;
415 }
416 
417 const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
418 	.func		= bpf_get_current_cgroup_id,
419 	.gpl_only	= false,
420 	.ret_type	= RET_INTEGER,
421 };
422 
423 BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
424 {
425 	struct cgroup *cgrp;
426 	struct cgroup *ancestor;
427 	u64 cgrp_id;
428 
429 	rcu_read_lock();
430 	cgrp = task_dfl_cgroup(current);
431 	ancestor = cgroup_ancestor(cgrp, ancestor_level);
432 	cgrp_id = ancestor ? cgroup_id(ancestor) : 0;
433 	rcu_read_unlock();
434 
435 	return cgrp_id;
436 }
437 
438 const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
439 	.func		= bpf_get_current_ancestor_cgroup_id,
440 	.gpl_only	= false,
441 	.ret_type	= RET_INTEGER,
442 	.arg1_type	= ARG_ANYTHING,
443 };
444 #endif /* CONFIG_CGROUPS */
445 
446 #define BPF_STRTOX_BASE_MASK 0x1F
447 
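/* The low bits of the "flags" argument of the bpf_strtol()/bpf_strtoul()
 * helpers select the numeric base (0, 8, 10 or 16); all other flag bits must
 * be zero. Illustrative BPF-program-side sketch (a minimal example, not part
 * of this file):
 *
 *	long val;
 *
 *	bpf_strtol("  -42", 5, 10, &val);	// base 10, val == -42
 *	bpf_strtol("0x1f", 4, 0, &val);		// base 0 auto-detects hex, val == 31
 */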
448 static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
449 			  unsigned long long *res, bool *is_negative)
450 {
451 	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
452 	const char *cur_buf = buf;
453 	size_t cur_len = buf_len;
454 	unsigned int consumed;
455 	size_t val_len;
456 	char str[64];
457 
458 	if (!buf || !buf_len || !res || !is_negative)
459 		return -EINVAL;
460 
461 	if (base != 0 && base != 8 && base != 10 && base != 16)
462 		return -EINVAL;
463 
464 	if (flags & ~BPF_STRTOX_BASE_MASK)
465 		return -EINVAL;
466 
467 	while (cur_buf < buf + buf_len && isspace(*cur_buf))
468 		++cur_buf;
469 
470 	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
471 	if (*is_negative)
472 		++cur_buf;
473 
474 	consumed = cur_buf - buf;
475 	cur_len -= consumed;
476 	if (!cur_len)
477 		return -EINVAL;
478 
479 	cur_len = min(cur_len, sizeof(str) - 1);
480 	memcpy(str, cur_buf, cur_len);
481 	str[cur_len] = '\0';
482 	cur_buf = str;
483 
484 	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
485 	val_len = _parse_integer(cur_buf, base, res);
486 
487 	if (val_len & KSTRTOX_OVERFLOW)
488 		return -ERANGE;
489 
490 	if (val_len == 0)
491 		return -EINVAL;
492 
493 	cur_buf += val_len;
494 	consumed += cur_buf - str;
495 
496 	return consumed;
497 }
498 
499 static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
500 			 long long *res)
501 {
502 	unsigned long long _res;
503 	bool is_negative;
504 	int err;
505 
506 	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
507 	if (err < 0)
508 		return err;
509 	if (is_negative) {
510 		if ((long long)-_res > 0)
511 			return -ERANGE;
512 		*res = -_res;
513 	} else {
514 		if ((long long)_res < 0)
515 			return -ERANGE;
516 		*res = _res;
517 	}
518 	return err;
519 }
520 
521 BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
522 	   s64 *, res)
523 {
524 	long long _res;
525 	int err;
526 
527 	*res = 0;
528 	err = __bpf_strtoll(buf, buf_len, flags, &_res);
529 	if (err < 0)
530 		return err;
531 	*res = _res;
532 	return err;
533 }
534 
535 const struct bpf_func_proto bpf_strtol_proto = {
536 	.func		= bpf_strtol,
537 	.gpl_only	= false,
538 	.ret_type	= RET_INTEGER,
539 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
540 	.arg2_type	= ARG_CONST_SIZE,
541 	.arg3_type	= ARG_ANYTHING,
542 	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
543 	.arg4_size	= sizeof(s64),
544 };
545 
546 BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
547 	   u64 *, res)
548 {
549 	unsigned long long _res;
550 	bool is_negative;
551 	int err;
552 
553 	*res = 0;
554 	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
555 	if (err < 0)
556 		return err;
557 	if (is_negative)
558 		return -EINVAL;
559 	*res = _res;
560 	return err;
561 }
562 
563 const struct bpf_func_proto bpf_strtoul_proto = {
564 	.func		= bpf_strtoul,
565 	.gpl_only	= false,
566 	.ret_type	= RET_INTEGER,
567 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
568 	.arg2_type	= ARG_CONST_SIZE,
569 	.arg3_type	= ARG_ANYTHING,
570 	.arg4_type	= ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
571 	.arg4_size	= sizeof(u64),
572 };
573 
574 BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2)
575 {
576 	return strncmp(s1, s2, s1_sz);
577 }
578 
579 static const struct bpf_func_proto bpf_strncmp_proto = {
580 	.func		= bpf_strncmp,
581 	.gpl_only	= false,
582 	.ret_type	= RET_INTEGER,
583 	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
584 	.arg2_type	= ARG_CONST_SIZE,
585 	.arg3_type	= ARG_PTR_TO_CONST_STR,
586 };
587 
588 BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
589 	   struct bpf_pidns_info *, nsdata, u32, size)
590 {
591 	struct task_struct *task = current;
592 	struct pid_namespace *pidns;
593 	int err = -EINVAL;
594 
595 	if (unlikely(size != sizeof(struct bpf_pidns_info)))
596 		goto clear;
597 
598 	if (unlikely((u64)(dev_t)dev != dev))
599 		goto clear;
600 
601 	if (unlikely(!task))
602 		goto clear;
603 
604 	pidns = task_active_pid_ns(task);
605 	if (unlikely(!pidns)) {
606 		err = -ENOENT;
607 		goto clear;
608 	}
609 
610 	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
611 		goto clear;
612 
613 	nsdata->pid = task_pid_nr_ns(task, pidns);
614 	nsdata->tgid = task_tgid_nr_ns(task, pidns);
615 	return 0;
616 clear:
617 	memset((void *)nsdata, 0, (size_t) size);
618 	return err;
619 }
620 
621 const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
622 	.func		= bpf_get_ns_current_pid_tgid,
623 	.gpl_only	= false,
624 	.ret_type	= RET_INTEGER,
625 	.arg1_type	= ARG_ANYTHING,
626 	.arg2_type	= ARG_ANYTHING,
627 	.arg3_type      = ARG_PTR_TO_UNINIT_MEM,
628 	.arg4_type      = ARG_CONST_SIZE,
629 };
630 
631 static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
632 	.func		= bpf_get_raw_cpu_id,
633 	.gpl_only	= false,
634 	.ret_type	= RET_INTEGER,
635 };
636 
637 BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
638 	   u64, flags, void *, data, u64, size)
639 {
640 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
641 		return -EINVAL;
642 
643 	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
644 }
645 
646 const struct bpf_func_proto bpf_event_output_data_proto =  {
647 	.func		= bpf_event_output_data,
648 	.gpl_only       = true,
649 	.ret_type       = RET_INTEGER,
650 	.arg1_type      = ARG_PTR_TO_CTX,
651 	.arg2_type      = ARG_CONST_MAP_PTR,
652 	.arg3_type      = ARG_ANYTHING,
653 	.arg4_type      = ARG_PTR_TO_MEM | MEM_RDONLY,
654 	.arg5_type      = ARG_CONST_SIZE_OR_ZERO,
655 };
656 
657 BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
658 	   const void __user *, user_ptr)
659 {
660 	int ret = copy_from_user(dst, user_ptr, size);
661 
662 	if (unlikely(ret)) {
663 		memset(dst, 0, size);
664 		ret = -EFAULT;
665 	}
666 
667 	return ret;
668 }
669 
670 const struct bpf_func_proto bpf_copy_from_user_proto = {
671 	.func		= bpf_copy_from_user,
672 	.gpl_only	= false,
673 	.might_sleep	= true,
674 	.ret_type	= RET_INTEGER,
675 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
676 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
677 	.arg3_type	= ARG_ANYTHING,
678 };
679 
680 BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size,
681 	   const void __user *, user_ptr, struct task_struct *, tsk, u64, flags)
682 {
683 	int ret;
684 
685 	/* flags is not used yet */
686 	if (unlikely(flags))
687 		return -EINVAL;
688 
689 	if (unlikely(!size))
690 		return 0;
691 
692 	ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0);
693 	if (ret == size)
694 		return 0;
695 
696 	memset(dst, 0, size);
697 	/* Return -EFAULT for partial read */
698 	return ret < 0 ? ret : -EFAULT;
699 }
700 
701 const struct bpf_func_proto bpf_copy_from_user_task_proto = {
702 	.func		= bpf_copy_from_user_task,
703 	.gpl_only	= true,
704 	.might_sleep	= true,
705 	.ret_type	= RET_INTEGER,
706 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
707 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
708 	.arg3_type	= ARG_ANYTHING,
709 	.arg4_type	= ARG_PTR_TO_BTF_ID,
710 	.arg4_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
711 	.arg5_type	= ARG_ANYTHING
712 };
713 
714 BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
715 {
716 	if (cpu >= nr_cpu_ids)
717 		return (unsigned long)NULL;
718 
719 	return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
720 }
721 
722 const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
723 	.func		= bpf_per_cpu_ptr,
724 	.gpl_only	= false,
725 	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
726 	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
727 	.arg2_type	= ARG_ANYTHING,
728 };
729 
730 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
731 {
732 	return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr);
733 }
734 
735 const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
736 	.func		= bpf_this_cpu_ptr,
737 	.gpl_only	= false,
738 	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
739 	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
740 };
741 
742 static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
743 		size_t bufsz)
744 {
745 	void __user *user_ptr = (__force void __user *)unsafe_ptr;
746 
747 	buf[0] = 0;
748 
749 	switch (fmt_ptype) {
750 	case 's':
751 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
752 		if ((unsigned long)unsafe_ptr < TASK_SIZE)
753 			return strncpy_from_user_nofault(buf, user_ptr, bufsz);
754 		fallthrough;
755 #endif
756 	case 'k':
757 		return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz);
758 	case 'u':
759 		return strncpy_from_user_nofault(buf, user_ptr, bufsz);
760 	}
761 
762 	return -EINVAL;
763 }
764 
765 /* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary
766  * arguments representation.
767  */
768 #define MAX_BPRINTF_BIN_ARGS	512
769 
770 /* Support executing three nested bprintf helper calls on a given CPU */
771 #define MAX_BPRINTF_NEST_LEVEL	3
772 struct bpf_bprintf_buffers {
773 	char bin_args[MAX_BPRINTF_BIN_ARGS];
774 	char buf[MAX_BPRINTF_BUF];
775 };
776 
777 static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs);
778 static DEFINE_PER_CPU(int, bpf_bprintf_nest_level);
779 
780 static int try_get_buffers(struct bpf_bprintf_buffers **bufs)
781 {
782 	int nest_level;
783 
784 	preempt_disable();
785 	nest_level = this_cpu_inc_return(bpf_bprintf_nest_level);
786 	if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) {
787 		this_cpu_dec(bpf_bprintf_nest_level);
788 		preempt_enable();
789 		return -EBUSY;
790 	}
791 	*bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]);
792 
793 	return 0;
794 }
795 
796 void bpf_bprintf_cleanup(struct bpf_bprintf_data *data)
797 {
798 	if (!data->bin_args && !data->buf)
799 		return;
800 	if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0))
801 		return;
802 	this_cpu_dec(bpf_bprintf_nest_level);
803 	preempt_enable();
804 }
805 
806 /*
807  * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers
808  *
809  * Returns a negative value if fmt is an invalid format string or 0 otherwise.
810  *
811  * This can be used in two ways:
812  * - Format string verification only: when data->get_bin_args is false
813  * - Arguments preparation: in addition to the above verification, it writes in
814  *   data->bin_args a binary representation of arguments usable by bstr_printf
815  *   where pointers from BPF have been sanitized.
816  *
817  * In argument preparation mode, if 0 is returned, safe temporary buffers are
818  * allocated and bpf_bprintf_cleanup should be called to free them after use.
819  */
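/* Typical caller pattern in argument-preparation mode (sketch only; see
 * bpf_snprintf() below for the in-tree user this mirrors):
 *
 *	struct bpf_bprintf_data data = { .get_bin_args = true };
 *
 *	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args, num_args, &data);
 *	if (err < 0)
 *		return err;
 *	err = bstr_printf(buf, buf_size, fmt, data.bin_args);
 *	bpf_bprintf_cleanup(&data);
 */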
820 int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args,
821 			u32 num_args, struct bpf_bprintf_data *data)
822 {
823 	bool get_buffers = (data->get_bin_args && num_args) || data->get_buf;
824 	char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end;
825 	struct bpf_bprintf_buffers *buffers = NULL;
826 	size_t sizeof_cur_arg, sizeof_cur_ip;
827 	int err, i, num_spec = 0;
828 	u64 cur_arg;
829 	char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX";
830 
831 	fmt_end = strnchr(fmt, fmt_size, 0);
832 	if (!fmt_end)
833 		return -EINVAL;
834 	fmt_size = fmt_end - fmt;
835 
836 	if (get_buffers && try_get_buffers(&buffers))
837 		return -EBUSY;
838 
839 	if (data->get_bin_args) {
840 		if (num_args)
841 			tmp_buf = buffers->bin_args;
842 		tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS;
843 		data->bin_args = (u32 *)tmp_buf;
844 	}
845 
846 	if (data->get_buf)
847 		data->buf = buffers->buf;
848 
849 	for (i = 0; i < fmt_size; i++) {
850 		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) {
851 			err = -EINVAL;
852 			goto out;
853 		}
854 
855 		if (fmt[i] != '%')
856 			continue;
857 
858 		if (fmt[i + 1] == '%') {
859 			i++;
860 			continue;
861 		}
862 
863 		if (num_spec >= num_args) {
864 			err = -EINVAL;
865 			goto out;
866 		}
867 
868 		/* The string is zero-terminated so if fmt[i] != 0, we can
869 		 * always access fmt[i + 1], in the worst case it will be a 0
870 		 */
871 		i++;
872 
873 		/* skip optional "[0 +-][num]" width formatting field */
874 		while (fmt[i] == '0' || fmt[i] == '+'  || fmt[i] == '-' ||
875 		       fmt[i] == ' ')
876 			i++;
877 		if (fmt[i] >= '1' && fmt[i] <= '9') {
878 			i++;
879 			while (fmt[i] >= '0' && fmt[i] <= '9')
880 				i++;
881 		}
882 
883 		if (fmt[i] == 'p') {
884 			sizeof_cur_arg = sizeof(long);
885 
886 			if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) ||
887 			    ispunct(fmt[i + 1])) {
888 				if (tmp_buf)
889 					cur_arg = raw_args[num_spec];
890 				goto nocopy_fmt;
891 			}
892 
893 			if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') &&
894 			    fmt[i + 2] == 's') {
895 				fmt_ptype = fmt[i + 1];
896 				i += 2;
897 				goto fmt_str;
898 			}
899 
900 			if (fmt[i + 1] == 'K' ||
901 			    fmt[i + 1] == 'x' || fmt[i + 1] == 's' ||
902 			    fmt[i + 1] == 'S') {
903 				if (tmp_buf)
904 					cur_arg = raw_args[num_spec];
905 				i++;
906 				goto nocopy_fmt;
907 			}
908 
909 			if (fmt[i + 1] == 'B') {
910 				if (tmp_buf)  {
911 					err = snprintf(tmp_buf,
912 						       (tmp_buf_end - tmp_buf),
913 						       "%pB",
914 						       (void *)(long)raw_args[num_spec]);
915 					tmp_buf += (err + 1);
916 				}
917 
918 				i++;
919 				num_spec++;
920 				continue;
921 			}
922 
923 			/* only support "%pI4", "%pi4", "%pI6" and "%pi6". */
924 			if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') ||
925 			    (fmt[i + 2] != '4' && fmt[i + 2] != '6')) {
926 				err = -EINVAL;
927 				goto out;
928 			}
929 
930 			i += 2;
931 			if (!tmp_buf)
932 				goto nocopy_fmt;
933 
934 			sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16;
935 			if (tmp_buf_end - tmp_buf < sizeof_cur_ip) {
936 				err = -ENOSPC;
937 				goto out;
938 			}
939 
940 			unsafe_ptr = (char *)(long)raw_args[num_spec];
941 			err = copy_from_kernel_nofault(cur_ip, unsafe_ptr,
942 						       sizeof_cur_ip);
943 			if (err < 0)
944 				memset(cur_ip, 0, sizeof_cur_ip);
945 
946 			/* hack: bstr_printf expects IP addresses to be
947 			 * pre-formatted as strings, ironically, the easiest way
948 			 * to do that is to call snprintf.
949 			 */
950 			ip_spec[2] = fmt[i - 1];
951 			ip_spec[3] = fmt[i];
952 			err = snprintf(tmp_buf, tmp_buf_end - tmp_buf,
953 				       ip_spec, &cur_ip);
954 
955 			tmp_buf += err + 1;
956 			num_spec++;
957 
958 			continue;
959 		} else if (fmt[i] == 's') {
960 			fmt_ptype = fmt[i];
961 fmt_str:
962 			if (fmt[i + 1] != 0 &&
963 			    !isspace(fmt[i + 1]) &&
964 			    !ispunct(fmt[i + 1])) {
965 				err = -EINVAL;
966 				goto out;
967 			}
968 
969 			if (!tmp_buf)
970 				goto nocopy_fmt;
971 
972 			if (tmp_buf_end == tmp_buf) {
973 				err = -ENOSPC;
974 				goto out;
975 			}
976 
977 			unsafe_ptr = (char *)(long)raw_args[num_spec];
978 			err = bpf_trace_copy_string(tmp_buf, unsafe_ptr,
979 						    fmt_ptype,
980 						    tmp_buf_end - tmp_buf);
981 			if (err < 0) {
982 				tmp_buf[0] = '\0';
983 				err = 1;
984 			}
985 
986 			tmp_buf += err;
987 			num_spec++;
988 
989 			continue;
990 		} else if (fmt[i] == 'c') {
991 			if (!tmp_buf)
992 				goto nocopy_fmt;
993 
994 			if (tmp_buf_end == tmp_buf) {
995 				err = -ENOSPC;
996 				goto out;
997 			}
998 
999 			*tmp_buf = raw_args[num_spec];
1000 			tmp_buf++;
1001 			num_spec++;
1002 
1003 			continue;
1004 		}
1005 
1006 		sizeof_cur_arg = sizeof(int);
1007 
1008 		if (fmt[i] == 'l') {
1009 			sizeof_cur_arg = sizeof(long);
1010 			i++;
1011 		}
1012 		if (fmt[i] == 'l') {
1013 			sizeof_cur_arg = sizeof(long long);
1014 			i++;
1015 		}
1016 
1017 		if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' &&
1018 		    fmt[i] != 'x' && fmt[i] != 'X') {
1019 			err = -EINVAL;
1020 			goto out;
1021 		}
1022 
1023 		if (tmp_buf)
1024 			cur_arg = raw_args[num_spec];
1025 nocopy_fmt:
1026 		if (tmp_buf) {
1027 			tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32));
1028 			if (tmp_buf_end - tmp_buf < sizeof_cur_arg) {
1029 				err = -ENOSPC;
1030 				goto out;
1031 			}
1032 
1033 			if (sizeof_cur_arg == 8) {
1034 				*(u32 *)tmp_buf = *(u32 *)&cur_arg;
1035 				*(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1);
1036 			} else {
1037 				*(u32 *)tmp_buf = (u32)(long)cur_arg;
1038 			}
1039 			tmp_buf += sizeof_cur_arg;
1040 		}
1041 		num_spec++;
1042 	}
1043 
1044 	err = 0;
1045 out:
1046 	if (err)
1047 		bpf_bprintf_cleanup(data);
1048 	return err;
1049 }
1050 
1051 BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt,
1052 	   const void *, args, u32, data_len)
1053 {
1054 	struct bpf_bprintf_data data = {
1055 		.get_bin_args	= true,
1056 	};
1057 	int err, num_args;
1058 
1059 	if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 ||
1060 	    (data_len && !args))
1061 		return -EINVAL;
1062 	num_args = data_len / 8;
1063 
1064 	/* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we
1065 	 * can safely give an unbounded size.
1066 	 */
1067 	err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data);
1068 	if (err < 0)
1069 		return err;
1070 
1071 	err = bstr_printf(str, str_size, fmt, data.bin_args);
1072 
1073 	bpf_bprintf_cleanup(&data);
1074 
1075 	return err + 1;
1076 }
1077 
1078 const struct bpf_func_proto bpf_snprintf_proto = {
1079 	.func		= bpf_snprintf,
1080 	.gpl_only	= true,
1081 	.ret_type	= RET_INTEGER,
1082 	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
1083 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1084 	.arg3_type	= ARG_PTR_TO_CONST_STR,
1085 	.arg4_type	= ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
1086 	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
1087 };
1088 
1089 struct bpf_async_cb {
1090 	struct bpf_map *map;
1091 	struct bpf_prog *prog;
1092 	void __rcu *callback_fn;
1093 	void *value;
1094 	union {
1095 		struct rcu_head rcu;
1096 		struct work_struct delete_work;
1097 	};
1098 	u64 flags;
1099 };
1100 
1101 /* BPF map elements can contain 'struct bpf_timer'.
1102  * Such map owns all of its BPF timers.
1103  * 'struct bpf_timer' is allocated as part of map element allocation
1104  * and it's zero initialized.
1105  * That space is used to keep 'struct bpf_async_kern'.
1106  * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and
1107  * remembers 'struct bpf_map *' pointer it's part of.
1108  * bpf_timer_set_callback() increments prog refcnt and assigns bpf callback_fn.
1109  * bpf_timer_start() arms the timer.
1110  * If user space reference to a map goes to zero at this point
1111  * ops->map_release_uref callback is responsible for cancelling the timers,
1112  * freeing their memory, and decrementing prog's refcnts.
1113  * bpf_timer_cancel() cancels the timer and decrements prog's refcnt.
1114  * Inner maps can contain bpf timers as well. ops->map_release_uref is
1115  * freeing the timers when inner map is replaced or deleted by user space.
1116  */
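/* Illustrative BPF-program-side use of the lifecycle above (sketch; "my_map",
 * "val" and "timer_cb" are hypothetical names, not part of this file):
 *
 *	struct bpf_timer *t = &val->timer;
 *
 *	bpf_timer_init(t, &my_map, CLOCK_MONOTONIC);
 *	bpf_timer_set_callback(t, timer_cb);
 *	bpf_timer_start(t, 1000000, 0);		// fire in ~1ms, relative
 */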
1117 struct bpf_hrtimer {
1118 	struct bpf_async_cb cb;
1119 	struct hrtimer timer;
1120 	atomic_t cancelling;
1121 };
1122 
1123 struct bpf_work {
1124 	struct bpf_async_cb cb;
1125 	struct work_struct work;
1126 	struct work_struct delete_work;
1127 };
1128 
1129 /* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */
1130 struct bpf_async_kern {
1131 	union {
1132 		struct bpf_async_cb *cb;
1133 		struct bpf_hrtimer *timer;
1134 		struct bpf_work *work;
1135 	};
1136 	/* bpf_spin_lock is used here instead of spinlock_t to make
1137 	 * sure that it always fits into space reserved by struct bpf_timer
1138 	 * regardless of LOCKDEP and spinlock debug flags.
1139 	 */
1140 	struct bpf_spin_lock lock;
1141 } __attribute__((aligned(8)));
1142 
1143 enum bpf_async_type {
1144 	BPF_ASYNC_TYPE_TIMER = 0,
1145 	BPF_ASYNC_TYPE_WQ,
1146 };
1147 
1148 static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running);
1149 
1150 static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer)
1151 {
1152 	struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);
1153 	struct bpf_map *map = t->cb.map;
1154 	void *value = t->cb.value;
1155 	bpf_callback_t callback_fn;
1156 	void *key;
1157 	u32 idx;
1158 
1159 	BTF_TYPE_EMIT(struct bpf_timer);
1160 	callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held());
1161 	if (!callback_fn)
1162 		goto out;
1163 
1164 	/* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and
1165 	 * cannot be preempted by another bpf_timer_cb() on the same cpu.
1166 	 * Remember the timer this callback is servicing to prevent
1167 	 * deadlock if callback_fn() calls bpf_timer_cancel() or
1168 	 * bpf_map_delete_elem() on the same timer.
1169 	 */
1170 	this_cpu_write(hrtimer_running, t);
1171 	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1172 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1173 
1174 		/* compute the key */
1175 		idx = ((char *)value - array->value) / array->elem_size;
1176 		key = &idx;
1177 	} else { /* hash or lru */
1178 		key = value - round_up(map->key_size, 8);
1179 	}
1180 
1181 	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1182 	/* The verifier checked that return value is zero. */
1183 
1184 	this_cpu_write(hrtimer_running, NULL);
1185 out:
1186 	return HRTIMER_NORESTART;
1187 }
1188 
1189 static void bpf_wq_work(struct work_struct *work)
1190 {
1191 	struct bpf_work *w = container_of(work, struct bpf_work, work);
1192 	struct bpf_async_cb *cb = &w->cb;
1193 	struct bpf_map *map = cb->map;
1194 	bpf_callback_t callback_fn;
1195 	void *value = cb->value;
1196 	void *key;
1197 	u32 idx;
1198 
1199 	BTF_TYPE_EMIT(struct bpf_wq);
1200 
1201 	callback_fn = READ_ONCE(cb->callback_fn);
1202 	if (!callback_fn)
1203 		return;
1204 
1205 	if (map->map_type == BPF_MAP_TYPE_ARRAY) {
1206 		struct bpf_array *array = container_of(map, struct bpf_array, map);
1207 
1208 		/* compute the key */
1209 		idx = ((char *)value - array->value) / array->elem_size;
1210 		key = &idx;
1211 	} else { /* hash or lru */
1212 		key = value - round_up(map->key_size, 8);
1213 	}
1214 
1215 	rcu_read_lock_trace();
1216 	migrate_disable();
1217 
1218 	callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0);
1219 
1220 	migrate_enable();
1221 	rcu_read_unlock_trace();
1222 }
1223 
1224 static void bpf_wq_delete_work(struct work_struct *work)
1225 {
1226 	struct bpf_work *w = container_of(work, struct bpf_work, delete_work);
1227 
1228 	cancel_work_sync(&w->work);
1229 
1230 	kfree_rcu(w, cb.rcu);
1231 }
1232 
1233 static void bpf_timer_delete_work(struct work_struct *work)
1234 {
1235 	struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work);
1236 
1237 	/* Cancel the timer and wait for callback to complete if it was running.
1238 	 * If hrtimer_cancel() can be safely called it's safe to call
1239 	 * kfree_rcu(t) right after for both preallocated and non-preallocated
1240 	 * maps.  The async->cb = NULL was already done and no code path can see
1241 	 * address 't' anymore. Timer if armed for existing bpf_hrtimer before
1242 	 * bpf_timer_cancel_and_free will have been cancelled.
1243 	 */
1244 	hrtimer_cancel(&t->timer);
1245 	kfree_rcu(t, cb.rcu);
1246 }
1247 
1248 static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags,
1249 			    enum bpf_async_type type)
1250 {
1251 	struct bpf_async_cb *cb;
1252 	struct bpf_hrtimer *t;
1253 	struct bpf_work *w;
1254 	clockid_t clockid;
1255 	size_t size;
1256 	int ret = 0;
1257 
1258 	if (in_nmi())
1259 		return -EOPNOTSUPP;
1260 
1261 	switch (type) {
1262 	case BPF_ASYNC_TYPE_TIMER:
1263 		size = sizeof(struct bpf_hrtimer);
1264 		break;
1265 	case BPF_ASYNC_TYPE_WQ:
1266 		size = sizeof(struct bpf_work);
1267 		break;
1268 	default:
1269 		return -EINVAL;
1270 	}
1271 
1272 	__bpf_spin_lock_irqsave(&async->lock);
1273 	t = async->timer;
1274 	if (t) {
1275 		ret = -EBUSY;
1276 		goto out;
1277 	}
1278 
1279 	/* Allocate via bpf_map_kmalloc_node() for memcg accounting. Until
1280 	 * kmalloc_nolock() is available, avoid locking issues by using
1281 	 * __GFP_HIGH (GFP_ATOMIC & ~__GFP_RECLAIM).
1282 	 */
1283 	cb = bpf_map_kmalloc_node(map, size, __GFP_HIGH, map->numa_node);
1284 	if (!cb) {
1285 		ret = -ENOMEM;
1286 		goto out;
1287 	}
1288 
1289 	switch (type) {
1290 	case BPF_ASYNC_TYPE_TIMER:
1291 		clockid = flags & (MAX_CLOCKS - 1);
1292 		t = (struct bpf_hrtimer *)cb;
1293 
1294 		atomic_set(&t->cancelling, 0);
1295 		INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work);
1296 		hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
1297 		t->timer.function = bpf_timer_cb;
1298 		cb->value = (void *)async - map->record->timer_off;
1299 		break;
1300 	case BPF_ASYNC_TYPE_WQ:
1301 		w = (struct bpf_work *)cb;
1302 
1303 		INIT_WORK(&w->work, bpf_wq_work);
1304 		INIT_WORK(&w->delete_work, bpf_wq_delete_work);
1305 		cb->value = (void *)async - map->record->wq_off;
1306 		break;
1307 	}
1308 	cb->map = map;
1309 	cb->prog = NULL;
1310 	cb->flags = flags;
1311 	rcu_assign_pointer(cb->callback_fn, NULL);
1312 
1313 	WRITE_ONCE(async->cb, cb);
1314 	/* Guarantee the order between async->cb and map->usercnt. So
1315 	 * when there are concurrent uref release and bpf timer init, either
1316 	 * bpf_timer_cancel_and_free() called by uref release reads a no-NULL
1317 	 * timer or atomic64_read() below returns a zero usercnt.
1318 	 */
1319 	smp_mb();
1320 	if (!atomic64_read(&map->usercnt)) {
1321 		/* maps with timers must be either held by user space
1322 		 * or pinned in bpffs.
1323 		 */
1324 		WRITE_ONCE(async->cb, NULL);
1325 		kfree(cb);
1326 		ret = -EPERM;
1327 	}
1328 out:
1329 	__bpf_spin_unlock_irqrestore(&async->lock);
1330 	return ret;
1331 }
1332 
1333 BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map,
1334 	   u64, flags)
1335 {
1336 	clock_t clockid = flags & (MAX_CLOCKS - 1);
1337 
1338 	BUILD_BUG_ON(MAX_CLOCKS != 16);
1339 	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer));
1340 	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer));
1341 
1342 	if (flags >= MAX_CLOCKS ||
1343 	    /* similar to timerfd except _ALARM variants are not supported */
1344 	    (clockid != CLOCK_MONOTONIC &&
1345 	     clockid != CLOCK_REALTIME &&
1346 	     clockid != CLOCK_BOOTTIME))
1347 		return -EINVAL;
1348 
1349 	return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER);
1350 }
1351 
1352 static const struct bpf_func_proto bpf_timer_init_proto = {
1353 	.func		= bpf_timer_init,
1354 	.gpl_only	= true,
1355 	.ret_type	= RET_INTEGER,
1356 	.arg1_type	= ARG_PTR_TO_TIMER,
1357 	.arg2_type	= ARG_CONST_MAP_PTR,
1358 	.arg3_type	= ARG_ANYTHING,
1359 };
1360 
1361 static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn,
1362 				    struct bpf_prog_aux *aux, unsigned int flags,
1363 				    enum bpf_async_type type)
1364 {
1365 	struct bpf_prog *prev, *prog = aux->prog;
1366 	struct bpf_async_cb *cb;
1367 	int ret = 0;
1368 
1369 	if (in_nmi())
1370 		return -EOPNOTSUPP;
1371 	__bpf_spin_lock_irqsave(&async->lock);
1372 	cb = async->cb;
1373 	if (!cb) {
1374 		ret = -EINVAL;
1375 		goto out;
1376 	}
1377 	if (!atomic64_read(&cb->map->usercnt)) {
1378 		/* maps with timers must be either held by user space
1379 		 * or pinned in bpffs. Otherwise timer might still be
1380 		 * running even when bpf prog is detached and user space
1381 		 * is gone, since map_release_uref won't ever be called.
1382 		 */
1383 		ret = -EPERM;
1384 		goto out;
1385 	}
1386 	prev = cb->prog;
1387 	if (prev != prog) {
1388 		/* Bump prog refcnt once. Every bpf_timer_set_callback()
1389 		 * can pick different callback_fn-s within the same prog.
1390 		 */
1391 		prog = bpf_prog_inc_not_zero(prog);
1392 		if (IS_ERR(prog)) {
1393 			ret = PTR_ERR(prog);
1394 			goto out;
1395 		}
1396 		if (prev)
1397 			/* Drop prev prog refcnt when swapping with new prog */
1398 			bpf_prog_put(prev);
1399 		cb->prog = prog;
1400 	}
1401 	rcu_assign_pointer(cb->callback_fn, callback_fn);
1402 out:
1403 	__bpf_spin_unlock_irqrestore(&async->lock);
1404 	return ret;
1405 }
1406 
1407 BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn,
1408 	   struct bpf_prog_aux *, aux)
1409 {
1410 	return __bpf_async_set_callback(timer, callback_fn, aux, 0, BPF_ASYNC_TYPE_TIMER);
1411 }
1412 
1413 static const struct bpf_func_proto bpf_timer_set_callback_proto = {
1414 	.func		= bpf_timer_set_callback,
1415 	.gpl_only	= true,
1416 	.ret_type	= RET_INTEGER,
1417 	.arg1_type	= ARG_PTR_TO_TIMER,
1418 	.arg2_type	= ARG_PTR_TO_FUNC,
1419 };
1420 
1421 BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags)
1422 {
1423 	struct bpf_hrtimer *t;
1424 	int ret = 0;
1425 	enum hrtimer_mode mode;
1426 
1427 	if (in_nmi())
1428 		return -EOPNOTSUPP;
1429 	if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN))
1430 		return -EINVAL;
1431 	__bpf_spin_lock_irqsave(&timer->lock);
1432 	t = timer->timer;
1433 	if (!t || !t->cb.prog) {
1434 		ret = -EINVAL;
1435 		goto out;
1436 	}
1437 
1438 	if (flags & BPF_F_TIMER_ABS)
1439 		mode = HRTIMER_MODE_ABS_SOFT;
1440 	else
1441 		mode = HRTIMER_MODE_REL_SOFT;
1442 
1443 	if (flags & BPF_F_TIMER_CPU_PIN)
1444 		mode |= HRTIMER_MODE_PINNED;
1445 
1446 	hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
1447 out:
1448 	__bpf_spin_unlock_irqrestore(&timer->lock);
1449 	return ret;
1450 }
1451 
1452 static const struct bpf_func_proto bpf_timer_start_proto = {
1453 	.func		= bpf_timer_start,
1454 	.gpl_only	= true,
1455 	.ret_type	= RET_INTEGER,
1456 	.arg1_type	= ARG_PTR_TO_TIMER,
1457 	.arg2_type	= ARG_ANYTHING,
1458 	.arg3_type	= ARG_ANYTHING,
1459 };
1460 
1461 static void drop_prog_refcnt(struct bpf_async_cb *async)
1462 {
1463 	struct bpf_prog *prog = async->prog;
1464 
1465 	if (prog) {
1466 		bpf_prog_put(prog);
1467 		async->prog = NULL;
1468 		rcu_assign_pointer(async->callback_fn, NULL);
1469 	}
1470 }
1471 
1472 BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer)
1473 {
1474 	struct bpf_hrtimer *t, *cur_t;
1475 	bool inc = false;
1476 	int ret = 0;
1477 
1478 	if (in_nmi())
1479 		return -EOPNOTSUPP;
1480 	rcu_read_lock();
1481 	__bpf_spin_lock_irqsave(&timer->lock);
1482 	t = timer->timer;
1483 	if (!t) {
1484 		ret = -EINVAL;
1485 		goto out;
1486 	}
1487 
1488 	cur_t = this_cpu_read(hrtimer_running);
1489 	if (cur_t == t) {
1490 		/* If bpf callback_fn is trying to bpf_timer_cancel()
1491 		 * its own timer the hrtimer_cancel() will deadlock
1492 		 * since it waits for callback_fn to finish.
1493 		 */
1494 		ret = -EDEADLK;
1495 		goto out;
1496 	}
1497 
1498 	/* Only account in-flight cancellations when invoked from a timer
1499 	 * callback, since we want to avoid waiting only if other _callbacks_
1500 	 * are waiting on us, to avoid introducing lockups. Non-callback paths
1501 	 * are ok, since nobody would synchronously wait for their completion.
1502 	 */
1503 	if (!cur_t)
1504 		goto drop;
1505 	atomic_inc(&t->cancelling);
1506 	/* Need full barrier after relaxed atomic_inc */
1507 	smp_mb__after_atomic();
1508 	inc = true;
1509 	if (atomic_read(&cur_t->cancelling)) {
1510 		/* We're cancelling timer t, while some other timer callback is
1511 		 * attempting to cancel us. In such a case, it might be possible
1512 		 * that timer t belongs to the other callback, or some other
1513 		 * callback waiting upon it (creating transitive dependencies
1514 		 * upon us), and we will enter a deadlock if we continue
1515 		 * cancelling and waiting for it synchronously, since it might
1516 		 * do the same. Bail!
1517 		 */
1518 		ret = -EDEADLK;
1519 		goto out;
1520 	}
1521 drop:
1522 	drop_prog_refcnt(&t->cb);
1523 out:
1524 	__bpf_spin_unlock_irqrestore(&timer->lock);
1525 	/* Cancel the timer and wait for associated callback to finish
1526 	 * if it was running.
1527 	 */
1528 	ret = ret ?: hrtimer_cancel(&t->timer);
1529 	if (inc)
1530 		atomic_dec(&t->cancelling);
1531 	rcu_read_unlock();
1532 	return ret;
1533 }
1534 
1535 static const struct bpf_func_proto bpf_timer_cancel_proto = {
1536 	.func		= bpf_timer_cancel,
1537 	.gpl_only	= true,
1538 	.ret_type	= RET_INTEGER,
1539 	.arg1_type	= ARG_PTR_TO_TIMER,
1540 };
1541 
1542 static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async)
1543 {
1544 	struct bpf_async_cb *cb;
1545 
1546 	/* Performance optimization: read async->cb without lock first. */
1547 	if (!READ_ONCE(async->cb))
1548 		return NULL;
1549 
1550 	__bpf_spin_lock_irqsave(&async->lock);
1551 	/* re-read it under lock */
1552 	cb = async->cb;
1553 	if (!cb)
1554 		goto out;
1555 	drop_prog_refcnt(cb);
1556 	/* The subsequent bpf_timer_start/cancel() helpers won't be able to use
1557 	 * this timer, since it won't be initialized.
1558 	 */
1559 	WRITE_ONCE(async->cb, NULL);
1560 out:
1561 	__bpf_spin_unlock_irqrestore(&async->lock);
1562 	return cb;
1563 }
1564 
1565 /* This function is called by map_delete/update_elem for individual element and
1566  * by ops->map_release_uref when the user space reference to a map reaches zero.
1567  */
1568 void bpf_timer_cancel_and_free(void *val)
1569 {
1570 	struct bpf_hrtimer *t;
1571 
1572 	t = (struct bpf_hrtimer *)__bpf_async_cancel_and_free(val);
1573 
1574 	if (!t)
1575 		return;
1576 	/* We check that bpf_map_delete/update_elem() was called from timer
1577 	 * callback_fn. In such case we don't call hrtimer_cancel() (since it
1578 	 * will deadlock) and don't call hrtimer_try_to_cancel() (since it will
1579 	 * just return -1). Though callback_fn is still running on this cpu it's
1580 	 * safe to do kfree(t) because bpf_timer_cb() read everything it needed
1581 	 * from 't'. The bpf subprog callback_fn won't be able to access 't',
1582 	 * since async->cb = NULL was already done. The timer will be
1583 	 * effectively cancelled because bpf_timer_cb() will return
1584 	 * HRTIMER_NORESTART.
1585 	 *
1586 	 * However, it is possible the timer callback_fn calling us armed the
1587 	 * timer _before_ calling us, such that failing to cancel it here will
1588 	 * cause it to possibly use struct hrtimer after freeing bpf_hrtimer.
1589 	 * Therefore, we _need_ to cancel any outstanding timers before we do
1590 	 * kfree_rcu, even though no more timers can be armed.
1591 	 *
1592 	 * Moreover, we need to schedule work even if timer does not belong to
1593 	 * the calling callback_fn, as on two different CPUs, we can end up in a
1594 	 * situation where both sides run in parallel, try to cancel one
1595 	 * another, and we end up waiting on both sides in hrtimer_cancel
1596 	 * without making forward progress, since timer1 depends on timer2
1597 	 * callback to finish, and vice versa.
1598 	 *
1599 	 *  CPU 1 (timer1_cb)			CPU 2 (timer2_cb)
1600 	 *  bpf_timer_cancel_and_free(timer2)	bpf_timer_cancel_and_free(timer1)
1601 	 *
1602 	 * To avoid these issues, punt to workqueue context when we are in a
1603 	 * timer callback.
1604 	 */
1605 	if (this_cpu_read(hrtimer_running)) {
1606 		queue_work(system_unbound_wq, &t->cb.delete_work);
1607 		return;
1608 	}
1609 
1610 	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1611 		/* If the timer is running on other CPU, also use a kworker to
1612 		 * wait for the completion of the timer instead of trying to
1613 		 * acquire a sleepable lock in hrtimer_cancel() to wait for its
1614 		 * completion.
1615 		 */
1616 		if (hrtimer_try_to_cancel(&t->timer) >= 0)
1617 			kfree_rcu(t, cb.rcu);
1618 		else
1619 			queue_work(system_unbound_wq, &t->cb.delete_work);
1620 	} else {
1621 		bpf_timer_delete_work(&t->cb.delete_work);
1622 	}
1623 }
1624 
1625 /* This function is called by map_delete/update_elem for individual element and
1626  * by ops->map_release_uref when the user space reference to a map reaches zero.
1627  */
1628 void bpf_wq_cancel_and_free(void *val)
1629 {
1630 	struct bpf_work *work;
1631 
1632 	BTF_TYPE_EMIT(struct bpf_wq);
1633 
1634 	work = (struct bpf_work *)__bpf_async_cancel_and_free(val);
1635 	if (!work)
1636 		return;
1637 	/* Trigger cancel of the sleepable work, but *do not* wait for
1638 	 * it to finish if it was running as we might not be in a
1639 	 * sleepable context.
1640 	 * kfree will be called once the work has finished.
1641 	 */
1642 	schedule_work(&work->delete_work);
1643 }
1644 
1645 BPF_CALL_2(bpf_kptr_xchg, void *, dst, void *, ptr)
1646 {
1647 	unsigned long *kptr = dst;
1648 
1649 	/* This helper may be inlined by verifier. */
1650 	return xchg(kptr, (unsigned long)ptr);
1651 }
1652 
1653 /* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg()
1654  * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to
1655  * denote type that verifier will determine.
1656  */
1657 static const struct bpf_func_proto bpf_kptr_xchg_proto = {
1658 	.func         = bpf_kptr_xchg,
1659 	.gpl_only     = false,
1660 	.ret_type     = RET_PTR_TO_BTF_ID_OR_NULL,
1661 	.ret_btf_id   = BPF_PTR_POISON,
1662 	.arg1_type    = ARG_KPTR_XCHG_DEST,
1663 	.arg2_type    = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE,
1664 	.arg2_btf_id  = BPF_PTR_POISON,
1665 };
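
/* Illustrative BPF-program-side use of bpf_kptr_xchg() (sketch; assumes a map
 * value with a kptr field named "p", which is not part of this file):
 *
 *	old = bpf_kptr_xchg(&val->p, new);
 *	// ownership of "new" moved into the map; if "old" is non-NULL the
 *	// program now owns it and must pass it to the matching release kfunc
 */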
1666 
1667 /* Since the upper 8 bits of dynptr->size is reserved, the
1668  * maximum supported size is 2^24 - 1.
1669  */
1670 #define DYNPTR_MAX_SIZE	((1UL << 24) - 1)
1671 #define DYNPTR_TYPE_SHIFT	28
1672 #define DYNPTR_SIZE_MASK	0xFFFFFF
1673 #define DYNPTR_RDONLY_BIT	BIT(31)
1674 
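/* The dynptr "size" field packs three things, per the masks above
 * (illustrative layout, derived from the accessors below):
 *
 *	bits  0..23: payload size		(DYNPTR_SIZE_MASK)
 *	bits 28..30: enum bpf_dynptr_type	(shifted by DYNPTR_TYPE_SHIFT)
 *	bit      31: read-only flag		(DYNPTR_RDONLY_BIT)
 */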
1675 bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr)
1676 {
1677 	return ptr->size & DYNPTR_RDONLY_BIT;
1678 }
1679 
1680 void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr)
1681 {
1682 	ptr->size |= DYNPTR_RDONLY_BIT;
1683 }
1684 
1685 static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type)
1686 {
1687 	ptr->size |= type << DYNPTR_TYPE_SHIFT;
1688 }
1689 
1690 static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr)
1691 {
1692 	return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT;
1693 }
1694 
1695 u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr)
1696 {
1697 	return ptr->size & DYNPTR_SIZE_MASK;
1698 }
1699 
1700 static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size)
1701 {
1702 	u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK;
1703 
1704 	ptr->size = new_size | metadata;
1705 }
1706 
1707 int bpf_dynptr_check_size(u32 size)
1708 {
1709 	return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
1710 }
1711 
1712 void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
1713 		     enum bpf_dynptr_type type, u32 offset, u32 size)
1714 {
1715 	ptr->data = data;
1716 	ptr->offset = offset;
1717 	ptr->size = size;
1718 	bpf_dynptr_set_type(ptr, type);
1719 }
1720 
1721 void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
1722 {
1723 	memset(ptr, 0, sizeof(*ptr));
1724 }
1725 
1726 static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len)
1727 {
1728 	u32 size = __bpf_dynptr_size(ptr);
1729 
1730 	if (len > size || offset > size - len)
1731 		return -E2BIG;
1732 
1733 	return 0;
1734 }
1735 
1736 BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr)
1737 {
1738 	int err;
1739 
1740 	BTF_TYPE_EMIT(struct bpf_dynptr);
1741 
1742 	err = bpf_dynptr_check_size(size);
1743 	if (err)
1744 		goto error;
1745 
1746 	/* flags is currently unsupported */
1747 	if (flags) {
1748 		err = -EINVAL;
1749 		goto error;
1750 	}
1751 
1752 	bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size);
1753 
1754 	return 0;
1755 
1756 error:
1757 	bpf_dynptr_set_null(ptr);
1758 	return err;
1759 }
1760 
1761 static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
1762 	.func		= bpf_dynptr_from_mem,
1763 	.gpl_only	= false,
1764 	.ret_type	= RET_INTEGER,
1765 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1766 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1767 	.arg3_type	= ARG_ANYTHING,
1768 	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE,
1769 };
1770 
1771 BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
1772 	   u32, offset, u64, flags)
1773 {
1774 	enum bpf_dynptr_type type;
1775 	int err;
1776 
1777 	if (!src->data || flags)
1778 		return -EINVAL;
1779 
1780 	err = bpf_dynptr_check_off_len(src, offset, len);
1781 	if (err)
1782 		return err;
1783 
1784 	type = bpf_dynptr_get_type(src);
1785 
1786 	switch (type) {
1787 	case BPF_DYNPTR_TYPE_LOCAL:
1788 	case BPF_DYNPTR_TYPE_RINGBUF:
1789 		/* Source and destination may possibly overlap, hence use memmove to
1790 		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
1791 		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1792 		 */
1793 		memmove(dst, src->data + src->offset + offset, len);
1794 		return 0;
1795 	case BPF_DYNPTR_TYPE_SKB:
1796 		return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len);
1797 	case BPF_DYNPTR_TYPE_XDP:
1798 		return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len);
1799 	default:
1800 		WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type);
1801 		return -EFAULT;
1802 	}
1803 }
1804 
1805 static const struct bpf_func_proto bpf_dynptr_read_proto = {
1806 	.func		= bpf_dynptr_read,
1807 	.gpl_only	= false,
1808 	.ret_type	= RET_INTEGER,
1809 	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
1810 	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
1811 	.arg3_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1812 	.arg4_type	= ARG_ANYTHING,
1813 	.arg5_type	= ARG_ANYTHING,
1814 };
1815 
1816 BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src,
1817 	   u32, len, u64, flags)
1818 {
1819 	enum bpf_dynptr_type type;
1820 	int err;
1821 
1822 	if (!dst->data || __bpf_dynptr_is_rdonly(dst))
1823 		return -EINVAL;
1824 
1825 	err = bpf_dynptr_check_off_len(dst, offset, len);
1826 	if (err)
1827 		return err;
1828 
1829 	type = bpf_dynptr_get_type(dst);
1830 
1831 	switch (type) {
1832 	case BPF_DYNPTR_TYPE_LOCAL:
1833 	case BPF_DYNPTR_TYPE_RINGBUF:
1834 		if (flags)
1835 			return -EINVAL;
1836 		/* Source and destination may possibly overlap, hence use memmove to
1837 		 * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr
1838 		 * pointing to overlapping PTR_TO_MAP_VALUE regions.
1839 		 */
1840 		memmove(dst->data + dst->offset + offset, src, len);
1841 		return 0;
1842 	case BPF_DYNPTR_TYPE_SKB:
1843 		return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len,
1844 					     flags);
1845 	case BPF_DYNPTR_TYPE_XDP:
1846 		if (flags)
1847 			return -EINVAL;
1848 		return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len);
1849 	default:
1850 		WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type);
1851 		return -EFAULT;
1852 	}
1853 }
1854 
1855 static const struct bpf_func_proto bpf_dynptr_write_proto = {
1856 	.func		= bpf_dynptr_write,
1857 	.gpl_only	= false,
1858 	.ret_type	= RET_INTEGER,
1859 	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1860 	.arg2_type	= ARG_ANYTHING,
1861 	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
1862 	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
1863 	.arg5_type	= ARG_ANYTHING,
1864 };
1865 
1866 BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len)
1867 {
1868 	enum bpf_dynptr_type type;
1869 	int err;
1870 
1871 	if (!ptr->data)
1872 		return 0;
1873 
1874 	err = bpf_dynptr_check_off_len(ptr, offset, len);
1875 	if (err)
1876 		return 0;
1877 
1878 	if (__bpf_dynptr_is_rdonly(ptr))
1879 		return 0;
1880 
1881 	type = bpf_dynptr_get_type(ptr);
1882 
1883 	switch (type) {
1884 	case BPF_DYNPTR_TYPE_LOCAL:
1885 	case BPF_DYNPTR_TYPE_RINGBUF:
1886 		return (unsigned long)(ptr->data + ptr->offset + offset);
1887 	case BPF_DYNPTR_TYPE_SKB:
1888 	case BPF_DYNPTR_TYPE_XDP:
1889 		/* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */
1890 		return 0;
1891 	default:
1892 		WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type);
1893 		return 0;
1894 	}
1895 }
1896 
1897 static const struct bpf_func_proto bpf_dynptr_data_proto = {
1898 	.func		= bpf_dynptr_data,
1899 	.gpl_only	= false,
1900 	.ret_type	= RET_PTR_TO_DYNPTR_MEM_OR_NULL,
1901 	.arg1_type	= ARG_PTR_TO_DYNPTR | MEM_RDONLY,
1902 	.arg2_type	= ARG_ANYTHING,
1903 	.arg3_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
1904 };
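
/* Illustrative BPF-side sketch (not part of this file) of the four dynptr
 * helpers above.  A program wraps a plain buffer in a local dynptr with
 * bpf_dynptr_from_mem() and then reads/writes through it.  The SEC() macro
 * and helper declarations are assumed to come from libbpf's bpf_helpers.h;
 * "buf" is just a global (.bss) buffer chosen for the example.
 *
 *	char buf[64];
 *
 *	SEC("tp/syscalls/sys_enter_nanosleep")
 *	int use_local_dynptr(void *ctx)
 *	{
 *		struct bpf_dynptr ptr;
 *		__u32 val = 42;
 *		__u8 *data;
 *
 *		if (bpf_dynptr_from_mem(buf, sizeof(buf), 0, &ptr))
 *			return 0;
 *
 *		// write at offset 8, then read the value back
 *		bpf_dynptr_write(&ptr, 8, &val, sizeof(val), 0);
 *		bpf_dynptr_read(&val, sizeof(val), &ptr, 8, 0);
 *
 *		// direct pointer into the backing buffer
 *		data = bpf_dynptr_data(&ptr, 0, 16);
 *		if (data)
 *			data[0] = 0xff;
 *		return 0;
 *	}
 */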
1905 
1906 const struct bpf_func_proto bpf_get_current_task_proto __weak;
1907 const struct bpf_func_proto bpf_get_current_task_btf_proto __weak;
1908 const struct bpf_func_proto bpf_probe_read_user_proto __weak;
1909 const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
1910 const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
1911 const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;
1912 const struct bpf_func_proto bpf_task_pt_regs_proto __weak;
1913 
1914 const struct bpf_func_proto *
1915 bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1916 {
1917 	switch (func_id) {
1918 	case BPF_FUNC_map_lookup_elem:
1919 		return &bpf_map_lookup_elem_proto;
1920 	case BPF_FUNC_map_update_elem:
1921 		return &bpf_map_update_elem_proto;
1922 	case BPF_FUNC_map_delete_elem:
1923 		return &bpf_map_delete_elem_proto;
1924 	case BPF_FUNC_map_push_elem:
1925 		return &bpf_map_push_elem_proto;
1926 	case BPF_FUNC_map_pop_elem:
1927 		return &bpf_map_pop_elem_proto;
1928 	case BPF_FUNC_map_peek_elem:
1929 		return &bpf_map_peek_elem_proto;
1930 	case BPF_FUNC_map_lookup_percpu_elem:
1931 		return &bpf_map_lookup_percpu_elem_proto;
1932 	case BPF_FUNC_get_prandom_u32:
1933 		return &bpf_get_prandom_u32_proto;
1934 	case BPF_FUNC_get_smp_processor_id:
1935 		return &bpf_get_raw_smp_processor_id_proto;
1936 	case BPF_FUNC_get_numa_node_id:
1937 		return &bpf_get_numa_node_id_proto;
1938 	case BPF_FUNC_tail_call:
1939 		return &bpf_tail_call_proto;
1940 	case BPF_FUNC_ktime_get_ns:
1941 		return &bpf_ktime_get_ns_proto;
1942 	case BPF_FUNC_ktime_get_boot_ns:
1943 		return &bpf_ktime_get_boot_ns_proto;
1944 	case BPF_FUNC_ktime_get_tai_ns:
1945 		return &bpf_ktime_get_tai_ns_proto;
1946 	case BPF_FUNC_ringbuf_output:
1947 		return &bpf_ringbuf_output_proto;
1948 	case BPF_FUNC_ringbuf_reserve:
1949 		return &bpf_ringbuf_reserve_proto;
1950 	case BPF_FUNC_ringbuf_submit:
1951 		return &bpf_ringbuf_submit_proto;
1952 	case BPF_FUNC_ringbuf_discard:
1953 		return &bpf_ringbuf_discard_proto;
1954 	case BPF_FUNC_ringbuf_query:
1955 		return &bpf_ringbuf_query_proto;
1956 	case BPF_FUNC_strncmp:
1957 		return &bpf_strncmp_proto;
1958 	case BPF_FUNC_strtol:
1959 		return &bpf_strtol_proto;
1960 	case BPF_FUNC_strtoul:
1961 		return &bpf_strtoul_proto;
1962 	case BPF_FUNC_get_current_pid_tgid:
1963 		return &bpf_get_current_pid_tgid_proto;
1964 	case BPF_FUNC_get_ns_current_pid_tgid:
1965 		return &bpf_get_ns_current_pid_tgid_proto;
1966 	default:
1967 		break;
1968 	}
1969 
1970 	if (!bpf_token_capable(prog->aux->token, CAP_BPF))
1971 		return NULL;
1972 
1973 	switch (func_id) {
1974 	case BPF_FUNC_spin_lock:
1975 		return &bpf_spin_lock_proto;
1976 	case BPF_FUNC_spin_unlock:
1977 		return &bpf_spin_unlock_proto;
1978 	case BPF_FUNC_jiffies64:
1979 		return &bpf_jiffies64_proto;
1980 	case BPF_FUNC_per_cpu_ptr:
1981 		return &bpf_per_cpu_ptr_proto;
1982 	case BPF_FUNC_this_cpu_ptr:
1983 		return &bpf_this_cpu_ptr_proto;
1984 	case BPF_FUNC_timer_init:
1985 		return &bpf_timer_init_proto;
1986 	case BPF_FUNC_timer_set_callback:
1987 		return &bpf_timer_set_callback_proto;
1988 	case BPF_FUNC_timer_start:
1989 		return &bpf_timer_start_proto;
1990 	case BPF_FUNC_timer_cancel:
1991 		return &bpf_timer_cancel_proto;
1992 	case BPF_FUNC_kptr_xchg:
1993 		return &bpf_kptr_xchg_proto;
1994 	case BPF_FUNC_for_each_map_elem:
1995 		return &bpf_for_each_map_elem_proto;
1996 	case BPF_FUNC_loop:
1997 		return &bpf_loop_proto;
1998 	case BPF_FUNC_user_ringbuf_drain:
1999 		return &bpf_user_ringbuf_drain_proto;
2000 	case BPF_FUNC_ringbuf_reserve_dynptr:
2001 		return &bpf_ringbuf_reserve_dynptr_proto;
2002 	case BPF_FUNC_ringbuf_submit_dynptr:
2003 		return &bpf_ringbuf_submit_dynptr_proto;
2004 	case BPF_FUNC_ringbuf_discard_dynptr:
2005 		return &bpf_ringbuf_discard_dynptr_proto;
2006 	case BPF_FUNC_dynptr_from_mem:
2007 		return &bpf_dynptr_from_mem_proto;
2008 	case BPF_FUNC_dynptr_read:
2009 		return &bpf_dynptr_read_proto;
2010 	case BPF_FUNC_dynptr_write:
2011 		return &bpf_dynptr_write_proto;
2012 	case BPF_FUNC_dynptr_data:
2013 		return &bpf_dynptr_data_proto;
2014 #ifdef CONFIG_CGROUPS
2015 	case BPF_FUNC_cgrp_storage_get:
2016 		return &bpf_cgrp_storage_get_proto;
2017 	case BPF_FUNC_cgrp_storage_delete:
2018 		return &bpf_cgrp_storage_delete_proto;
2019 	case BPF_FUNC_get_current_cgroup_id:
2020 		return &bpf_get_current_cgroup_id_proto;
2021 	case BPF_FUNC_get_current_ancestor_cgroup_id:
2022 		return &bpf_get_current_ancestor_cgroup_id_proto;
2023 #endif
2024 	default:
2025 		break;
2026 	}
2027 
2028 	if (!bpf_token_capable(prog->aux->token, CAP_PERFMON))
2029 		return NULL;
2030 
2031 	switch (func_id) {
2032 	case BPF_FUNC_trace_printk:
2033 		return bpf_get_trace_printk_proto();
2034 	case BPF_FUNC_get_current_task:
2035 		return &bpf_get_current_task_proto;
2036 	case BPF_FUNC_get_current_task_btf:
2037 		return &bpf_get_current_task_btf_proto;
2038 	case BPF_FUNC_probe_read_user:
2039 		return &bpf_probe_read_user_proto;
2040 	case BPF_FUNC_probe_read_kernel:
2041 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2042 		       NULL : &bpf_probe_read_kernel_proto;
2043 	case BPF_FUNC_probe_read_user_str:
2044 		return &bpf_probe_read_user_str_proto;
2045 	case BPF_FUNC_probe_read_kernel_str:
2046 		return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ?
2047 		       NULL : &bpf_probe_read_kernel_str_proto;
2048 	case BPF_FUNC_snprintf_btf:
2049 		return &bpf_snprintf_btf_proto;
2050 	case BPF_FUNC_snprintf:
2051 		return &bpf_snprintf_proto;
2052 	case BPF_FUNC_task_pt_regs:
2053 		return &bpf_task_pt_regs_proto;
2054 	case BPF_FUNC_trace_vprintk:
2055 		return bpf_get_trace_vprintk_proto();
2056 	default:
2057 		return NULL;
2058 	}
2059 }
2060 EXPORT_SYMBOL_GPL(bpf_base_func_proto);
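
/* Typical use of bpf_base_func_proto() (illustrative sketch, not taken from a
 * specific subsystem): a verifier_ops->get_func_proto() callback resolves its
 * program-type specific helpers first and falls back to the base set for
 * everything else.  "my_subsys_func_proto" is a made-up name.
 *
 *	static const struct bpf_func_proto *
 *	my_subsys_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_get_current_comm:
 *			return &bpf_get_current_comm_proto;
 *		default:
 *			return bpf_base_func_proto(func_id, prog);
 *		}
 *	}
 */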
2061 
2062 void bpf_list_head_free(const struct btf_field *field, void *list_head,
2063 			struct bpf_spin_lock *spin_lock)
2064 {
2065 	struct list_head *head = list_head, *orig_head = list_head;
2066 
2067 	BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head));
2068 	BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head));
2069 
2070 	/* Do the actual list draining outside the lock to not hold the lock for
2071 	 * too long, and also prevent deadlocks if tracing programs end up
2072 	 * executing on entry/exit of functions called inside the critical
2073 	 * section, and end up doing map ops that call bpf_list_head_free for
2074 	 * the same map value again.
2075 	 */
2076 	__bpf_spin_lock_irqsave(spin_lock);
2077 	if (!head->next || list_empty(head))
2078 		goto unlock;
2079 	head = head->next;
2080 unlock:
2081 	INIT_LIST_HEAD(orig_head);
2082 	__bpf_spin_unlock_irqrestore(spin_lock);
2083 
2084 	while (head != orig_head) {
2085 		void *obj = head;
2086 
2087 		obj -= field->graph_root.node_offset;
2088 		head = head->next;
2089 		/* The contained type can also have resources, including a
2090 		 * bpf_list_head which needs to be freed.
2091 		 */
2092 		migrate_disable();
2093 		__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2094 		migrate_enable();
2095 	}
2096 }
2097 
2098 /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are
2099  * 'rb_node *', so field name of rb_node within containing struct is not
2100  * needed.
2101  *
2102  * Since bpf_rb_tree's node type has a corresponding struct btf_field with
2103  * graph_root.node_offset, it's not necessary to know field name
2104  * or type of node struct
2105  */
2106 #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \
2107 	for (pos = rb_first_postorder(root); \
2108 	    pos && ({ n = rb_next_postorder(pos); 1; }); \
2109 	    pos = n)
2110 
2111 void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
2112 		      struct bpf_spin_lock *spin_lock)
2113 {
2114 	struct rb_root_cached orig_root, *root = rb_root;
2115 	struct rb_node *pos, *n;
2116 	void *obj;
2117 
2118 	BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root));
2119 	BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root));
2120 
2121 	__bpf_spin_lock_irqsave(spin_lock);
2122 	orig_root = *root;
2123 	*root = RB_ROOT_CACHED;
2124 	__bpf_spin_unlock_irqrestore(spin_lock);
2125 
2126 	bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) {
2127 		obj = pos;
2128 		obj -= field->graph_root.node_offset;
2129 
2130 
2131 		migrate_disable();
2132 		__bpf_obj_drop_impl(obj, field->graph_root.value_rec, false);
2133 		migrate_enable();
2134 	}
2135 }
2136 
2137 __bpf_kfunc_start_defs();
2138 
2139 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2140 {
2141 	struct btf_struct_meta *meta = meta__ign;
2142 	u64 size = local_type_id__k;
2143 	void *p;
2144 
2145 	p = bpf_mem_alloc(&bpf_global_ma, size);
2146 	if (!p)
2147 		return NULL;
2148 	if (meta)
2149 		bpf_obj_init(meta->record, p);
2150 	return p;
2151 }
2152 
2153 __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign)
2154 {
2155 	u64 size = local_type_id__k;
2156 
2157 	/* The verifier has ensured that meta__ign must be NULL */
2158 	return bpf_mem_alloc(&bpf_global_percpu_ma, size);
2159 }
2160 
2161 /* Must be called under migrate_disable(), as required by bpf_mem_free */
2162 void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu)
2163 {
2164 	struct bpf_mem_alloc *ma;
2165 
2166 	if (rec && rec->refcount_off >= 0 &&
2167 	    !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) {
2168 		/* Object is refcounted and refcount_dec didn't result in 0
2169 		 * refcount. Return without freeing the object
2170 		 */
2171 		return;
2172 	}
2173 
2174 	if (rec)
2175 		bpf_obj_free_fields(rec, p);
2176 
2177 	if (percpu)
2178 		ma = &bpf_global_percpu_ma;
2179 	else
2180 		ma = &bpf_global_ma;
2181 	bpf_mem_free_rcu(ma, p);
2182 }
2183 
2184 __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
2185 {
2186 	struct btf_struct_meta *meta = meta__ign;
2187 	void *p = p__alloc;
2188 
2189 	__bpf_obj_drop_impl(p, meta ? meta->record : NULL, false);
2190 }
2191 
2192 __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign)
2193 {
2194 	/* The verifier has ensured that meta__ign must be NULL */
2195 	bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc);
2196 }
2197 
2198 __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign)
2199 {
2200 	struct btf_struct_meta *meta = meta__ign;
2201 	struct bpf_refcount *ref;
2202 
2203 	/* Could just cast directly to refcount_t *, but need some code using
2204 	 * bpf_refcount type so that it is emitted in vmlinux BTF
2205 	 */
2206 	ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off);
2207 	if (!refcount_inc_not_zero((refcount_t *)ref))
2208 		return NULL;
2209 
2210 	/* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null
2211 	 * in verifier.c
2212 	 */
2213 	return (void *)p__refcounted_kptr;
2214 }
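
/* From the BPF side these kfuncs are normally reached through the
 * bpf_obj_new()/bpf_obj_drop()/bpf_refcount_acquire() convenience macros in
 * tools/testing/selftests/bpf/bpf_experimental.h, which supply the hidden
 * type-id/meta arguments.  Illustrative sketch only:
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_list_node node;
 *	};
 *
 *	SEC("tc")
 *	int alloc_and_free(void *ctx)
 *	{
 *		struct node_data *n;
 *
 *		n = bpf_obj_new(typeof(*n));
 *		if (!n)
 *			return 0;
 *		n->key = 5;
 *		bpf_obj_drop(n);	// release the owned reference
 *		return 0;
 *	}
 */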
2215 
2216 static int __bpf_list_add(struct bpf_list_node_kern *node,
2217 			  struct bpf_list_head *head,
2218 			  bool tail, struct btf_record *rec, u64 off)
2219 {
2220 	struct list_head *n = &node->list_head, *h = (void *)head;
2221 
2222 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2223 	 * called on its fields, so init here
2224 	 */
2225 	if (unlikely(!h->next))
2226 		INIT_LIST_HEAD(h);
2227 
2228 	/* node->owner != NULL implies !list_empty(n), no need to separately
2229 	 * check the latter
2230 	 */
2231 	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2232 		/* Only called from BPF prog, no need to migrate_disable */
2233 		__bpf_obj_drop_impl((void *)n - off, rec, false);
2234 		return -EINVAL;
2235 	}
2236 
2237 	tail ? list_add_tail(n, h) : list_add(n, h);
2238 	WRITE_ONCE(node->owner, head);
2239 
2240 	return 0;
2241 }
2242 
2243 __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head,
2244 					 struct bpf_list_node *node,
2245 					 void *meta__ign, u64 off)
2246 {
2247 	struct bpf_list_node_kern *n = (void *)node;
2248 	struct btf_struct_meta *meta = meta__ign;
2249 
2250 	return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off);
2251 }
2252 
2253 __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head,
2254 					struct bpf_list_node *node,
2255 					void *meta__ign, u64 off)
2256 {
2257 	struct bpf_list_node_kern *n = (void *)node;
2258 	struct btf_struct_meta *meta = meta__ign;
2259 
2260 	return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off);
2261 }
2262 
2263 static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail)
2264 {
2265 	struct list_head *n, *h = (void *)head;
2266 	struct bpf_list_node_kern *node;
2267 
2268 	/* If list_head was 0-initialized by map, bpf_obj_init_field wasn't
2269 	 * called on its fields, so init here
2270 	 */
2271 	if (unlikely(!h->next))
2272 		INIT_LIST_HEAD(h);
2273 	if (list_empty(h))
2274 		return NULL;
2275 
2276 	n = tail ? h->prev : h->next;
2277 	node = container_of(n, struct bpf_list_node_kern, list_head);
2278 	if (WARN_ON_ONCE(READ_ONCE(node->owner) != head))
2279 		return NULL;
2280 
2281 	list_del_init(n);
2282 	WRITE_ONCE(node->owner, NULL);
2283 	return (struct bpf_list_node *)n;
2284 }
2285 
2286 __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head)
2287 {
2288 	return __bpf_list_del(head, false);
2289 }
2290 
2291 __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
2292 {
2293 	return __bpf_list_del(head, true);
2294 }
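
/* Illustrative BPF-side sketch of the list API.  bpf_list_push_front() and
 * bpf_list_pop_back() below are the bpf_experimental.h wrappers around the
 * kfuncs in this file; the private() and __contains() annotations follow the
 * selftests conventions and are assumptions of this example.
 *
 *	struct elem {
 *		long val;
 *		struct bpf_list_node node;
 *	};
 *
 *	private(LOCK) struct bpf_spin_lock glock;
 *	private(LOCK) struct bpf_list_head ghead __contains(elem, node);
 *
 *	SEC("tc")
 *	int push_pop(void *ctx)
 *	{
 *		struct bpf_list_node *n;
 *		struct elem *e;
 *
 *		e = bpf_obj_new(typeof(*e));
 *		if (!e)
 *			return 0;
 *
 *		bpf_spin_lock(&glock);
 *		bpf_list_push_front(&ghead, &e->node);
 *		n = bpf_list_pop_back(&ghead);
 *		bpf_spin_unlock(&glock);
 *
 *		if (n) {
 *			e = container_of(n, struct elem, node);
 *			bpf_obj_drop(e);	// drop the popped (owned) node
 *		}
 *		return 0;
 *	}
 */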
2295 
2296 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
2297 						  struct bpf_rb_node *node)
2298 {
2299 	struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node;
2300 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2301 	struct rb_node *n = &node_internal->rb_node;
2302 
2303 	/* node_internal->owner != root implies either RB_EMPTY_NODE(n) or
2304 	 * n is owned by some other tree. No need to check RB_EMPTY_NODE(n)
2305 	 */
2306 	if (READ_ONCE(node_internal->owner) != root)
2307 		return NULL;
2308 
2309 	rb_erase_cached(n, r);
2310 	RB_CLEAR_NODE(n);
2311 	WRITE_ONCE(node_internal->owner, NULL);
2312 	return (struct bpf_rb_node *)n;
2313 }
2314 
2315 /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF
2316  * program
2317  */
2318 static int __bpf_rbtree_add(struct bpf_rb_root *root,
2319 			    struct bpf_rb_node_kern *node,
2320 			    void *less, struct btf_record *rec, u64 off)
2321 {
2322 	struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node;
2323 	struct rb_node *parent = NULL, *n = &node->rb_node;
2324 	bpf_callback_t cb = (bpf_callback_t)less;
2325 	bool leftmost = true;
2326 
2327 	/* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately
2328 	 * check the latter
2329 	 */
2330 	if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) {
2331 		/* Only called from BPF prog, no need to migrate_disable */
2332 		__bpf_obj_drop_impl((void *)n - off, rec, false);
2333 		return -EINVAL;
2334 	}
2335 
2336 	while (*link) {
2337 		parent = *link;
2338 		if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) {
2339 			link = &parent->rb_left;
2340 		} else {
2341 			link = &parent->rb_right;
2342 			leftmost = false;
2343 		}
2344 	}
2345 
2346 	rb_link_node(n, parent, link);
2347 	rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost);
2348 	WRITE_ONCE(node->owner, root);
2349 	return 0;
2350 }
2351 
2352 __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,
2353 				    bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b),
2354 				    void *meta__ign, u64 off)
2355 {
2356 	struct btf_struct_meta *meta = meta__ign;
2357 	struct bpf_rb_node_kern *n = (void *)node;
2358 
2359 	return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off);
2360 }
2361 
2362 __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
2363 {
2364 	struct rb_root_cached *r = (struct rb_root_cached *)root;
2365 
2366 	return (struct bpf_rb_node *)rb_first_cached(r);
2367 }
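
/* Illustrative BPF-side sketch of the rbtree API.  bpf_rbtree_add() is the
 * bpf_experimental.h wrapper around bpf_rbtree_add_impl(); the private() and
 * __contains() annotations follow the selftests conventions and are
 * assumptions of this example.
 *
 *	struct node_data {
 *		long key;
 *		struct bpf_rb_node node;
 *	};
 *
 *	private(TREE) struct bpf_spin_lock tlock;
 *	private(TREE) struct bpf_rb_root troot __contains(node_data, node);
 *
 *	static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
 *	{
 *		struct node_data *na = container_of(a, struct node_data, node);
 *		struct node_data *nb = container_of(b, struct node_data, node);
 *
 *		return na->key < nb->key;
 *	}
 *
 *	SEC("tc")
 *	int add_node(void *ctx)
 *	{
 *		struct node_data *n = bpf_obj_new(typeof(*n));
 *
 *		if (!n)
 *			return 0;
 *		n->key = 123;
 *
 *		bpf_spin_lock(&tlock);
 *		bpf_rbtree_add(&troot, &n->node, less);
 *		bpf_spin_unlock(&tlock);
 *		return 0;
 *	}
 */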
2368 
2369 /**
2370  * bpf_task_acquire - Acquire a reference to a task. A task acquired by this
2371  * kfunc which is not stored in a map as a kptr must be released by calling
2372  * bpf_task_release().
2373  * @p: The task on which a reference is being acquired.
2374  */
2375 __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
2376 {
2377 	if (refcount_inc_not_zero(&p->rcu_users))
2378 		return p;
2379 	return NULL;
2380 }
2381 
2382 /**
2383  * bpf_task_release - Release the reference acquired on a task.
2384  * @p: The task on which a reference is being released.
2385  */
2386 __bpf_kfunc void bpf_task_release(struct task_struct *p)
2387 {
2388 	put_task_struct_rcu_user(p);
2389 }
2390 
2391 __bpf_kfunc void bpf_task_release_dtor(void *p)
2392 {
2393 	put_task_struct_rcu_user(p);
2394 }
2395 CFI_NOSEAL(bpf_task_release_dtor);
2396 
2397 #ifdef CONFIG_CGROUPS
2398 /**
2399  * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by
2400  * this kfunc which is not stored in a map as a kptr must be released by
2401  * calling bpf_cgroup_release().
2402  * @cgrp: The cgroup on which a reference is being acquired.
2403  */
2404 __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
2405 {
2406 	return cgroup_tryget(cgrp) ? cgrp : NULL;
2407 }
2408 
2409 /**
2410  * bpf_cgroup_release - Release the reference acquired on a cgroup.
2411  * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to
2412  * not be freed until the current grace period has ended, even if its refcount
2413  * drops to 0.
2414  * @cgrp: The cgroup on which a reference is being released.
2415  */
2416 __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
2417 {
2418 	cgroup_put(cgrp);
2419 }
2420 
2421 __bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp)
2422 {
2423 	cgroup_put(cgrp);
2424 }
2425 CFI_NOSEAL(bpf_cgroup_release_dtor);
2426 
2427 /**
2428  * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor
2429  * array. A cgroup returned by this kfunc which is not subsequently stored in a
2430  * map must be released by calling bpf_cgroup_release().
2431  * @cgrp: The cgroup for which we're performing a lookup.
2432  * @level: The level of ancestor to look up.
2433  */
2434 __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level)
2435 {
2436 	struct cgroup *ancestor;
2437 
2438 	if (level > cgrp->level || level < 0)
2439 		return NULL;
2440 
2441 	/* cgrp's refcnt could be 0 here, but ancestors can still be accessed */
2442 	ancestor = cgrp->ancestors[level];
2443 	if (!cgroup_tryget(ancestor))
2444 		return NULL;
2445 	return ancestor;
2446 }
2447 
2448 /**
2449  * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this
2450  * kfunc which is not subsequently stored in a map must be released by calling
2451  * bpf_cgroup_release().
2452  * @cgid: cgroup id.
2453  */
2454 __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid)
2455 {
2456 	struct cgroup *cgrp;
2457 
2458 	cgrp = cgroup_get_from_id(cgid);
2459 	if (IS_ERR(cgrp))
2460 		return NULL;
2461 	return cgrp;
2462 }
2463 
2464 /**
2465  * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc, test
2466  * task's membership of cgroup ancestry.
2467  * @task: the task to be tested
2468  * @ancestor: possible ancestor of @task's cgroup
2469  *
2470  * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
2471  * It follows all the same rules as cgroup_is_descendant, and only applies
2472  * to the default hierarchy.
2473  */
2474 __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task,
2475 				       struct cgroup *ancestor)
2476 {
2477 	long ret;
2478 
2479 	rcu_read_lock();
2480 	ret = task_under_cgroup_hierarchy(task, ancestor);
2481 	rcu_read_unlock();
2482 	return ret;
2483 }
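
/* Illustrative BPF-side sketch: test whether the current task sits under a
 * cgroup identified by ID.  "target_cgid" is a made-up global filled in by
 * user space; the kfunc declarations are assumed to come from vmlinux.h plus
 * extern __ksym declarations.
 *
 *	u64 target_cgid;
 *
 *	SEC("lsm.s/file_open")
 *	int BPF_PROG(check_cgroup, struct file *file)
 *	{
 *		struct task_struct *task = bpf_get_current_task_btf();
 *		struct cgroup *cgrp;
 *		long under;
 *
 *		cgrp = bpf_cgroup_from_id(target_cgid);
 *		if (!cgrp)
 *			return 0;
 *		under = bpf_task_under_cgroup(task, cgrp);
 *		bpf_cgroup_release(cgrp);	// drop the acquired reference
 *		return under ? -EPERM : 0;
 *	}
 */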
2484 
2485 BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
2486 {
2487 	struct bpf_array *array = container_of(map, struct bpf_array, map);
2488 	struct cgroup *cgrp;
2489 
2490 	if (unlikely(idx >= array->map.max_entries))
2491 		return -E2BIG;
2492 
2493 	cgrp = READ_ONCE(array->ptrs[idx]);
2494 	if (unlikely(!cgrp))
2495 		return -EAGAIN;
2496 
2497 	return task_under_cgroup_hierarchy(current, cgrp);
2498 }
2499 
2500 const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
2501 	.func           = bpf_current_task_under_cgroup,
2502 	.gpl_only       = false,
2503 	.ret_type       = RET_INTEGER,
2504 	.arg1_type      = ARG_CONST_MAP_PTR,
2505 	.arg2_type      = ARG_ANYTHING,
2506 };
2507 
2508 /**
2509  * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a
2510  * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
2511  * hierarchy ID.
2512  * @task: The target task
2513  * @hierarchy_id: The ID of a cgroup1 hierarchy
2514  *
2515  * On success, the cgroup is returned. On failure, NULL is returned.
2516  */
2517 __bpf_kfunc struct cgroup *
2518 bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id)
2519 {
2520 	struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id);
2521 
2522 	if (IS_ERR(cgrp))
2523 		return NULL;
2524 	return cgrp;
2525 }
2526 #endif /* CONFIG_CGROUPS */
2527 
2528 /**
2529  * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up
2530  * in the root pid namespace idr. If a task is returned, it must either be
2531  * stored in a map, or released with bpf_task_release().
2532  * @pid: The pid of the task being looked up.
2533  */
2534 __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
2535 {
2536 	struct task_struct *p;
2537 
2538 	rcu_read_lock();
2539 	p = find_task_by_pid_ns(pid, &init_pid_ns);
2540 	if (p)
2541 		p = bpf_task_acquire(p);
2542 	rcu_read_unlock();
2543 
2544 	return p;
2545 }
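
/* Illustrative BPF-side sketch of the task acquire/release contract (the
 * attach point and looked-up pid are chosen only for the example):
 *
 *	SEC("tp_btf/task_newtask")
 *	int BPF_PROG(lookup_init_task, struct task_struct *new_task, u64 clone_flags)
 *	{
 *		struct task_struct *p;
 *
 *		p = bpf_task_from_pid(1);
 *		if (!p)
 *			return 0;
 *		bpf_printk("pid 1 comm: %s", p->comm);
 *		bpf_task_release(p);	// must release the acquired reference
 *		return 0;
 *	}
 */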
2546 
2547 /**
2548  * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data.
2549  * @p: The dynptr whose data slice to retrieve
2550  * @offset: Offset into the dynptr
2551  * @buffer__opt: User-provided buffer to copy contents into.  May be NULL
2552  * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2553  *               length of the requested slice. This must be a constant.
2554  *
2555  * For non-skb and non-xdp type dynptrs, there is no difference between
2556  * bpf_dynptr_slice and bpf_dynptr_data.
2557  *
2558  * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
2559  *
2560  * If the intention is to write to the data slice, please use
2561  * bpf_dynptr_slice_rdwr.
2562  *
2563  * The user must check that the returned pointer is not null before using it.
2564  *
2565  * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice
2566  * does not change the underlying packet data pointers, so a call to
2567  * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in
2568  * the bpf program.
2569  *
2570  * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only
2571  * data slice (can be either direct pointer to the data or a pointer to the user
2572  * provided buffer, with its contents containing the data, if unable to obtain
2573  * direct pointer)
2574  */
2575 __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset,
2576 				   void *buffer__opt, u32 buffer__szk)
2577 {
2578 	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2579 	enum bpf_dynptr_type type;
2580 	u32 len = buffer__szk;
2581 	int err;
2582 
2583 	if (!ptr->data)
2584 		return NULL;
2585 
2586 	err = bpf_dynptr_check_off_len(ptr, offset, len);
2587 	if (err)
2588 		return NULL;
2589 
2590 	type = bpf_dynptr_get_type(ptr);
2591 
2592 	switch (type) {
2593 	case BPF_DYNPTR_TYPE_LOCAL:
2594 	case BPF_DYNPTR_TYPE_RINGBUF:
2595 		return ptr->data + ptr->offset + offset;
2596 	case BPF_DYNPTR_TYPE_SKB:
2597 		if (buffer__opt)
2598 			return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt);
2599 		else
2600 			return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len);
2601 	case BPF_DYNPTR_TYPE_XDP:
2602 	{
2603 		void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len);
2604 		if (!IS_ERR_OR_NULL(xdp_ptr))
2605 			return xdp_ptr;
2606 
2607 		if (!buffer__opt)
2608 			return NULL;
2609 		bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false);
2610 		return buffer__opt;
2611 	}
2612 	default:
2613 		WARN_ONCE(true, "unknown dynptr type %d\n", type);
2614 		return NULL;
2615 	}
2616 }
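
/* Illustrative BPF-side sketch: read an ethernet header through a dynptr
 * slice with a stack buffer as fallback for non-linear skb data.  The skb
 * dynptr is assumed to be created with bpf_dynptr_from_skb(), declared as a
 * kfunc in the selftests' bpf_kfuncs.h.
 *
 *	SEC("tc")
 *	int parse_eth(struct __sk_buff *skb)
 *	{
 *		struct bpf_dynptr ptr;
 *		struct ethhdr buf, *eth;
 *
 *		if (bpf_dynptr_from_skb(skb, 0, &ptr))
 *			return TC_ACT_SHOT;
 *
 *		eth = bpf_dynptr_slice(&ptr, 0, &buf, sizeof(buf));
 *		if (!eth)
 *			return TC_ACT_SHOT;
 *		if (eth->h_proto != bpf_htons(ETH_P_IP))
 *			return TC_ACT_OK;
 *		// ... continue parsing the IP header ...
 *		return TC_ACT_OK;
 *	}
 */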
2617 
2618 /**
2619  * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data.
2620  * @p: The dynptr whose data slice to retrieve
2621  * @offset: Offset into the dynptr
2622  * @buffer__opt: User-provided buffer to copy contents into. May be NULL
2623  * @buffer__szk: Size (in bytes) of the buffer if present. This is the
2624  *               length of the requested slice. This must be a constant.
2625  *
2626  * For non-skb and non-xdp type dynptrs, there is no difference between
2627  * bpf_dynptr_slice and bpf_dynptr_data.
2628  *
2629  * If buffer__opt is NULL, the call will fail if buffer__opt was needed.
2630  *
2631  * The returned pointer is writable and may point either directly to the dynptr
2632  * data at the requested offset or to the buffer if a direct data pointer could
2633  * not be obtained (example: the requested slice is in the paged area of an skb
2634  * packet). In the case where the returned pointer is to the buffer, the user
2635  * is responsible for persisting writes through calling bpf_dynptr_write(). This
2636  * usually looks something like this pattern:
2637  *
2638  * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer));
2639  * if (!eth)
2640  *	return TC_ACT_SHOT;
2641  *
2642  * // mutate eth header //
2643  *
2644  * if (eth == buffer)
2645  *	bpf_dynptr_write(&dynptr, 0, buffer, sizeof(buffer), 0);
2646  *
2647  * Please note that, as in the example above, the user must check that the
2648  * returned pointer is not null before using it.
2649  *
2650  * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr
2651  * does not change the underlying packet data pointers, so a call to
2652  * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in
2653  * the bpf program.
2654  *
2655  * Return: NULL if the call failed (eg invalid dynptr), pointer to a
2656  * data slice (can be either direct pointer to the data or a pointer to the user
2657  * provided buffer, with its contents containing the data, if unable to obtain
2658  * direct pointer)
2659  */
2660 __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset,
2661 					void *buffer__opt, u32 buffer__szk)
2662 {
2663 	const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2664 
2665 	if (!ptr->data || __bpf_dynptr_is_rdonly(ptr))
2666 		return NULL;
2667 
2668 	/* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice.
2669 	 *
2670 	 * For skb-type dynptrs, it is safe to write into the returned pointer
2671 	 * if the bpf program allows skb data writes. There are two possibilities
2672 	 * that may occur when calling bpf_dynptr_slice_rdwr:
2673 	 *
2674 	 * 1) The requested slice is in the head of the skb. In this case, the
2675 	 * returned pointer is directly to skb data, and if the skb is cloned, the
2676 	 * verifier will have uncloned it (see bpf_unclone_prologue()) already.
2677 	 * The pointer can be directly written into.
2678 	 *
2679 	 * 2) Some portion of the requested slice is in the paged buffer area.
2680 	 * In this case, the requested data will be copied out into the buffer
2681 	 * and the returned pointer will be a pointer to the buffer. The skb
2682 	 * will not be pulled. To persist the write, the user will need to call
2683 	 * bpf_dynptr_write(), which will pull the skb and commit the write.
2684 	 *
2685 	 * Similarly for xdp programs, if the requested slice is not across xdp
2686 	 * fragments, then a direct pointer will be returned, otherwise the data
2687 	 * will be copied out into the buffer and the user will need to call
2688 	 * bpf_dynptr_write() to commit changes.
2689 	 */
2690 	return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk);
2691 }
2692 
2693 __bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end)
2694 {
2695 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2696 	u32 size;
2697 
2698 	if (!ptr->data || start > end)
2699 		return -EINVAL;
2700 
2701 	size = __bpf_dynptr_size(ptr);
2702 
2703 	if (start > size || end > size)
2704 		return -ERANGE;
2705 
2706 	ptr->offset += start;
2707 	bpf_dynptr_set_size(ptr, end - start);
2708 
2709 	return 0;
2710 }
2711 
2712 __bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p)
2713 {
2714 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2715 
2716 	return !ptr->data;
2717 }
2718 
2719 __bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p)
2720 {
2721 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2722 
2723 	if (!ptr->data)
2724 		return false;
2725 
2726 	return __bpf_dynptr_is_rdonly(ptr);
2727 }
2728 
2729 __bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p)
2730 {
2731 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2732 
2733 	if (!ptr->data)
2734 		return -EINVAL;
2735 
2736 	return __bpf_dynptr_size(ptr);
2737 }
2738 
2739 __bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p,
2740 				 struct bpf_dynptr *clone__uninit)
2741 {
2742 	struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit;
2743 	struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p;
2744 
2745 	if (!ptr->data) {
2746 		bpf_dynptr_set_null(clone);
2747 		return -EINVAL;
2748 	}
2749 
2750 	*clone = *ptr;
2751 
2752 	return 0;
2753 }
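
/* Illustrative BPF-side sketch of the dynptr utility kfuncs above: clone an
 * already-initialized dynptr ("ptr" is assumed to exist in the caller) and
 * narrow the clone to a sub-range without disturbing the original view.
 *
 *	struct bpf_dynptr clone;
 *	__u32 sz;
 *
 *	if (bpf_dynptr_clone(&ptr, &clone))
 *		return 0;
 *	// restrict the clone to bytes [4, 12) of the original data
 *	if (bpf_dynptr_adjust(&clone, 4, 12))
 *		return 0;
 *	sz = bpf_dynptr_size(&clone);	// 8
 *	if (bpf_dynptr_is_null(&clone) || bpf_dynptr_is_rdonly(&clone))
 *		return 0;
 */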
2754 
2755 __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj)
2756 {
2757 	return obj;
2758 }
2759 
2760 __bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k)
2761 {
2762 	return (void *)obj__ign;
2763 }
2764 
2765 __bpf_kfunc void bpf_rcu_read_lock(void)
2766 {
2767 	rcu_read_lock();
2768 }
2769 
2770 __bpf_kfunc void bpf_rcu_read_unlock(void)
2771 {
2772 	rcu_read_unlock();
2773 }
2774 
2775 struct bpf_throw_ctx {
2776 	struct bpf_prog_aux *aux;
2777 	u64 sp;
2778 	u64 bp;
2779 	int cnt;
2780 };
2781 
2782 static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp)
2783 {
2784 	struct bpf_throw_ctx *ctx = cookie;
2785 	struct bpf_prog *prog;
2786 
2787 	/*
2788 	 * The RCU read lock is held to safely traverse the latch tree, but we
2789 	 * don't need its protection when accessing the prog, since it has an
2790 	 * active stack frame on the current stack trace, and won't disappear.
2791 	 */
2792 	rcu_read_lock();
2793 	prog = bpf_prog_ksym_find(ip);
2794 	rcu_read_unlock();
2795 	if (!prog)
2796 		return !ctx->cnt;
2797 	ctx->cnt++;
2798 	if (bpf_is_subprog(prog))
2799 		return true;
2800 	ctx->aux = prog->aux;
2801 	ctx->sp = sp;
2802 	ctx->bp = bp;
2803 	return false;
2804 }
2805 
2806 __bpf_kfunc void bpf_throw(u64 cookie)
2807 {
2808 	struct bpf_throw_ctx ctx = {};
2809 
2810 	arch_bpf_stack_walk(bpf_stack_walker, &ctx);
2811 	WARN_ON_ONCE(!ctx.aux);
2812 	if (ctx.aux)
2813 		WARN_ON_ONCE(!ctx.aux->exception_boundary);
2814 	WARN_ON_ONCE(!ctx.bp);
2815 	WARN_ON_ONCE(!ctx.cnt);
2816 	/* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning
2817 	 * deeper stack depths than ctx.sp as we do not return from bpf_throw,
2818 	 * which skips compiler generated instrumentation to do the same.
2819 	 */
2820 	kasan_unpoison_task_stack_below((void *)(long)ctx.sp);
2821 	ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0);
2822 	WARN(1, "A call to BPF exception callback should never return\n");
2823 }
2824 
2825 __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags)
2826 {
2827 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
2828 	struct bpf_map *map = p__map;
2829 
2830 	BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq));
2831 	BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq));
2832 
2833 	if (flags)
2834 		return -EINVAL;
2835 
2836 	return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ);
2837 }
2838 
2839 __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
2840 {
2841 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
2842 	struct bpf_work *w;
2843 
2844 	if (in_nmi())
2845 		return -EOPNOTSUPP;
2846 	if (flags)
2847 		return -EINVAL;
2848 	w = READ_ONCE(async->work);
2849 	if (!w || !READ_ONCE(w->cb.prog))
2850 		return -EINVAL;
2851 
2852 	schedule_work(&w->work);
2853 	return 0;
2854 }
2855 
2856 __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq,
2857 					 int (callback_fn)(void *map, int *key, void *value),
2858 					 unsigned int flags,
2859 					 void *aux__ign)
2860 {
2861 	struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__ign;
2862 	struct bpf_async_kern *async = (struct bpf_async_kern *)wq;
2863 
2864 	if (flags)
2865 		return -EINVAL;
2866 
2867 	return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ);
2868 }
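
/* Illustrative BPF-side sketch of the workqueue kfuncs: a struct bpf_wq
 * embedded in a map value is initialized, given a callback and started.
 * bpf_wq_set_callback() is assumed to be the bpf_experimental.h wrapper
 * around bpf_wq_set_callback_impl(); map and section names are made up for
 * the example.
 *
 *	struct elem {
 *		struct bpf_wq wq;
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, int);
 *		__type(value, struct elem);
 *	} wq_map SEC(".maps");
 *
 *	static int wq_cb(void *map, int *key, void *value)
 *	{
 *		// runs later from workqueue (process) context
 *		return 0;
 *	}
 *
 *	SEC("fentry/bpf_fentry_test1")
 *	int start_work(void *ctx)
 *	{
 *		int key = 0;
 *		struct elem *val = bpf_map_lookup_elem(&wq_map, &key);
 *
 *		if (!val)
 *			return 0;
 *		if (bpf_wq_init(&val->wq, &wq_map, 0))
 *			return 0;
 *		if (bpf_wq_set_callback(&val->wq, wq_cb, 0))
 *			return 0;
 *		bpf_wq_start(&val->wq, 0);
 *		return 0;
 *	}
 */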
2869 
2870 __bpf_kfunc void bpf_preempt_disable(void)
2871 {
2872 	preempt_disable();
2873 }
2874 
2875 __bpf_kfunc void bpf_preempt_enable(void)
2876 {
2877 	preempt_enable();
2878 }
2879 
2880 struct bpf_iter_bits {
2881 	__u64 __opaque[2];
2882 } __aligned(8);
2883 
2884 #define BITS_ITER_NR_WORDS_MAX 511
2885 
2886 struct bpf_iter_bits_kern {
2887 	union {
2888 		__u64 *bits;
2889 		__u64 bits_copy;
2890 	};
2891 	int nr_bits;
2892 	int bit;
2893 } __aligned(8);
2894 
2895 /* On 64-bit hosts, unsigned long and u64 have the same size, so passing
2896  * a u64 pointer and an unsigned long pointer to find_next_bit() will
2897  * return the same result, as both point to the same 8-byte area.
2898  *
2899  * For 32-bit little-endian hosts, using a u64 pointer or unsigned long
2900  * pointer also makes no difference. This is because the first iterated
2901  * unsigned long is composed of bits 0-31 of the u64 and the second unsigned
2902  * long is composed of bits 32-63 of the u64.
2903  *
2904  * However, for 32-bit big-endian hosts, this is not the case. The first
2905  * iterated unsigned long will be bits 32-63 of the u64, so swap these two
2906  * ulong values within the u64.
2907  */
2908 static void swap_ulong_in_u64(u64 *bits, unsigned int nr)
2909 {
2910 #if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
2911 	unsigned int i;
2912 
2913 	for (i = 0; i < nr; i++)
2914 		bits[i] = (bits[i] >> 32) | ((u64)(u32)bits[i] << 32);
2915 #endif
2916 }
2917 
2918 /**
2919  * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area
2920  * @it: The new bpf_iter_bits to be created
2921  * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over
2922  * @nr_words: The size of the specified memory area, measured in 8-byte units.
2923  * The maximum value of @nr_words is @BITS_ITER_NR_WORDS_MAX. This limit may be
2924  * further reduced by the BPF memory allocator implementation.
2925  *
2926  * This function initializes a new bpf_iter_bits structure for iterating over
2927  * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It
2928  * copies the data of the memory area to the newly created bpf_iter_bits @it for
2929  * subsequent iteration operations.
2930  *
2931  * On success, 0 is returned. On failure, a negative error code is returned.
2932  */
2933 __bpf_kfunc int
2934 bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words)
2935 {
2936 	struct bpf_iter_bits_kern *kit = (void *)it;
2937 	u32 nr_bytes = nr_words * sizeof(u64);
2938 	u32 nr_bits = BYTES_TO_BITS(nr_bytes);
2939 	int err;
2940 
2941 	BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits));
2942 	BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) !=
2943 		     __alignof__(struct bpf_iter_bits));
2944 
2945 	kit->nr_bits = 0;
2946 	kit->bits_copy = 0;
2947 	kit->bit = -1;
2948 
2949 	if (!unsafe_ptr__ign || !nr_words)
2950 		return -EINVAL;
2951 	if (nr_words > BITS_ITER_NR_WORDS_MAX)
2952 		return -E2BIG;
2953 
2954 	/* Optimization for u64 mask */
2955 	if (nr_bits == 64) {
2956 		err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign);
2957 		if (err)
2958 			return -EFAULT;
2959 
2960 		swap_ulong_in_u64(&kit->bits_copy, nr_words);
2961 
2962 		kit->nr_bits = nr_bits;
2963 		return 0;
2964 	}
2965 
2966 	if (bpf_mem_alloc_check_size(false, nr_bytes))
2967 		return -E2BIG;
2968 
2969 	/* Fallback to memalloc */
2970 	kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes);
2971 	if (!kit->bits)
2972 		return -ENOMEM;
2973 
2974 	err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign);
2975 	if (err) {
2976 		bpf_mem_free(&bpf_global_ma, kit->bits);
2977 		return err;
2978 	}
2979 
2980 	swap_ulong_in_u64(kit->bits, nr_words);
2981 
2982 	kit->nr_bits = nr_bits;
2983 	return 0;
2984 }
2985 
2986 /**
2987  * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits
2988  * @it: The bpf_iter_bits to be checked
2989  *
2990  * This function returns a pointer to a number representing the index of the
2991  * next set bit in the bit area.
2992  *
2993  * If there are no further bits available, it returns NULL.
2994  */
2995 __bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it)
2996 {
2997 	struct bpf_iter_bits_kern *kit = (void *)it;
2998 	int bit = kit->bit, nr_bits = kit->nr_bits;
2999 	const void *bits;
3000 
3001 	if (!nr_bits || bit >= nr_bits)
3002 		return NULL;
3003 
3004 	bits = nr_bits == 64 ? &kit->bits_copy : kit->bits;
3005 	bit = find_next_bit(bits, nr_bits, bit + 1);
3006 	if (bit >= nr_bits) {
3007 		kit->bit = bit;
3008 		return NULL;
3009 	}
3010 
3011 	kit->bit = bit;
3012 	return &kit->bit;
3013 }
3014 
3015 /**
3016  * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits
3017  * @it: The bpf_iter_bits to be destroyed
3018  *
3019  * Destroy the resource associated with the bpf_iter_bits.
3020  */
3021 __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it)
3022 {
3023 	struct bpf_iter_bits_kern *kit = (void *)it;
3024 
3025 	if (kit->nr_bits <= 64)
3026 		return;
3027 	bpf_mem_free(&bpf_global_ma, kit->bits);
3028 }
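
/* Illustrative BPF-side sketch: count the set bits in a single u64 word using
 * the open-coded bits iterator.  bpf_for_each() is assumed to be the iterator
 * convenience macro from the selftests (bpf_misc.h); "mask" is assumed to be
 * a readable kernel pointer to one u64 obtained elsewhere.
 *
 *	int *bit, cnt = 0;
 *
 *	bpf_for_each(bits, bit, mask, 1)	// 1 word => bits 0..63
 *		cnt++;
 *	bpf_printk("%d bits set", cnt);
 */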
3029 
3030 /**
3031  * bpf_copy_from_user_str() - Copy a string from an unsafe user address
3032  * @dst:             Destination address, in kernel space.  This buffer must be
3033  *                   at least @dst__sz bytes long.
3034  * @dst__sz:         Maximum number of bytes to copy, includes the trailing NUL.
3035  * @unsafe_ptr__ign: Source address, in user space.
3036  * @flags:           The only supported flag is BPF_F_PAD_ZEROS
3037  *
3038  * Copies a NUL-terminated string from userspace to BPF space. If the user
3039  * string is too long, this will still ensure NUL termination in the @dst
3040  * buffer unless the buffer size is 0.
3041  *
3042  * If BPF_F_PAD_ZEROS flag is set, memset the tail of @dst to 0 on success and
3043  * memset all of @dst on failure.
3044  */
3045 __bpf_kfunc int bpf_copy_from_user_str(void *dst, u32 dst__sz, const void __user *unsafe_ptr__ign, u64 flags)
3046 {
3047 	int ret;
3048 
3049 	if (unlikely(flags & ~BPF_F_PAD_ZEROS))
3050 		return -EINVAL;
3051 
3052 	if (unlikely(!dst__sz))
3053 		return 0;
3054 
3055 	ret = strncpy_from_user(dst, unsafe_ptr__ign, dst__sz - 1);
3056 	if (ret < 0) {
3057 		if (flags & BPF_F_PAD_ZEROS)
3058 			memset((char *)dst, 0, dst__sz);
3059 
3060 		return ret;
3061 	}
3062 
3063 	if (flags & BPF_F_PAD_ZEROS)
3064 		memset((char *)dst + ret, 0, dst__sz - ret);
3065 	else
3066 		((char *)dst)[ret] = '\0';
3067 
3068 	return ret + 1;
3069 }
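
/* Illustrative BPF-side sketch (the kfunc is KF_SLEEPABLE, so it must run in
 * a sleepable program; the uprobe target and argument are assumptions of the
 * example):
 *
 *	SEC("uprobe.s")
 *	int BPF_UPROBE(copy_name, const char *user_name)
 *	{
 *		char buf[64];
 *		int n;
 *
 *		n = bpf_copy_from_user_str(buf, sizeof(buf), user_name,
 *					   BPF_F_PAD_ZEROS);
 *		if (n < 0)
 *			return 0;
 *		bpf_printk("copied %d bytes: %s", n, buf);
 *		return 0;
 *	}
 */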
3070 
3071 __bpf_kfunc_end_defs();
3072 
3073 BTF_KFUNCS_START(generic_btf_ids)
3074 #ifdef CONFIG_CRASH_DUMP
3075 BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE)
3076 #endif
3077 BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
3078 BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL)
3079 BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE)
3080 BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE)
3081 BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU)
3082 BTF_ID_FLAGS(func, bpf_list_push_front_impl)
3083 BTF_ID_FLAGS(func, bpf_list_push_back_impl)
3084 BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
3085 BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
3086 BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
3087 BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
3088 BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)
3089 BTF_ID_FLAGS(func, bpf_rbtree_add_impl)
3090 BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
3091 
3092 #ifdef CONFIG_CGROUPS
3093 BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
3094 BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
3095 BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
3096 BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
3097 BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU)
3098 BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
3099 #endif
3100 BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL)
3101 BTF_ID_FLAGS(func, bpf_throw)
3102 BTF_KFUNCS_END(generic_btf_ids)
3103 
3104 static const struct btf_kfunc_id_set generic_kfunc_set = {
3105 	.owner = THIS_MODULE,
3106 	.set   = &generic_btf_ids,
3107 };
3108 
3109 
3110 BTF_ID_LIST(generic_dtor_ids)
3111 BTF_ID(struct, task_struct)
3112 BTF_ID(func, bpf_task_release_dtor)
3113 #ifdef CONFIG_CGROUPS
3114 BTF_ID(struct, cgroup)
3115 BTF_ID(func, bpf_cgroup_release_dtor)
3116 #endif
3117 
3118 BTF_KFUNCS_START(common_btf_ids)
3119 BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx)
3120 BTF_ID_FLAGS(func, bpf_rdonly_cast)
3121 BTF_ID_FLAGS(func, bpf_rcu_read_lock)
3122 BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
3123 BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
3124 BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
3125 BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
3126 BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
3127 BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
3128 BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU)
3129 BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL)
3130 BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY)
3131 #ifdef CONFIG_CGROUPS
3132 BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
3133 BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL)
3134 BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY)
3135 BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
3136 BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL)
3137 BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY)
3138 #endif
3139 BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
3140 BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL)
3141 BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY)
3142 BTF_ID_FLAGS(func, bpf_dynptr_adjust)
3143 BTF_ID_FLAGS(func, bpf_dynptr_is_null)
3144 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
3145 BTF_ID_FLAGS(func, bpf_dynptr_size)
3146 BTF_ID_FLAGS(func, bpf_dynptr_clone)
3147 BTF_ID_FLAGS(func, bpf_modify_return_test_tp)
3148 BTF_ID_FLAGS(func, bpf_wq_init)
3149 BTF_ID_FLAGS(func, bpf_wq_set_callback_impl)
3150 BTF_ID_FLAGS(func, bpf_wq_start)
3151 BTF_ID_FLAGS(func, bpf_preempt_disable)
3152 BTF_ID_FLAGS(func, bpf_preempt_enable)
3153 BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
3154 BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
3155 BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
3156 BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
3157 #ifdef CONFIG_DMA_SHARED_BUFFER
3158 BTF_ID_FLAGS(func, bpf_iter_dmabuf_new, KF_ITER_NEW | KF_SLEEPABLE)
3159 BTF_ID_FLAGS(func, bpf_iter_dmabuf_next, KF_ITER_NEXT | KF_RET_NULL | KF_SLEEPABLE)
3160 BTF_ID_FLAGS(func, bpf_iter_dmabuf_destroy, KF_ITER_DESTROY | KF_SLEEPABLE)
3161 #endif
3162 BTF_KFUNCS_END(common_btf_ids)
3163 
3164 static const struct btf_kfunc_id_set common_kfunc_set = {
3165 	.owner = THIS_MODULE,
3166 	.set   = &common_btf_ids,
3167 };
3168 
3169 static int __init kfunc_init(void)
3170 {
3171 	int ret;
3172 	const struct btf_id_dtor_kfunc generic_dtors[] = {
3173 		{
3174 			.btf_id       = generic_dtor_ids[0],
3175 			.kfunc_btf_id = generic_dtor_ids[1]
3176 		},
3177 #ifdef CONFIG_CGROUPS
3178 		{
3179 			.btf_id       = generic_dtor_ids[2],
3180 			.kfunc_btf_id = generic_dtor_ids[3]
3181 		},
3182 #endif
3183 	};
3184 
3185 	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
3186 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
3187 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set);
3188 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
3189 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set);
3190 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_CGROUP_SKB, &generic_kfunc_set);
3191 	ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors,
3192 						  ARRAY_SIZE(generic_dtors),
3193 						  THIS_MODULE);
3194 	return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
3195 }
3196 
3197 late_initcall(kfunc_init);
3198 
3199 /* Get a pointer to dynptr data up to len bytes for read-only access. If
3200  * the dynptr doesn't have contiguous data up to len bytes, return NULL.
3201  */
3202 const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len)
3203 {
3204 	const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr;
3205 
3206 	return bpf_dynptr_slice(p, 0, NULL, len);
3207 }
3208 
3209 /* Get a pointer to dynptr data up to len bytes for read-write access. If
3210  * the dynptr doesn't have contiguous data up to len bytes, or the dynptr
3211  * is read-only, return NULL.
3212  */
3213 void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len)
3214 {
3215 	if (__bpf_dynptr_is_rdonly(ptr))
3216 		return NULL;
3217 	return (void *)__bpf_dynptr_data(ptr, len);
3218 }
3219