// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it should
 * return bpf_map_lookup_elem_proto from its own verifier_ops->get_func_proto()
 * callback so that the verifier can properly check the arguments.
 *
 * Different map implementations rely on RCU in their lookup/update/delete
 * methods, so eBPF programs that are allowed to access maps must run under an
 * RCU lock; hence the rcu_read_lock_held() or rcu_read_lock_trace_held()
 * checks in all three functions below.
 */
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
        return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
        .func = bpf_map_lookup_elem,
        .gpl_only = false,
        .pkt_access = true,
        .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
           void *, value, u64, flags)
{
        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
        return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
        .func = bpf_map_update_elem,
        .gpl_only = false,
        .pkt_access = true,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_MAP_KEY,
        .arg3_type = ARG_PTR_TO_MAP_VALUE,
        .arg4_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
        return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
        .func = bpf_map_delete_elem,
        .gpl_only = false,
        .pkt_access = true,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_MAP_KEY,
};
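
/*
 * Illustrative sketch, not part of this file: per the comment at the top, a
 * subsystem that wants its eBPF programs to use the map helpers exposes these
 * protos from its verifier_ops->get_func_proto() callback. The callback name
 * below is hypothetical; the returned protos and the fallback to
 * bpf_base_func_proto() are real symbols from this file.
 *
 *      static const struct bpf_func_proto *
 *      example_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *      {
 *              switch (func_id) {
 *              case BPF_FUNC_map_lookup_elem:
 *                      return &bpf_map_lookup_elem_proto;
 *              case BPF_FUNC_map_update_elem:
 *                      return &bpf_map_update_elem_proto;
 *              case BPF_FUNC_map_delete_elem:
 *                      return &bpf_map_delete_elem_proto;
 *              default:
 *                      return bpf_base_func_proto(func_id);
 *              }
 *      }
 */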

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
        return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
        .func = bpf_map_push_elem,
        .gpl_only = false,
        .pkt_access = true,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_MAP_VALUE,
        .arg3_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
        return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
        .func = bpf_map_pop_elem,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
        return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
        .func = bpf_map_peek_elem,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE,
};
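
/*
 * Illustrative sketch, not part of this file: how a libbpf-built BPF program
 * would use the push/peek helpers against a queue map. The map name, section
 * name and attach point are made up for the example.
 *
 *      #include <linux/bpf.h>
 *      #include <bpf/bpf_helpers.h>
 *
 *      struct {
 *              __uint(type, BPF_MAP_TYPE_QUEUE);
 *              __uint(max_entries, 16);
 *              __uint(key_size, 0);
 *              __uint(value_size, sizeof(__u64));
 *      } tstamps SEC(".maps");
 *
 *      SEC("tracepoint/syscalls/sys_enter_getpid")
 *      int record_ts(void *ctx)
 *      {
 *              __u64 now = bpf_ktime_get_ns(), oldest;
 *
 *              bpf_map_push_elem(&tstamps, &now, BPF_EXIST); // overwrite oldest if full
 *              if (bpf_map_peek_elem(&tstamps, &oldest) == 0)
 *                      bpf_printk("front of queue: %llu", oldest);
 *              return 0;
 *      }
 *
 *      char LICENSE[] SEC("license") = "GPL";
 */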

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
        .func = bpf_user_rnd_u32,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
        return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
        .func = bpf_get_smp_processor_id,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
        return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
        .func = bpf_get_numa_node_id,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
        /* NMI safe access to clock monotonic */
        return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
        .func = bpf_ktime_get_ns,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
        /* NMI safe access to clock boottime */
        return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
        .func = bpf_ktime_get_boot_ns,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
        struct task_struct *task = current;

        if (unlikely(!task))
                return -EINVAL;

        return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
        .func = bpf_get_current_pid_tgid,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
};
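
/*
 * Illustrative sketch, not part of this file: on the BPF program side the
 * packed return value is split back into tgid (upper 32 bits) and pid (lower
 * 32 bits). Variable names are arbitrary.
 *
 *      __u64 id = bpf_get_current_pid_tgid();
 *      __u32 tgid = id >> 32;     // userspace-visible "process" id
 *      __u32 pid = (__u32)id;     // kernel task (thread) id
 */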

BPF_CALL_0(bpf_get_current_uid_gid)
{
        struct task_struct *task = current;
        kuid_t uid;
        kgid_t gid;

        if (unlikely(!task))
                return -EINVAL;

        current_uid_gid(&uid, &gid);
        return (u64) from_kgid(&init_user_ns, gid) << 32 |
                     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
        .func = bpf_get_current_uid_gid,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
        struct task_struct *task = current;

        if (unlikely(!task))
                goto err_clear;

        strncpy(buf, task->comm, size);

        /* Verifier guarantees that size > 0. For task->comm exceeding
         * size, guarantee that buf is %NUL-terminated. Unconditionally
         * done here to save the size test.
         */
        buf[size - 1] = 0;
        return 0;
err_clear:
        memset(buf, 0, size);
        return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
        .func = bpf_get_current_comm,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_UNINIT_MEM,
        .arg2_type = ARG_CONST_SIZE,
};
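
/*
 * Illustrative sketch, not part of this file: program-side use of the helper
 * above. TASK_COMM_LEN is 16, so a 16-byte buffer always holds the full,
 * NUL-terminated name.
 *
 *      char comm[16];
 *
 *      if (!bpf_get_current_comm(comm, sizeof(comm)))
 *              bpf_printk("running in %s", comm);
 */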

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
        arch_spinlock_t *l = (void *)lock;
        union {
                __u32 val;
                arch_spinlock_t lock;
        } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

        compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
        BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
        BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
        arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
        arch_spinlock_t *l = (void *)lock;

        arch_spin_unlock(l);
}

#else

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
        atomic_t *l = (void *)lock;

        BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
        do {
                atomic_cond_read_relaxed(l, !VAL);
        } while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
        atomic_t *l = (void *)lock;

        atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
        unsigned long flags;

        local_irq_save(flags);
        __bpf_spin_lock(lock);
        __this_cpu_write(irqsave_flags, flags);
        return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
        .func = bpf_spin_lock,
        .gpl_only = false,
        .ret_type = RET_VOID,
        .arg1_type = ARG_PTR_TO_SPIN_LOCK,
};

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
        unsigned long flags;

        flags = __this_cpu_read(irqsave_flags);
        __bpf_spin_unlock(lock);
        local_irq_restore(flags);
        return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
        .func = bpf_spin_unlock,
        .gpl_only = false,
        .ret_type = RET_VOID,
        .arg1_type = ARG_PTR_TO_SPIN_LOCK,
};
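
/*
 * Illustrative sketch, not part of this file: from a BPF program, the lock
 * lives inside a map value and protects the other fields of that value.
 * The struct, map and key names below are made up for the example.
 *
 *      struct counter_val {
 *              struct bpf_spin_lock lock;
 *              __u64 count;
 *      };
 *
 *      struct counter_val *val = bpf_map_lookup_elem(&counters, &key);
 *      if (val) {
 *              bpf_spin_lock(&val->lock);
 *              val->count++;
 *              bpf_spin_unlock(&val->lock);
 *      }
 */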

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
                           bool lock_src)
{
        struct bpf_spin_lock *lock;

        if (lock_src)
                lock = src + map->spin_lock_off;
        else
                lock = dst + map->spin_lock_off;
        preempt_disable();
        ____bpf_spin_lock(lock);
        copy_map_value(map, dst, src);
        ____bpf_spin_unlock(lock);
        preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
        return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
        .func = bpf_jiffies64,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
        struct cgroup *cgrp = task_dfl_cgroup(current);

        return cgroup_id(cgrp);
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
        .func = bpf_get_current_cgroup_id,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
        struct cgroup *cgrp = task_dfl_cgroup(current);
        struct cgroup *ancestor;

        ancestor = cgroup_ancestor(cgrp, ancestor_level);
        if (!ancestor)
                return 0;
        return cgroup_id(ancestor);
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
        .func = bpf_get_current_ancestor_cgroup_id,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_ANYTHING,
};

#ifdef CONFIG_CGROUP_BPF

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
        /* The flags argument is not used now but provides the ability to
         * extend the API in the future. The verifier checks that its value
         * is correct.
         */
        enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
        struct bpf_cgroup_storage *storage;
        struct bpf_cg_run_ctx *ctx;
        void *ptr;

        /* get current cgroup storage from BPF run context */
        ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
        storage = ctx->prog_item->cgroup_storage[stype];

        if (stype == BPF_CGROUP_STORAGE_SHARED)
                ptr = &READ_ONCE(storage->buf)->data[0];
        else
                ptr = this_cpu_ptr(storage->percpu_buf);

        return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
        .func = bpf_get_local_storage,
        .gpl_only = false,
        .ret_type = RET_PTR_TO_MAP_VALUE,
        .arg1_type = ARG_CONST_MAP_PTR,
        .arg2_type = ARG_ANYTHING,
};
#endif

#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
                          unsigned long long *res, bool *is_negative)
{
        unsigned int base = flags & BPF_STRTOX_BASE_MASK;
        const char *cur_buf = buf;
        size_t cur_len = buf_len;
        unsigned int consumed;
        size_t val_len;
        char str[64];

        if (!buf || !buf_len || !res || !is_negative)
                return -EINVAL;

        if (base != 0 && base != 8 && base != 10 && base != 16)
                return -EINVAL;

        if (flags & ~BPF_STRTOX_BASE_MASK)
                return -EINVAL;

        while (cur_buf < buf + buf_len && isspace(*cur_buf))
                ++cur_buf;

        *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
        if (*is_negative)
                ++cur_buf;

        consumed = cur_buf - buf;
        cur_len -= consumed;
        if (!cur_len)
                return -EINVAL;

        cur_len = min(cur_len, sizeof(str) - 1);
        memcpy(str, cur_buf, cur_len);
        str[cur_len] = '\0';
        cur_buf = str;

        cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
        val_len = _parse_integer(cur_buf, base, res);

        if (val_len & KSTRTOX_OVERFLOW)
                return -ERANGE;

        if (val_len == 0)
                return -EINVAL;

        cur_buf += val_len;
        consumed += cur_buf - str;

        return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
                         long long *res)
{
        unsigned long long _res;
        bool is_negative;
        int err;

        err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
        if (err < 0)
                return err;
        if (is_negative) {
                if ((long long)-_res > 0)
                        return -ERANGE;
                *res = -_res;
        } else {
                if ((long long)_res < 0)
                        return -ERANGE;
                *res = _res;
        }
        return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
           long *, res)
{
        long long _res;
        int err;

        *res = 0;
        err = __bpf_strtoll(buf, buf_len, flags, &_res);
        if (err < 0)
                return err;
        if (_res != (long)_res)
                return -ERANGE;
        *res = _res;
        return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
        .func = bpf_strtol,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
        .arg2_type = ARG_CONST_SIZE,
        .arg3_type = ARG_ANYTHING,
        .arg4_type = ARG_PTR_TO_LONG,
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
           unsigned long *, res)
{
        unsigned long long _res;
        bool is_negative;
        int err;

        *res = 0;
        err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
        if (err < 0)
                return err;
        if (is_negative)
                return -EINVAL;
        if (_res != (unsigned long)_res)
                return -ERANGE;
        *res = _res;
        return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
        .func = bpf_strtoul,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
        .arg2_type = ARG_CONST_SIZE,
        .arg3_type = ARG_ANYTHING,
        .arg4_type = ARG_PTR_TO_LONG,
};
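
/*
 * Illustrative sketch, not part of this file: a cgroup sysctl program parsing
 * the value being written with bpf_strtoul(). The program name, buffer size
 * and limit are made up; bpf_sysctl_get_new_value() and bpf_strtoul() are
 * real helpers.
 *
 *      SEC("cgroup/sysctl")
 *      int sysctl_limit(struct bpf_sysctl *ctx)
 *      {
 *              char buf[16] = {};
 *              unsigned long val = 0;
 *
 *              bpf_sysctl_get_new_value(ctx, buf, sizeof(buf));
 *              if (bpf_strtoul(buf, sizeof(buf), 0, &val) < 0) // flags 0: auto-detect base
 *                      return 0;       // reject malformed writes
 *              return val <= 4096;     // 1 = allow, 0 = deny
 *      }
 */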
#endif

BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
           struct bpf_pidns_info *, nsdata, u32, size)
{
        struct task_struct *task = current;
        struct pid_namespace *pidns;
        int err = -EINVAL;

        if (unlikely(size != sizeof(struct bpf_pidns_info)))
                goto clear;

        if (unlikely((u64)(dev_t)dev != dev))
                goto clear;

        if (unlikely(!task))
                goto clear;

        pidns = task_active_pid_ns(task);
        if (unlikely(!pidns)) {
                err = -ENOENT;
                goto clear;
        }

        if (!ns_match(&pidns->ns, (dev_t)dev, ino))
                goto clear;

        nsdata->pid = task_pid_nr_ns(task, pidns);
        nsdata->tgid = task_tgid_nr_ns(task, pidns);
        return 0;
clear:
        memset((void *)nsdata, 0, (size_t) size);
        return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
        .func = bpf_get_ns_current_pid_tgid,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_ANYTHING,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_PTR_TO_UNINIT_MEM,
        .arg4_type = ARG_CONST_SIZE,
};
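
/*
 * Illustrative sketch, not part of this file: userspace stat()s its pid
 * namespace (e.g. /proc/self/ns/pid) and hands st_dev/st_ino to the program,
 * here via hypothetical loader-set rodata constants, so the helper can report
 * pid/tgid as seen inside that namespace.
 *
 *      const volatile __u64 pidns_dev, pidns_ino;      // set by the loader
 *
 *      struct bpf_pidns_info nsinfo;
 *
 *      if (!bpf_get_ns_current_pid_tgid(pidns_dev, pidns_ino,
 *                                       &nsinfo, sizeof(nsinfo)))
 *              bpf_printk("ns-local pid %u tgid %u", nsinfo.pid, nsinfo.tgid);
 */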

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
        .func = bpf_get_raw_cpu_id,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
           u64, flags, void *, data, u64, size)
{
        if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
                return -EINVAL;

        return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
        .func = bpf_event_output_data,
        .gpl_only = true,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_CONST_MAP_PTR,
        .arg3_type = ARG_ANYTHING,
        .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
        .arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
           const void __user *, user_ptr)
{
        int ret = copy_from_user(dst, user_ptr, size);

        if (unlikely(ret)) {
                memset(dst, 0, size);
                ret = -EFAULT;
        }

        return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
        .func = bpf_copy_from_user,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_UNINIT_MEM,
        .arg2_type = ARG_CONST_SIZE_OR_ZERO,
        .arg3_type = ARG_ANYTHING,
};
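
/*
 * Illustrative sketch, not part of this file: bpf_copy_from_user() may fault,
 * so it is only offered to sleepable programs (the ".s" section suffix below).
 * The program name, buffer size and the choice of reading at bprm->p are made
 * up for the example.
 *
 *      SEC("lsm.s/bprm_committed_creds")
 *      int BPF_PROG(log_argv0, struct linux_binprm *bprm)
 *      {
 *              char arg0[64] = {};
 *
 *              if (!bpf_copy_from_user(arg0, sizeof(arg0),
 *                                      (const void *)bprm->p))
 *                      bpf_printk("argv area: %s", arg0);
 *              return 0;
 *      }
 */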

BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
        if (cpu >= nr_cpu_ids)
                return (unsigned long)NULL;

        return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
        .func = bpf_per_cpu_ptr,
        .gpl_only = false,
        .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
        .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
        .arg2_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
        return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
        .func = bpf_this_cpu_ptr,
        .gpl_only = false,
        .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
        .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
};
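
/*
 * Illustrative sketch, not part of this file: the argument must be a per-CPU
 * kernel variable declared as a __ksym in the BPF program. bpf_prog_active is
 * a real per-CPU int; any per-CPU ksym with BTF works the same way.
 *
 *      extern const int bpf_prog_active __ksym;
 *
 *      const int *active = bpf_this_cpu_ptr(&bpf_prog_active);
 *      const int *cpu0 = bpf_per_cpu_ptr(&bpf_prog_active, 0);
 *
 *      if (cpu0)       // bpf_per_cpu_ptr() may return NULL for a bad cpu
 *              bpf_printk("cpu0 %d this-cpu %d", *cpu0, *active);
 */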

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;

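/*
 * Helpers common to all program types. The set grows with privilege: the
 * first block is available to any loader, the second additionally requires
 * bpf_capable() (CAP_BPF or CAP_SYS_ADMIN), and the last also requires
 * perfmon_capable() (CAP_PERFMON or CAP_SYS_ADMIN), since those helpers can
 * expose kernel addresses and memory contents.
 */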
const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
        switch (func_id) {
        case BPF_FUNC_map_lookup_elem:
                return &bpf_map_lookup_elem_proto;
        case BPF_FUNC_map_update_elem:
                return &bpf_map_update_elem_proto;
        case BPF_FUNC_map_delete_elem:
                return &bpf_map_delete_elem_proto;
        case BPF_FUNC_map_push_elem:
                return &bpf_map_push_elem_proto;
        case BPF_FUNC_map_pop_elem:
                return &bpf_map_pop_elem_proto;
        case BPF_FUNC_map_peek_elem:
                return &bpf_map_peek_elem_proto;
        case BPF_FUNC_get_prandom_u32:
                return &bpf_get_prandom_u32_proto;
        case BPF_FUNC_get_smp_processor_id:
                return &bpf_get_raw_smp_processor_id_proto;
        case BPF_FUNC_get_numa_node_id:
                return &bpf_get_numa_node_id_proto;
        case BPF_FUNC_tail_call:
                return &bpf_tail_call_proto;
        case BPF_FUNC_ktime_get_ns:
                return &bpf_ktime_get_ns_proto;
        case BPF_FUNC_ktime_get_boot_ns:
                return &bpf_ktime_get_boot_ns_proto;
        case BPF_FUNC_ringbuf_output:
                return &bpf_ringbuf_output_proto;
        case BPF_FUNC_ringbuf_reserve:
                return &bpf_ringbuf_reserve_proto;
        case BPF_FUNC_ringbuf_submit:
                return &bpf_ringbuf_submit_proto;
        case BPF_FUNC_ringbuf_discard:
                return &bpf_ringbuf_discard_proto;
        case BPF_FUNC_ringbuf_query:
                return &bpf_ringbuf_query_proto;
        default:
                break;
        }

        if (!bpf_capable())
                return NULL;

        switch (func_id) {
        case BPF_FUNC_spin_lock:
                return &bpf_spin_lock_proto;
        case BPF_FUNC_spin_unlock:
                return &bpf_spin_unlock_proto;
        case BPF_FUNC_jiffies64:
                return &bpf_jiffies64_proto;
        case BPF_FUNC_per_cpu_ptr:
                return &bpf_per_cpu_ptr_proto;
        case BPF_FUNC_this_cpu_ptr:
                return &bpf_this_cpu_ptr_proto;
        default:
                break;
        }

        if (!perfmon_capable())
                return NULL;

        switch (func_id) {
        case BPF_FUNC_trace_printk:
                return bpf_get_trace_printk_proto();
        case BPF_FUNC_get_current_task:
                return &bpf_get_current_task_proto;
        case BPF_FUNC_probe_read_user:
                return &bpf_probe_read_user_proto;
        case BPF_FUNC_probe_read_kernel:
                return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
                       NULL : &bpf_probe_read_kernel_proto;
        case BPF_FUNC_probe_read_user_str:
                return &bpf_probe_read_user_str_proto;
        case BPF_FUNC_probe_read_kernel_str:
                return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
                       NULL : &bpf_probe_read_kernel_str_proto;
        case BPF_FUNC_snprintf_btf:
                return &bpf_snprintf_btf_proto;
        default:
                return NULL;
        }
}
