// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/filter.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/pid_namespace.h>
#include <linux/proc_ns.h>
#include <linux/security.h>

#include "../../lib/kstrtox.h"

/* If a kernel subsystem allows eBPF programs to call this function, it should
 * return bpf_map_lookup_elem_proto from its verifier_ops->get_func_proto()
 * callback so that the verifier can properly check the arguments.
 *
 * Different map implementations rely on RCU in their lookup/update/delete
 * methods, therefore eBPF programs must run under the RCU read lock whenever
 * they are allowed to access maps; check rcu_read_lock_held() in all three
 * functions.
 */
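/* Illustrative sketch (not part of this file): a subsystem's get_func_proto
 * callback would typically hand out these protos as shown below. The exact
 * callback signature is an assumption and may differ between kernel versions:
 *
 *	static const struct bpf_func_proto *
 *	example_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_map_lookup_elem:
 *			return &bpf_map_lookup_elem_proto;
 *		case BPF_FUNC_map_update_elem:
 *			return &bpf_map_update_elem_proto;
 *		default:
 *			return bpf_base_func_proto(func_id);
 *		}
 *	}
 */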
BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return (unsigned long) map->ops->map_lookup_elem(map, key);
}

const struct bpf_func_proto bpf_map_lookup_elem_proto = {
	.func		= bpf_map_lookup_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key,
	   void *, value, u64, flags)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_update_elem(map, key, value, flags);
}

const struct bpf_func_proto bpf_map_update_elem_proto = {
	.func		= bpf_map_update_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return map->ops->map_delete_elem(map, key);
}

const struct bpf_func_proto bpf_map_delete_elem_proto = {
	.func		= bpf_map_delete_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_KEY,
};

BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags)
{
	return map->ops->map_push_elem(map, value, flags);
}

const struct bpf_func_proto bpf_map_push_elem_proto = {
	.func		= bpf_map_push_elem,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_MAP_VALUE,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_pop_elem(map, value);
}

const struct bpf_func_proto bpf_map_pop_elem_proto = {
	.func		= bpf_map_pop_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
{
	return map->ops->map_peek_elem(map, value);
}

const struct bpf_func_proto bpf_map_peek_elem_proto = {
	.func		= bpf_map_peek_elem,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_UNINIT_MAP_VALUE,
};

const struct bpf_func_proto bpf_get_prandom_u32_proto = {
	.func		= bpf_user_rnd_u32,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_smp_processor_id)
{
	return smp_processor_id();
}

const struct bpf_func_proto bpf_get_smp_processor_id_proto = {
	.func		= bpf_get_smp_processor_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_numa_node_id)
{
	return numa_node_id();
}

const struct bpf_func_proto bpf_get_numa_node_id_proto = {
	.func		= bpf_get_numa_node_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_ns)
{
	/* NMI safe access to clock monotonic */
	return ktime_get_mono_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
	.func		= bpf_ktime_get_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
	/* NMI safe access to clock boottime */
	return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
	.func		= bpf_ktime_get_boot_ns,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_pid_tgid)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		return -EINVAL;

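	/* tgid in the upper 32 bits, pid (the thread id) in the lower 32 bits */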
	return (u64) task->tgid << 32 | task->pid;
}

const struct bpf_func_proto bpf_get_current_pid_tgid_proto = {
	.func		= bpf_get_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_0(bpf_get_current_uid_gid)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (unlikely(!task))
		return -EINVAL;

	current_uid_gid(&uid, &gid);
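	/* gid in the upper 32 bits, uid in the lower 32 bits, both mapped
	 * into the initial user namespace
	 */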
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		     from_kuid(&init_user_ns, uid);
}

const struct bpf_func_proto bpf_get_current_uid_gid_proto = {
	.func		= bpf_get_current_uid_gid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
{
	struct task_struct *task = current;

	if (unlikely(!task))
		goto err_clear;

	strncpy(buf, task->comm, size);

	/* Verifier guarantees that size > 0. For task->comm exceeding
	 * size, guarantee that buf is %NUL-terminated. Unconditionally
	 * done here to save the size test.
	 */
	buf[size - 1] = 0;
	return 0;
err_clear:
	memset(buf, 0, size);
	return -EINVAL;
}

const struct bpf_func_proto bpf_get_current_comm_proto = {
	.func		= bpf_get_current_comm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};

#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK)

static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;
	union {
		__u32 val;
		arch_spinlock_t lock;
	} u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED };

	compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0");
	BUILD_BUG_ON(sizeof(*l) != sizeof(__u32));
	BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32));
	arch_spin_lock(l);
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	arch_spinlock_t *l = (void *)lock;

	arch_spin_unlock(l);
}

#else

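/* Fallback when neither queued spinlocks nor an arch-provided BPF spinlock is
 * available: a simple test-and-set lock on an atomic_t. Spin with relaxed
 * reads until the word is observed as zero, then try to claim it with
 * atomic_xchg(); retry if another CPU won the race.
 */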
static inline void __bpf_spin_lock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	BUILD_BUG_ON(sizeof(*l) != sizeof(*lock));
	do {
		atomic_cond_read_relaxed(l, !VAL);
	} while (atomic_xchg(l, 1));
}

static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
{
	atomic_t *l = (void *)lock;

	atomic_set_release(l, 0);
}

#endif

static DEFINE_PER_CPU(unsigned long, irqsave_flags);

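/* The BPF-facing lock/unlock helpers disable interrupts around the critical
 * section and stash the saved flags per CPU, so that bpf_spin_unlock() on the
 * same CPU can restore them.
 */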
notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	local_irq_save(flags);
	__bpf_spin_lock(lock);
	__this_cpu_write(irqsave_flags, flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};

notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
	unsigned long flags;

	flags = __this_cpu_read(irqsave_flags);
	__bpf_spin_unlock(lock);
	local_irq_restore(flags);
	return 0;
}

const struct bpf_func_proto bpf_spin_unlock_proto = {
	.func		= bpf_spin_unlock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
};
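
/* Illustrative BPF-program-side usage of the two helpers above (a sketch, not
 * part of this file): a map value embeds a struct bpf_spin_lock and the
 * program brackets its update with the lock/unlock pair:
 *
 *	struct val {
 *		struct bpf_spin_lock lock;
 *		int cnt;
 *	};
 *
 *	struct val *v = bpf_map_lookup_elem(&map, &key);
 *	if (v) {
 *		bpf_spin_lock(&v->lock);
 *		v->cnt++;
 *		bpf_spin_unlock(&v->lock);
 *	}
 */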

void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src)
{
	struct bpf_spin_lock *lock;

	if (lock_src)
		lock = src + map->spin_lock_off;
	else
		lock = dst + map->spin_lock_off;
	preempt_disable();
	____bpf_spin_lock(lock);
	copy_map_value(map, dst, src);
	____bpf_spin_unlock(lock);
	preempt_enable();
}

BPF_CALL_0(bpf_jiffies64)
{
	return get_jiffies_64();
}

const struct bpf_func_proto bpf_jiffies64_proto = {
	.func		= bpf_jiffies64,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

#ifdef CONFIG_CGROUPS
BPF_CALL_0(bpf_get_current_cgroup_id)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);

	return cgroup_id(cgrp);
}

const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
	.func		= bpf_get_current_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level)
{
	struct cgroup *cgrp = task_dfl_cgroup(current);
	struct cgroup *ancestor;

	ancestor = cgroup_ancestor(cgrp, ancestor_level);
	if (!ancestor)
		return 0;
	return cgroup_id(ancestor);
}

const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = {
	.func		= bpf_get_current_ancestor_cgroup_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};

#ifdef CONFIG_CGROUP_BPF

BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
{
	/* The flags argument is not used now, but it provides the ability
	 * to extend the API. The verifier checks that its value is correct.
	 */
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
	struct bpf_cgroup_storage *storage;
	struct bpf_cg_run_ctx *ctx;
	void *ptr;

	/* get current cgroup storage from BPF run context */
	ctx = container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);
	storage = ctx->prog_item->cgroup_storage[stype];

	if (stype == BPF_CGROUP_STORAGE_SHARED)
		ptr = &READ_ONCE(storage->buf)->data[0];
	else
		ptr = this_cpu_ptr(storage->percpu_buf);

	return (unsigned long)ptr;
}

const struct bpf_func_proto bpf_get_local_storage_proto = {
	.func		= bpf_get_local_storage,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
#endif

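/* The low bits of the bpf_strtol()/bpf_strtoul() flags argument encode the
 * numeric base; only 0 (auto-detect), 8, 10 and 16 are accepted below.
 */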
#define BPF_STRTOX_BASE_MASK 0x1F

static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags,
			  unsigned long long *res, bool *is_negative)
{
	unsigned int base = flags & BPF_STRTOX_BASE_MASK;
	const char *cur_buf = buf;
	size_t cur_len = buf_len;
	unsigned int consumed;
	size_t val_len;
	char str[64];

	if (!buf || !buf_len || !res || !is_negative)
		return -EINVAL;

	if (base != 0 && base != 8 && base != 10 && base != 16)
		return -EINVAL;

	if (flags & ~BPF_STRTOX_BASE_MASK)
		return -EINVAL;

	while (cur_buf < buf + buf_len && isspace(*cur_buf))
		++cur_buf;

	*is_negative = (cur_buf < buf + buf_len && *cur_buf == '-');
	if (*is_negative)
		++cur_buf;

	consumed = cur_buf - buf;
	cur_len -= consumed;
	if (!cur_len)
		return -EINVAL;

	cur_len = min(cur_len, sizeof(str) - 1);
	memcpy(str, cur_buf, cur_len);
	str[cur_len] = '\0';
	cur_buf = str;

	cur_buf = _parse_integer_fixup_radix(cur_buf, &base);
	val_len = _parse_integer(cur_buf, base, res);

	if (val_len & KSTRTOX_OVERFLOW)
		return -ERANGE;

	if (val_len == 0)
		return -EINVAL;

	cur_buf += val_len;
	consumed += cur_buf - str;

	return consumed;
}

static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags,
			 long long *res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative) {
		if ((long long)-_res > 0)
			return -ERANGE;
		*res = -_res;
	} else {
		if ((long long)_res < 0)
			return -ERANGE;
		*res = _res;
	}
	return err;
}

BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags,
	   long *, res)
{
	long long _res;
	int err;

	err = __bpf_strtoll(buf, buf_len, flags, &_res);
	if (err < 0)
		return err;
	if (_res != (long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtol_proto = {
	.func		= bpf_strtol,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};

BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags,
	   unsigned long *, res)
{
	unsigned long long _res;
	bool is_negative;
	int err;

	err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative);
	if (err < 0)
		return err;
	if (is_negative)
		return -EINVAL;
	if (_res != (unsigned long)_res)
		return -ERANGE;
	*res = _res;
	return err;
}

const struct bpf_func_proto bpf_strtoul_proto = {
	.func		= bpf_strtoul,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg2_type	= ARG_CONST_SIZE,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_LONG,
};
#endif

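/* Report the current task's pid/tgid as seen from the pid namespace identified
 * by (dev, ino). On any mismatch or error, nsdata is cleared before returning.
 */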
BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino,
	   struct bpf_pidns_info *, nsdata, u32, size)
{
	struct task_struct *task = current;
	struct pid_namespace *pidns;
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_pidns_info)))
		goto clear;

	if (unlikely((u64)(dev_t)dev != dev))
		goto clear;

	if (unlikely(!task))
		goto clear;

	pidns = task_active_pid_ns(task);
	if (unlikely(!pidns)) {
		err = -ENOENT;
		goto clear;
	}

	if (!ns_match(&pidns->ns, (dev_t)dev, ino))
		goto clear;

	nsdata->pid = task_pid_nr_ns(task, pidns);
	nsdata->tgid = task_tgid_nr_ns(task, pidns);
	return 0;
clear:
	memset((void *)nsdata, 0, (size_t) size);
	return err;
}

const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
	.func		= bpf_get_ns_current_pid_tgid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;

	return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
	.func		= bpf_event_output_data,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

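/* Copy from user memory into dst. On a fault the destination is zeroed so the
 * program never observes uninitialized or partially copied data.
 */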
BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
	   const void __user *, user_ptr)
{
	int ret = copy_from_user(dst, user_ptr, size);

	if (unlikely(ret)) {
		memset(dst, 0, size);
		ret = -EFAULT;
	}

	return ret;
}

const struct bpf_func_proto bpf_copy_from_user_proto = {
	.func		= bpf_copy_from_user,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};

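/* Resolve a per-CPU kernel pointer to the instance on the given CPU, or return
 * NULL when the CPU id is out of range.
 */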
BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
{
	if (cpu >= nr_cpu_ids)
		return (unsigned long)NULL;

	return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
}

const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
	.func		= bpf_per_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
{
	return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
}

const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
	.func		= bpf_this_cpu_ptr,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY,
	.arg1_type	= ARG_PTR_TO_PERCPU_BTF_ID,
};

const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_proto __weak;
const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak;

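/* Base set of helpers shared by all program types. Helpers in the first switch
 * are always available; the second tier additionally requires bpf_capable(),
 * and the tracing-style helpers at the end require perfmon_capable() as well
 * (plus a lockdown check for kernel reads).
 */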
const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_ktime_get_boot_ns:
		return &bpf_ktime_get_boot_ns_proto;
	case BPF_FUNC_ringbuf_output:
		return &bpf_ringbuf_output_proto;
	case BPF_FUNC_ringbuf_reserve:
		return &bpf_ringbuf_reserve_proto;
	case BPF_FUNC_ringbuf_submit:
		return &bpf_ringbuf_submit_proto;
	case BPF_FUNC_ringbuf_discard:
		return &bpf_ringbuf_discard_proto;
	case BPF_FUNC_ringbuf_query:
		return &bpf_ringbuf_query_proto;
	default:
		break;
	}

	if (!bpf_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_jiffies64:
		return &bpf_jiffies64_proto;
	case BPF_FUNC_per_cpu_ptr:
		return &bpf_per_cpu_ptr_proto;
	case BPF_FUNC_this_cpu_ptr:
		return &bpf_this_cpu_ptr_proto;
	default:
		break;
	}

	if (!perfmon_capable())
		return NULL;

	switch (func_id) {
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_probe_read_user:
		return &bpf_probe_read_user_proto;
	case BPF_FUNC_probe_read_kernel:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_proto;
	case BPF_FUNC_probe_read_user_str:
		return &bpf_probe_read_user_str_proto;
	case BPF_FUNC_probe_read_kernel_str:
		return security_locked_down(LOCKDOWN_BPF_READ) < 0 ?
		       NULL : &bpf_probe_read_kernel_str_proto;
	case BPF_FUNC_snprintf_btf:
		return &bpf_snprintf_btf_proto;
	default:
		return NULL;
	}
}