/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>

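/* When set (via the kernel.unprivileged_bpf_disabled sysctl), bpf(2) is
 * restricted to CAP_SYS_ADMIN; see the capable() check in SYSCALL_DEFINE3()
 * below.
 */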
int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

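/* Walk the registered map types and let the matching implementation
 * allocate the map; -EINVAL if attr->map_type is unknown.
 */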
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}

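/* Charge the map's pages against the owning user's RLIMIT_MEMLOCK and
 * remember the user for the matching uncharge at free time.
 */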
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

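/* usercnt counts user-space references (file descriptors and bpffs pins)
 * separately from refcnt. A prog array holds references to programs that
 * may in turn hold the map, so when the last user reference goes away the
 * array is cleared to break such reference cycles.
 */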
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	bpf_map_put_with_uref(filp->private_data);
	return 0;
}

static const struct file_operations bpf_map_fops = {
	.release = bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
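/* memchr_inv() above scans everything between the end of CMD##_LAST_FIELD
 * and the end of the union; any non-zero byte there means user space set
 * attributes this command does not understand, and CHECK_ATTR() is true.
 */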

#define BPF_MAP_CREATE_LAST_FIELD max_entries
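/* Example user-space invocation (hypothetical, via the raw syscall):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 256,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */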
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

/* If an error is returned, the fd is released.
 * On success the caller must complete the fd access with a matching fdput().
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768
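/* The cap keeps the counters far away from atomic_t overflow, so user
 * space cannot wrap a refcnt by taking references in a loop.
 */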

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

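/* Example user-space lookup (hypothetical):
 *
 *	__u32 key = 1;
 *	__u64 value;
 *	union bpf_attr attr = {
 *		.map_fd = fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)&value,
 *	};
 *	syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 */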
static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	rcu_read_lock();
	ptr = map->ops->map_lookup_elem(map, key);
	if (ptr)
		memcpy(value, ptr, map->value_size);
	rcu_read_unlock();

	err = -ENOENT;
	if (!ptr)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, map->value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
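/* attr->flags selects update semantics: BPF_ANY (create or update),
 * BPF_NOEXIST (create only) or BPF_EXIST (update only); they are
 * enforced by the map implementation's ->map_update_elem().
 */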

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, map->value_size) != 0)
		goto free_value;

	/* eBPF programs that use maps run under rcu_read_lock(), and all
	 * map accessors rely on that fact, so do the same here
	 */
	rcu_read_lock();
	err = map->ops->map_update_elem(map, key, value, attr->flags);
	rcu_read_unlock();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

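/* Typical user-space iteration (hypothetical): a NULL attr->key yields the
 * first key; feeding each returned next_key back in as key walks the whole
 * map until the call fails with -ENOENT.
 */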
static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (ukey) {
		err = -ENOMEM;
		key = kmalloc(map->key_size, GFP_USER);
		if (!key)
			goto err_put;

		err = -EFAULT;
		if (copy_from_user(key, ukey, map->key_size) != 0)
			goto free_key;
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(prog->pages, &user->locked_vm);
	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(prog->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	atomic_long_sub(prog->pages, &user->locked_vm);
	free_uid(user);
}

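/* Programs are executed under rcu_read_lock() (e.g. from socket filters),
 * so the actual free is deferred by call_rcu() until a grace period has
 * elapsed and no CPU can still be running the program.
 */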
491
__bpf_prog_put_rcu(struct rcu_head * rcu)492 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
493 {
494 struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
495
496 free_used_maps(aux);
497 bpf_prog_uncharge_memlock(aux->prog);
498 bpf_prog_free(aux->prog);
499 }
500
bpf_prog_put(struct bpf_prog * prog)501 void bpf_prog_put(struct bpf_prog *prog)
502 {
503 if (atomic_dec_and_test(&prog->aux->refcnt))
504 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
505 }
506 EXPORT_SYMBOL_GPL(bpf_prog_put);
507
bpf_prog_release(struct inode * inode,struct file * filp)508 static int bpf_prog_release(struct inode *inode, struct file *filp)
509 {
510 struct bpf_prog *prog = filp->private_data;
511
512 bpf_prog_put(prog);
513 return 0;
514 }
515
516 static const struct file_operations bpf_prog_fops = {
517 .release = bpf_prog_release,
518 };
519
bpf_prog_new_fd(struct bpf_prog * prog)520 int bpf_prog_new_fd(struct bpf_prog *prog)
521 {
522 return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
523 O_RDWR | O_CLOEXEC);
524 }
525
__bpf_prog_get(struct fd f)526 static struct bpf_prog *__bpf_prog_get(struct fd f)
527 {
528 if (!f.file)
529 return ERR_PTR(-EBADF);
530 if (f.file->f_op != &bpf_prog_fops) {
531 fdput(f);
532 return ERR_PTR(-EINVAL);
533 }
534
535 return f.file->private_data;
536 }
537
bpf_prog_inc(struct bpf_prog * prog)538 struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
539 {
540 if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
541 atomic_dec(&prog->aux->refcnt);
542 return ERR_PTR(-EBUSY);
543 }
544 return prog;
545 }
546
547 /* called by sockets/tracing/seccomp before attaching program to an event
548 * pairs with bpf_prog_put()
549 */
bpf_prog_get(u32 ufd)550 struct bpf_prog *bpf_prog_get(u32 ufd)
551 {
552 struct fd f = fdget(ufd);
553 struct bpf_prog *prog;
554
555 prog = __bpf_prog_get(f);
556 if (IS_ERR(prog))
557 return prog;
558
559 prog = bpf_prog_inc(prog);
560 fdput(f);
561
562 return prog;
563 }
564 EXPORT_SYMBOL_GPL(bpf_prog_get);
565
566 /* last field in 'union bpf_attr' used by this command */
567 #define BPF_PROG_LOAD_LAST_FIELD kern_version
568
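/* Example user-space load of a minimal "return 0" program (hypothetical,
 * using the insn macros from samples/bpf):
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.insn_cnt  = 2,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */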
static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	err = bpf_prog_select_runtime(prog);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

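/* BPF_OBJ_PIN and BPF_OBJ_GET pin a map or program fd to a path (normally
 * under the bpf filesystem mount, e.g. /sys/fs/bpf) and reopen it later;
 * the heavy lifting lives in kernel/bpf/inode.c.
 */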
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr;
	int err;

	if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	memset(&attr, 0, sizeof(attr));
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}