1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3  */
4 #include <linux/bpf.h>
5 #include <linux/bpf_trace.h>
6 #include <linux/bpf_lirc.h>
7 #include <linux/bpf_verifier.h>
8 #include <linux/btf.h>
9 #include <linux/syscalls.h>
10 #include <linux/slab.h>
11 #include <linux/sched/signal.h>
12 #include <linux/vmalloc.h>
13 #include <linux/mmzone.h>
14 #include <linux/anon_inodes.h>
15 #include <linux/fdtable.h>
16 #include <linux/file.h>
17 #include <linux/fs.h>
18 #include <linux/license.h>
19 #include <linux/filter.h>
20 #include <linux/version.h>
21 #include <linux/kernel.h>
22 #include <linux/idr.h>
23 #include <linux/cred.h>
24 #include <linux/timekeeping.h>
25 #include <linux/ctype.h>
26 #include <linux/nospec.h>
27 #include <linux/audit.h>
28 #include <uapi/linux/btf.h>
29 #include <linux/pgtable.h>
30 #include <linux/bpf_lsm.h>
31 #include <linux/poll.h>
32 #include <linux/bpf-netns.h>
33 #include <linux/rcupdate_trace.h>
34 
35 #include <trace/hooks/syscall_check.h>
36 
37 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
38 			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
39 			  (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
40 #define IS_FD_PROG_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY)
41 #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
42 #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map) || \
43 			IS_FD_HASH(map))
44 
45 #define BPF_OBJ_FLAG_MASK   (BPF_F_RDONLY | BPF_F_WRONLY)
46 
47 DEFINE_PER_CPU(int, bpf_prog_active);
48 static DEFINE_IDR(prog_idr);
49 static DEFINE_SPINLOCK(prog_idr_lock);
50 static DEFINE_IDR(map_idr);
51 static DEFINE_SPINLOCK(map_idr_lock);
52 static DEFINE_IDR(link_idr);
53 static DEFINE_SPINLOCK(link_idr_lock);
54 
55 int sysctl_unprivileged_bpf_disabled __read_mostly =
56 	IS_BUILTIN(CONFIG_BPF_UNPRIV_DEFAULT_OFF) ? 2 : 0;
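/* Illustrative note (added comment, not from the original source): a value of
 * 0 means unprivileged bpf() is allowed, 1 disables it permanently
 * (write-once), and 2 disables it by default while still allowing an admin to
 * switch back to 0 via the kernel.unprivileged_bpf_disabled sysctl.
 */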
57 
58 static const struct bpf_map_ops * const bpf_map_types[] = {
59 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
60 #define BPF_MAP_TYPE(_id, _ops) \
61 	[_id] = &_ops,
62 #define BPF_LINK_TYPE(_id, _name)
63 #include <linux/bpf_types.h>
64 #undef BPF_PROG_TYPE
65 #undef BPF_MAP_TYPE
66 #undef BPF_LINK_TYPE
67 };
68 
69 /*
70  * If we're handed a bigger struct than we know of, ensure all the unknown bits
71  * are 0 - i.e. new user-space does not rely on any kernel feature extensions
72  * we don't know about yet.
73  *
74  * There is a ToCToU race between this function call and the following
75  * copy_from_user() call. However, this is not a concern since this function
76  * is only meant to future-proof against unknown bits.
77  */
78 int bpf_check_uarg_tail_zero(void __user *uaddr,
79 			     size_t expected_size,
80 			     size_t actual_size)
81 {
82 	unsigned char __user *addr = uaddr + expected_size;
83 	int res;
84 
85 	if (unlikely(actual_size > PAGE_SIZE))	/* silly large */
86 		return -E2BIG;
87 
88 	if (actual_size <= expected_size)
89 		return 0;
90 
91 	res = check_zeroed_user(addr, actual_size - expected_size);
92 	if (res < 0)
93 		return res;
94 	return res ? 0 : -E2BIG;
95 }
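/* Illustrative sketch (added comment, hypothetical names): a syscall handler
 * that accepts a growable uapi struct would typically pair the check above
 * with copy_from_user(), roughly like this:
 *
 *	err = bpf_check_uarg_tail_zero(uattr, sizeof(kattr), usize);
 *	if (err)
 *		return err;
 *	size = min_t(u32, usize, sizeof(kattr));
 *	if (copy_from_user(&kattr, uattr, size))
 *		return -EFAULT;
 */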
96 
97 const struct bpf_map_ops bpf_map_offload_ops = {
98 	.map_meta_equal = bpf_map_meta_equal,
99 	.map_alloc = bpf_map_offload_map_alloc,
100 	.map_free = bpf_map_offload_map_free,
101 	.map_check_btf = map_check_no_btf,
102 };
103 
104 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
105 {
106 	const struct bpf_map_ops *ops;
107 	u32 type = attr->map_type;
108 	struct bpf_map *map;
109 	int err;
110 
111 	if (type >= ARRAY_SIZE(bpf_map_types))
112 		return ERR_PTR(-EINVAL);
113 	type = array_index_nospec(type, ARRAY_SIZE(bpf_map_types));
114 	ops = bpf_map_types[type];
115 	if (!ops)
116 		return ERR_PTR(-EINVAL);
117 
118 	if (ops->map_alloc_check) {
119 		err = ops->map_alloc_check(attr);
120 		if (err)
121 			return ERR_PTR(err);
122 	}
123 	if (attr->map_ifindex)
124 		ops = &bpf_map_offload_ops;
125 	map = ops->map_alloc(attr);
126 	if (IS_ERR(map))
127 		return map;
128 	map->ops = ops;
129 	map->map_type = type;
130 	return map;
131 }
132 
133 static void bpf_map_write_active_inc(struct bpf_map *map)
134 {
135 	atomic64_inc(&map->writecnt);
136 }
137 
138 static void bpf_map_write_active_dec(struct bpf_map *map)
139 {
140 	atomic64_dec(&map->writecnt);
141 }
142 
143 bool bpf_map_write_active(const struct bpf_map *map)
144 {
145 	return atomic64_read(&map->writecnt) != 0;
146 }
147 
148 static u32 bpf_map_value_size(struct bpf_map *map)
149 {
150 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
151 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
152 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
153 	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
154 		return round_up(map->value_size, 8) * num_possible_cpus();
155 	else if (IS_FD_MAP(map))
156 		return sizeof(u32);
157 	else
158 		return  map->value_size;
159 }
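/* Worked example (added comment): for a BPF_MAP_TYPE_PERCPU_ARRAY with
 * value_size == 12 on a system with 8 possible CPUs, the syscall-visible
 * value size is round_up(12, 8) * 8 == 128 bytes, i.e. one 8-byte-aligned
 * slot per possible CPU.
 */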
160 
161 static void maybe_wait_bpf_programs(struct bpf_map *map)
162 {
163 	/* Wait for any running BPF programs to complete so that
164 	 * userspace, when we return to it, knows that all programs
165 	 * that could be running use the new map value.
166 	 */
167 	if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS ||
168 	    map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
169 		synchronize_rcu();
170 }
171 
172 static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
173 				void *value, __u64 flags)
174 {
175 	int err;
176 
177 	/* Need to create a kthread, thus must support schedule */
178 	if (bpf_map_is_dev_bound(map)) {
179 		return bpf_map_offload_update_elem(map, key, value, flags);
180 	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
181 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
182 		return map->ops->map_update_elem(map, key, value, flags);
183 	} else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
184 		   map->map_type == BPF_MAP_TYPE_SOCKMAP) {
185 		return sock_map_update_elem_sys(map, key, value, flags);
186 	} else if (IS_FD_PROG_ARRAY(map)) {
187 		return bpf_fd_array_map_update_elem(map, f.file, key, value,
188 						    flags);
189 	}
190 
191 	bpf_disable_instrumentation();
192 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
193 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
194 		err = bpf_percpu_hash_update(map, key, value, flags);
195 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
196 		err = bpf_percpu_array_update(map, key, value, flags);
197 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
198 		err = bpf_percpu_cgroup_storage_update(map, key, value,
199 						       flags);
200 	} else if (IS_FD_ARRAY(map)) {
201 		rcu_read_lock();
202 		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
203 						   flags);
204 		rcu_read_unlock();
205 	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
206 		rcu_read_lock();
207 		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
208 						  flags);
209 		rcu_read_unlock();
210 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
211 		/* rcu_read_lock() is not needed */
212 		err = bpf_fd_reuseport_array_update_elem(map, key, value,
213 							 flags);
214 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
215 		   map->map_type == BPF_MAP_TYPE_STACK) {
216 		err = map->ops->map_push_elem(map, value, flags);
217 	} else {
218 		rcu_read_lock();
219 		err = map->ops->map_update_elem(map, key, value, flags);
220 		rcu_read_unlock();
221 	}
222 	bpf_enable_instrumentation();
223 	maybe_wait_bpf_programs(map);
224 
225 	return err;
226 }
227 
228 static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
229 			      __u64 flags)
230 {
231 	void *ptr;
232 	int err;
233 
234 	if (bpf_map_is_dev_bound(map))
235 		return bpf_map_offload_lookup_elem(map, key, value);
236 
237 	bpf_disable_instrumentation();
238 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
239 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
240 		err = bpf_percpu_hash_copy(map, key, value);
241 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
242 		err = bpf_percpu_array_copy(map, key, value);
243 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) {
244 		err = bpf_percpu_cgroup_storage_copy(map, key, value);
245 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
246 		err = bpf_stackmap_copy(map, key, value);
247 	} else if (IS_FD_ARRAY(map) || IS_FD_PROG_ARRAY(map)) {
248 		err = bpf_fd_array_map_lookup_elem(map, key, value);
249 	} else if (IS_FD_HASH(map)) {
250 		err = bpf_fd_htab_map_lookup_elem(map, key, value);
251 	} else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) {
252 		err = bpf_fd_reuseport_array_lookup_elem(map, key, value);
253 	} else if (map->map_type == BPF_MAP_TYPE_QUEUE ||
254 		   map->map_type == BPF_MAP_TYPE_STACK) {
255 		err = map->ops->map_peek_elem(map, value);
256 	} else if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
257 		/* struct_ops map requires directly updating "value" */
258 		err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
259 	} else {
260 		rcu_read_lock();
261 		if (map->ops->map_lookup_elem_sys_only)
262 			ptr = map->ops->map_lookup_elem_sys_only(map, key);
263 		else
264 			ptr = map->ops->map_lookup_elem(map, key);
265 		if (IS_ERR(ptr)) {
266 			err = PTR_ERR(ptr);
267 		} else if (!ptr) {
268 			err = -ENOENT;
269 		} else {
270 			err = 0;
271 			if (flags & BPF_F_LOCK)
272 				/* lock 'ptr' and copy everything but lock */
273 				copy_map_value_locked(map, value, ptr, true);
274 			else
275 				copy_map_value(map, value, ptr);
276 			/* mask lock, since value wasn't zero inited */
277 			check_and_init_map_lock(map, value);
278 		}
279 		rcu_read_unlock();
280 	}
281 
282 	bpf_enable_instrumentation();
283 	maybe_wait_bpf_programs(map);
284 
285 	return err;
286 }
287 
288 static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
289 {
290 	/* We really just want to fail instead of triggering OOM killer
291 	 * under memory pressure, therefore we pass __GFP_NORETRY to kmalloc,
292 	 * which is used for lower order allocation requests.
293 	 *
294 	 * It has been observed that higher order allocation requests done by
295 	 * vmalloc with __GFP_NORETRY being set might fail due to not trying
296 	 * to reclaim memory from the page cache, thus we set
297 	 * __GFP_RETRY_MAYFAIL to avoid such situations.
298 	 */
299 
300 	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO;
301 	unsigned int flags = 0;
302 	unsigned long align = 1;
303 	void *area;
304 
305 	if (size >= SIZE_MAX)
306 		return NULL;
307 
308 	/* kmalloc()'ed memory can't be mmap()'ed */
309 	if (mmapable) {
310 		BUG_ON(!PAGE_ALIGNED(size));
311 		align = SHMLBA;
312 		flags = VM_USERMAP;
313 	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
314 		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
315 				    numa_node);
316 		if (area != NULL)
317 			return area;
318 	}
319 
320 	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
321 			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
322 			flags, numa_node, __builtin_return_address(0));
323 }
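/* Added note (assumes 4 KiB pages and PAGE_ALLOC_COSTLY_ORDER == 3): requests
 * up to 32 KiB are first attempted with kmalloc_node(); larger requests, and
 * anything that must be mmap()'able, fall through to the vmalloc path above.
 */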
324 
325 void *bpf_map_area_alloc(u64 size, int numa_node)
326 {
327 	return __bpf_map_area_alloc(size, numa_node, false);
328 }
329 
330 void *bpf_map_area_mmapable_alloc(u64 size, int numa_node)
331 {
332 	return __bpf_map_area_alloc(size, numa_node, true);
333 }
334 
335 void bpf_map_area_free(void *area)
336 {
337 	kvfree(area);
338 }
339 
340 static u32 bpf_map_flags_retain_permanent(u32 flags)
341 {
342 	/* Some map creation flags are not tied to the map object but
343 	 * rather to the map fd instead, so they have no meaning upon
344 	 * map object inspection since multiple file descriptors with
345 	 * different (access) properties can exist here. Thus, given
346 	 * this has zero meaning for the map itself, let's clear these
347 	 * from here.
348 	 */
349 	return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
350 }
351 
352 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
353 {
354 	map->map_type = attr->map_type;
355 	map->key_size = attr->key_size;
356 	map->value_size = attr->value_size;
357 	map->max_entries = attr->max_entries;
358 	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
359 	map->numa_node = bpf_map_attr_numa_node(attr);
360 }
361 
362 static int bpf_charge_memlock(struct user_struct *user, u32 pages)
363 {
364 	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
365 
366 	if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
367 		atomic_long_sub(pages, &user->locked_vm);
368 		return -EPERM;
369 	}
370 	return 0;
371 }
372 
373 static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
374 {
375 	if (user)
376 		atomic_long_sub(pages, &user->locked_vm);
377 }
378 
379 int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
380 {
381 	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
382 	struct user_struct *user;
383 	int ret;
384 
385 	if (size >= U32_MAX - PAGE_SIZE)
386 		return -E2BIG;
387 
388 	user = get_current_user();
389 	ret = bpf_charge_memlock(user, pages);
390 	if (ret) {
391 		free_uid(user);
392 		return ret;
393 	}
394 
395 	mem->pages = pages;
396 	mem->user = user;
397 
398 	return 0;
399 }
400 
401 void bpf_map_charge_finish(struct bpf_map_memory *mem)
402 {
403 	bpf_uncharge_memlock(mem->user, mem->pages);
404 	free_uid(mem->user);
405 }
406 
407 void bpf_map_charge_move(struct bpf_map_memory *dst,
408 			 struct bpf_map_memory *src)
409 {
410 	*dst = *src;
411 
412 	/* Make sure src will not be used for the redundant uncharging. */
413 	memset(src, 0, sizeof(struct bpf_map_memory));
414 }
415 
416 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
417 {
418 	int ret;
419 
420 	ret = bpf_charge_memlock(map->memory.user, pages);
421 	if (ret)
422 		return ret;
423 	map->memory.pages += pages;
424 	return ret;
425 }
426 
427 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
428 {
429 	bpf_uncharge_memlock(map->memory.user, pages);
430 	map->memory.pages -= pages;
431 }
432 
433 static int bpf_map_alloc_id(struct bpf_map *map)
434 {
435 	int id;
436 
437 	idr_preload(GFP_KERNEL);
438 	spin_lock_bh(&map_idr_lock);
439 	id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
440 	if (id > 0)
441 		map->id = id;
442 	spin_unlock_bh(&map_idr_lock);
443 	idr_preload_end();
444 
445 	if (WARN_ON_ONCE(!id))
446 		return -ENOSPC;
447 
448 	return id > 0 ? 0 : id;
449 }
450 
451 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
452 {
453 	unsigned long flags;
454 
455 	/* Offloaded maps are removed from the IDR store when their device
456 	 * disappears - even if someone holds an fd to them they are unusable,
457 	 * the memory is gone, all ops will fail; they are simply waiting for
458 	 * refcnt to drop to be freed.
459 	 */
460 	if (!map->id)
461 		return;
462 
463 	if (do_idr_lock)
464 		spin_lock_irqsave(&map_idr_lock, flags);
465 	else
466 		__acquire(&map_idr_lock);
467 
468 	idr_remove(&map_idr, map->id);
469 	map->id = 0;
470 
471 	if (do_idr_lock)
472 		spin_unlock_irqrestore(&map_idr_lock, flags);
473 	else
474 		__release(&map_idr_lock);
475 }
476 
477 /* called from workqueue */
478 static void bpf_map_free_deferred(struct work_struct *work)
479 {
480 	struct bpf_map *map = container_of(work, struct bpf_map, work);
481 	struct bpf_map_memory mem;
482 
483 	bpf_map_charge_move(&mem, &map->memory);
484 	security_bpf_map_free(map);
485 	/* implementation dependent freeing */
486 	map->ops->map_free(map);
487 	bpf_map_charge_finish(&mem);
488 }
489 
490 static void bpf_map_put_uref(struct bpf_map *map)
491 {
492 	if (atomic64_dec_and_test(&map->usercnt)) {
493 		if (map->ops->map_release_uref)
494 			map->ops->map_release_uref(map);
495 	}
496 }
497 
498 /* decrement map refcnt and schedule it for freeing via workqueue
499  * (underlying map implementation ops->map_free() might sleep)
500  */
501 static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
502 {
503 	if (atomic64_dec_and_test(&map->refcnt)) {
504 		/* bpf_map_free_id() must be called first */
505 		bpf_map_free_id(map, do_idr_lock);
506 		btf_put(map->btf);
507 		INIT_WORK(&map->work, bpf_map_free_deferred);
508 		schedule_work(&map->work);
509 	}
510 }
511 
512 void bpf_map_put(struct bpf_map *map)
513 {
514 	__bpf_map_put(map, true);
515 }
516 EXPORT_SYMBOL_GPL(bpf_map_put);
517 
518 void bpf_map_put_with_uref(struct bpf_map *map)
519 {
520 	bpf_map_put_uref(map);
521 	bpf_map_put(map);
522 }
523 
524 static int bpf_map_release(struct inode *inode, struct file *filp)
525 {
526 	struct bpf_map *map = filp->private_data;
527 
528 	if (map->ops->map_release)
529 		map->ops->map_release(map, filp);
530 
531 	bpf_map_put_with_uref(map);
532 	return 0;
533 }
534 
535 static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f)
536 {
537 	fmode_t mode = f.file->f_mode;
538 
539 	/* Our file permissions may have been overridden by the global
540 	 * map permissions on the syscall side.
541 	 */
542 	if (READ_ONCE(map->frozen))
543 		mode &= ~FMODE_CAN_WRITE;
544 	return mode;
545 }
546 
547 #ifdef CONFIG_PROC_FS
548 static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
549 {
550 	const struct bpf_map *map = filp->private_data;
551 	const struct bpf_array *array;
552 	u32 type = 0, jited = 0;
553 
554 	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
555 		array = container_of(map, struct bpf_array, map);
556 		spin_lock(&array->aux->owner.lock);
557 		type  = array->aux->owner.type;
558 		jited = array->aux->owner.jited;
559 		spin_unlock(&array->aux->owner.lock);
560 	}
561 
562 	seq_printf(m,
563 		   "map_type:\t%u\n"
564 		   "key_size:\t%u\n"
565 		   "value_size:\t%u\n"
566 		   "max_entries:\t%u\n"
567 		   "map_flags:\t%#x\n"
568 		   "memlock:\t%llu\n"
569 		   "map_id:\t%u\n"
570 		   "frozen:\t%u\n",
571 		   map->map_type,
572 		   map->key_size,
573 		   map->value_size,
574 		   map->max_entries,
575 		   map->map_flags,
576 		   map->memory.pages * 1ULL << PAGE_SHIFT,
577 		   map->id,
578 		   READ_ONCE(map->frozen));
579 	if (type) {
580 		seq_printf(m, "owner_prog_type:\t%u\n", type);
581 		seq_printf(m, "owner_jited:\t%u\n", jited);
582 	}
583 }
584 #endif
585 
586 static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz,
587 			      loff_t *ppos)
588 {
589 	/* We need this handler such that alloc_file() enables
590 	 * f_mode with FMODE_CAN_READ.
591 	 */
592 	return -EINVAL;
593 }
594 
595 static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf,
596 			       size_t siz, loff_t *ppos)
597 {
598 	/* We need this handler such that alloc_file() enables
599 	 * f_mode with FMODE_CAN_WRITE.
600 	 */
601 	return -EINVAL;
602 }
603 
604 /* called for any extra memory-mapped regions (except initial) */
605 static void bpf_map_mmap_open(struct vm_area_struct *vma)
606 {
607 	struct bpf_map *map = vma->vm_file->private_data;
608 
609 	if (vma->vm_flags & VM_MAYWRITE)
610 		bpf_map_write_active_inc(map);
611 }
612 
613 /* called for all unmapped memory regions (including the initial one) */
614 static void bpf_map_mmap_close(struct vm_area_struct *vma)
615 {
616 	struct bpf_map *map = vma->vm_file->private_data;
617 
618 	if (vma->vm_flags & VM_MAYWRITE)
619 		bpf_map_write_active_dec(map);
620 }
621 
622 static const struct vm_operations_struct bpf_map_default_vmops = {
623 	.open		= bpf_map_mmap_open,
624 	.close		= bpf_map_mmap_close,
625 };
626 
627 static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
628 {
629 	struct bpf_map *map = filp->private_data;
630 	int err;
631 
632 	if (!map->ops->map_mmap || map_value_has_spin_lock(map))
633 		return -ENOTSUPP;
634 
635 	if (!(vma->vm_flags & VM_SHARED))
636 		return -EINVAL;
637 
638 	mutex_lock(&map->freeze_mutex);
639 
640 	if (vma->vm_flags & VM_WRITE) {
641 		if (map->frozen) {
642 			err = -EPERM;
643 			goto out;
644 		}
645 		/* map is meant to be read-only, so do not allow mapping as
646 		 * writable, because it's possible to leak a writable page
647 		 * reference and allow user-space to still modify it after
648 		 * freezing, while the verifier assumes the contents do not change
649 		 */
650 		if (map->map_flags & BPF_F_RDONLY_PROG) {
651 			err = -EACCES;
652 			goto out;
653 		}
654 	}
655 
656 	/* set default open/close callbacks */
657 	vma->vm_ops = &bpf_map_default_vmops;
658 	vma->vm_private_data = map;
659 	vma->vm_flags &= ~VM_MAYEXEC;
660 	if (!(vma->vm_flags & VM_WRITE))
661 		/* disallow re-mapping with PROT_WRITE */
662 		vma->vm_flags &= ~VM_MAYWRITE;
663 
664 	err = map->ops->map_mmap(map, vma);
665 	if (err)
666 		goto out;
667 
668 	if (vma->vm_flags & VM_MAYWRITE)
669 		bpf_map_write_active_inc(map);
670 out:
671 	mutex_unlock(&map->freeze_mutex);
672 	return err;
673 }
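/* Illustrative user-space sketch (added comment): a map created with
 * BPF_F_MMAPABLE, e.g. a BPF_MAP_TYPE_ARRAY, can be mapped like a shared
 * file; MAP_SHARED is mandatory per the VM_SHARED check above.
 *
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  map_fd, 0);
 */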
674 
675 static __poll_t bpf_map_poll(struct file *filp, struct poll_table_struct *pts)
676 {
677 	struct bpf_map *map = filp->private_data;
678 
679 	if (map->ops->map_poll)
680 		return map->ops->map_poll(map, filp, pts);
681 
682 	return EPOLLERR;
683 }
684 
685 const struct file_operations bpf_map_fops = {
686 #ifdef CONFIG_PROC_FS
687 	.show_fdinfo	= bpf_map_show_fdinfo,
688 #endif
689 	.release	= bpf_map_release,
690 	.read		= bpf_dummy_read,
691 	.write		= bpf_dummy_write,
692 	.mmap		= bpf_map_mmap,
693 	.poll		= bpf_map_poll,
694 };
695 
696 int bpf_map_new_fd(struct bpf_map *map, int flags)
697 {
698 	int ret;
699 
700 	ret = security_bpf_map(map, OPEN_FMODE(flags));
701 	if (ret < 0)
702 		return ret;
703 
704 	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
705 				flags | O_CLOEXEC);
706 }
707 
708 int bpf_get_file_flag(int flags)
709 {
710 	if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY))
711 		return -EINVAL;
712 	if (flags & BPF_F_RDONLY)
713 		return O_RDONLY;
714 	if (flags & BPF_F_WRONLY)
715 		return O_WRONLY;
716 	return O_RDWR;
717 }
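/* Example (added comment): a map created with BPF_F_RDONLY yields an O_RDONLY
 * fd, so BPF_MAP_UPDATE_ELEM on that fd fails with -EPERM while lookups still
 * work; passing both BPF_F_RDONLY and BPF_F_WRONLY is rejected with -EINVAL.
 */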
718 
719 /* helper macro to check that unused fields of 'union bpf_attr' are zero */
720 #define CHECK_ATTR(CMD) \
721 	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
722 		   sizeof(attr->CMD##_LAST_FIELD), 0, \
723 		   sizeof(*attr) - \
724 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
725 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
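/* Example (added comment): for BPF_MAP_CREATE the last used field is
 * btf_vmlinux_value_type_id, so CHECK_ATTR(BPF_MAP_CREATE) runs memchr_inv()
 * over every byte of 'union bpf_attr' that follows it, and the command is
 * rejected with -EINVAL if any of those trailing bytes is non-zero.
 */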
726 
727 /* dst and src must have at least "size" bytes.
728  * Return strlen on success and < 0 on error.
729  */
730 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size)
731 {
732 	const char *end = src + size;
733 	const char *orig_src = src;
734 
735 	memset(dst, 0, size);
736 	/* Copy all isalnum(), '_' and '.' chars. */
737 	while (src < end && *src) {
738 		if (!isalnum(*src) &&
739 		    *src != '_' && *src != '.')
740 			return -EINVAL;
741 		*dst++ = *src++;
742 	}
743 
744 	/* No '\0' found in "size" number of bytes */
745 	if (src == end)
746 		return -EINVAL;
747 
748 	return src - orig_src;
749 }
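/* Example (added comment): "my_map.v1" and "ringbuf_0" are accepted;
 * "bad-name" is rejected with -EINVAL because '-' is neither alphanumeric,
 * '_' nor '.', and a name that fills all "size" bytes without a terminating
 * '\0' is rejected as well.
 */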
750 
751 int map_check_no_btf(const struct bpf_map *map,
752 		     const struct btf *btf,
753 		     const struct btf_type *key_type,
754 		     const struct btf_type *value_type)
755 {
756 	return -ENOTSUPP;
757 }
758 
759 static int map_check_btf(struct bpf_map *map, const struct btf *btf,
760 			 u32 btf_key_id, u32 btf_value_id)
761 {
762 	const struct btf_type *key_type, *value_type;
763 	u32 key_size, value_size;
764 	int ret = 0;
765 
766 	/* Some maps allow key to be unspecified. */
767 	if (btf_key_id) {
768 		key_type = btf_type_id_size(btf, &btf_key_id, &key_size);
769 		if (!key_type || key_size != map->key_size)
770 			return -EINVAL;
771 	} else {
772 		key_type = btf_type_by_id(btf, 0);
773 		if (!map->ops->map_check_btf)
774 			return -EINVAL;
775 	}
776 
777 	value_type = btf_type_id_size(btf, &btf_value_id, &value_size);
778 	if (!value_type || value_size != map->value_size)
779 		return -EINVAL;
780 
781 	map->spin_lock_off = btf_find_spin_lock(btf, value_type);
782 
783 	if (map_value_has_spin_lock(map)) {
784 		if (map->map_flags & BPF_F_RDONLY_PROG)
785 			return -EACCES;
786 		if (map->map_type != BPF_MAP_TYPE_HASH &&
787 		    map->map_type != BPF_MAP_TYPE_ARRAY &&
788 		    map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
789 		    map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
790 		    map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
791 			return -ENOTSUPP;
792 		if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
793 		    map->value_size) {
794 			WARN_ONCE(1,
795 				  "verifier bug spin_lock_off %d value_size %d\n",
796 				  map->spin_lock_off, map->value_size);
797 			return -EFAULT;
798 		}
799 	}
800 
801 	if (map->ops->map_check_btf)
802 		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
803 
804 	return ret;
805 }
806 
807 #define BPF_MAP_CREATE_LAST_FIELD btf_vmlinux_value_type_id
808 /* called via syscall */
809 static int map_create(union bpf_attr *attr)
810 {
811 	int numa_node = bpf_map_attr_numa_node(attr);
812 	struct bpf_map_memory mem;
813 	struct bpf_map *map;
814 	int f_flags;
815 	int err;
816 
817 	err = CHECK_ATTR(BPF_MAP_CREATE);
818 	if (err)
819 		return -EINVAL;
820 
821 	if (attr->btf_vmlinux_value_type_id) {
822 		if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
823 		    attr->btf_key_type_id || attr->btf_value_type_id)
824 			return -EINVAL;
825 	} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
826 		return -EINVAL;
827 	}
828 
829 	f_flags = bpf_get_file_flag(attr->map_flags);
830 	if (f_flags < 0)
831 		return f_flags;
832 
833 	if (numa_node != NUMA_NO_NODE &&
834 	    ((unsigned int)numa_node >= nr_node_ids ||
835 	     !node_online(numa_node)))
836 		return -EINVAL;
837 
838 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
839 	map = find_and_alloc_map(attr);
840 	if (IS_ERR(map))
841 		return PTR_ERR(map);
842 
843 	err = bpf_obj_name_cpy(map->name, attr->map_name,
844 			       sizeof(attr->map_name));
845 	if (err < 0)
846 		goto free_map;
847 
848 	atomic64_set(&map->refcnt, 1);
849 	atomic64_set(&map->usercnt, 1);
850 	mutex_init(&map->freeze_mutex);
851 
852 	map->spin_lock_off = -EINVAL;
853 	if (attr->btf_key_type_id || attr->btf_value_type_id ||
854 	    /* Even when the map's value is a kernel struct,
855 	     * the bpf_prog.o must have BTF to begin with
856 	     * to figure out the corresponding kernel
857 	     * counterpart.  Thus, attr->btf_fd has
858 	     * to be valid as well.
859 	     */
860 	    attr->btf_vmlinux_value_type_id) {
861 		struct btf *btf;
862 
863 		btf = btf_get_by_fd(attr->btf_fd);
864 		if (IS_ERR(btf)) {
865 			err = PTR_ERR(btf);
866 			goto free_map;
867 		}
868 		map->btf = btf;
869 
870 		if (attr->btf_value_type_id) {
871 			err = map_check_btf(map, btf, attr->btf_key_type_id,
872 					    attr->btf_value_type_id);
873 			if (err)
874 				goto free_map;
875 		}
876 
877 		map->btf_key_type_id = attr->btf_key_type_id;
878 		map->btf_value_type_id = attr->btf_value_type_id;
879 		map->btf_vmlinux_value_type_id =
880 			attr->btf_vmlinux_value_type_id;
881 	}
882 
883 	err = security_bpf_map_alloc(map);
884 	if (err)
885 		goto free_map;
886 
887 	err = bpf_map_alloc_id(map);
888 	if (err)
889 		goto free_map_sec;
890 
891 	err = bpf_map_new_fd(map, f_flags);
892 	if (err < 0) {
893 		/* failed to allocate fd.
894 		 * bpf_map_put_with_uref() is needed because the above
895 		 * bpf_map_alloc_id() has published the map
896 		 * to the userspace and the userspace may
897 		 * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
898 		 */
899 		bpf_map_put_with_uref(map);
900 		return err;
901 	}
902 
903 	return err;
904 
905 free_map_sec:
906 	security_bpf_map_free(map);
907 free_map:
908 	btf_put(map->btf);
909 	bpf_map_charge_move(&mem, &map->memory);
910 	map->ops->map_free(map);
911 	bpf_map_charge_finish(&mem);
912 	return err;
913 }
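/* Illustrative user-space sketch (added comment): the path above is entered
 * through the bpf(2) syscall, roughly as follows (error handling omitted):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = 4,
 *		.value_size  = 8,
 *		.max_entries = 1024,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */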
914 
915 /* if error is returned, fd is released.
916  * On success caller should complete fd access with matching fdput()
917  */
918 struct bpf_map *__bpf_map_get(struct fd f)
919 {
920 	if (!f.file)
921 		return ERR_PTR(-EBADF);
922 	if (f.file->f_op != &bpf_map_fops) {
923 		fdput(f);
924 		return ERR_PTR(-EINVAL);
925 	}
926 
927 	return f.file->private_data;
928 }
929 
930 void bpf_map_inc(struct bpf_map *map)
931 {
932 	atomic64_inc(&map->refcnt);
933 }
934 EXPORT_SYMBOL_GPL(bpf_map_inc);
935 
936 void bpf_map_inc_with_uref(struct bpf_map *map)
937 {
938 	atomic64_inc(&map->refcnt);
939 	atomic64_inc(&map->usercnt);
940 }
941 EXPORT_SYMBOL_GPL(bpf_map_inc_with_uref);
942 
943 struct bpf_map *bpf_map_get(u32 ufd)
944 {
945 	struct fd f = fdget(ufd);
946 	struct bpf_map *map;
947 
948 	map = __bpf_map_get(f);
949 	if (IS_ERR(map))
950 		return map;
951 
952 	bpf_map_inc(map);
953 	fdput(f);
954 
955 	return map;
956 }
957 
958 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
959 {
960 	struct fd f = fdget(ufd);
961 	struct bpf_map *map;
962 
963 	map = __bpf_map_get(f);
964 	if (IS_ERR(map))
965 		return map;
966 
967 	bpf_map_inc_with_uref(map);
968 	fdput(f);
969 
970 	return map;
971 }
972 
973 /* map_idr_lock should have been held */
974 static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
975 {
976 	int refold;
977 
978 	refold = atomic64_fetch_add_unless(&map->refcnt, 1, 0);
979 	if (!refold)
980 		return ERR_PTR(-ENOENT);
981 	if (uref)
982 		atomic64_inc(&map->usercnt);
983 
984 	return map;
985 }
986 
987 struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map)
988 {
989 	spin_lock_bh(&map_idr_lock);
990 	map = __bpf_map_inc_not_zero(map, false);
991 	spin_unlock_bh(&map_idr_lock);
992 
993 	return map;
994 }
995 EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero);
996 
997 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
998 {
999 	return -ENOTSUPP;
1000 }
1001 
1002 static void *__bpf_copy_key(void __user *ukey, u64 key_size)
1003 {
1004 	if (key_size)
1005 		return memdup_user(ukey, key_size);
1006 
1007 	if (ukey)
1008 		return ERR_PTR(-EINVAL);
1009 
1010 	return NULL;
1011 }
1012 
1013 /* last field in 'union bpf_attr' used by this command */
1014 #define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
1015 
1016 static int map_lookup_elem(union bpf_attr *attr)
1017 {
1018 	void __user *ukey = u64_to_user_ptr(attr->key);
1019 	void __user *uvalue = u64_to_user_ptr(attr->value);
1020 	int ufd = attr->map_fd;
1021 	struct bpf_map *map;
1022 	void *key, *value;
1023 	u32 value_size;
1024 	struct fd f;
1025 	int err;
1026 
1027 	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
1028 		return -EINVAL;
1029 
1030 	if (attr->flags & ~BPF_F_LOCK)
1031 		return -EINVAL;
1032 
1033 	f = fdget(ufd);
1034 	map = __bpf_map_get(f);
1035 	if (IS_ERR(map))
1036 		return PTR_ERR(map);
1037 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1038 		err = -EPERM;
1039 		goto err_put;
1040 	}
1041 
1042 	if ((attr->flags & BPF_F_LOCK) &&
1043 	    !map_value_has_spin_lock(map)) {
1044 		err = -EINVAL;
1045 		goto err_put;
1046 	}
1047 
1048 	key = __bpf_copy_key(ukey, map->key_size);
1049 	if (IS_ERR(key)) {
1050 		err = PTR_ERR(key);
1051 		goto err_put;
1052 	}
1053 
1054 	value_size = bpf_map_value_size(map);
1055 
1056 	err = -ENOMEM;
1057 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1058 	if (!value)
1059 		goto free_key;
1060 
1061 	err = bpf_map_copy_value(map, key, value, attr->flags);
1062 	if (err)
1063 		goto free_value;
1064 
1065 	err = -EFAULT;
1066 	if (copy_to_user(uvalue, value, value_size) != 0)
1067 		goto free_value;
1068 
1069 	err = 0;
1070 
1071 free_value:
1072 	kfree(value);
1073 free_key:
1074 	kfree(key);
1075 err_put:
1076 	fdput(f);
1077 	return err;
1078 }
1079 
1080 
1081 #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
1082 
1083 static int map_update_elem(union bpf_attr *attr)
1084 {
1085 	void __user *ukey = u64_to_user_ptr(attr->key);
1086 	void __user *uvalue = u64_to_user_ptr(attr->value);
1087 	int ufd = attr->map_fd;
1088 	struct bpf_map *map;
1089 	void *key, *value;
1090 	u32 value_size;
1091 	struct fd f;
1092 	int err;
1093 
1094 	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
1095 		return -EINVAL;
1096 
1097 	f = fdget(ufd);
1098 	map = __bpf_map_get(f);
1099 	if (IS_ERR(map))
1100 		return PTR_ERR(map);
1101 	bpf_map_write_active_inc(map);
1102 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1103 		err = -EPERM;
1104 		goto err_put;
1105 	}
1106 
1107 	if ((attr->flags & BPF_F_LOCK) &&
1108 	    !map_value_has_spin_lock(map)) {
1109 		err = -EINVAL;
1110 		goto err_put;
1111 	}
1112 
1113 	key = __bpf_copy_key(ukey, map->key_size);
1114 	if (IS_ERR(key)) {
1115 		err = PTR_ERR(key);
1116 		goto err_put;
1117 	}
1118 
1119 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
1120 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
1121 	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY ||
1122 	    map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
1123 		value_size = round_up(map->value_size, 8) * num_possible_cpus();
1124 	else
1125 		value_size = map->value_size;
1126 
1127 	err = -ENOMEM;
1128 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1129 	if (!value)
1130 		goto free_key;
1131 
1132 	err = -EFAULT;
1133 	if (copy_from_user(value, uvalue, value_size) != 0)
1134 		goto free_value;
1135 
1136 	err = bpf_map_update_value(map, f, key, value, attr->flags);
1137 
1138 free_value:
1139 	kfree(value);
1140 free_key:
1141 	kfree(key);
1142 err_put:
1143 	bpf_map_write_active_dec(map);
1144 	fdput(f);
1145 	return err;
1146 }
1147 
1148 #define BPF_MAP_DELETE_ELEM_LAST_FIELD key
1149 
1150 static int map_delete_elem(union bpf_attr *attr)
1151 {
1152 	void __user *ukey = u64_to_user_ptr(attr->key);
1153 	int ufd = attr->map_fd;
1154 	struct bpf_map *map;
1155 	struct fd f;
1156 	void *key;
1157 	int err;
1158 
1159 	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
1160 		return -EINVAL;
1161 
1162 	f = fdget(ufd);
1163 	map = __bpf_map_get(f);
1164 	if (IS_ERR(map))
1165 		return PTR_ERR(map);
1166 	bpf_map_write_active_inc(map);
1167 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1168 		err = -EPERM;
1169 		goto err_put;
1170 	}
1171 
1172 	key = __bpf_copy_key(ukey, map->key_size);
1173 	if (IS_ERR(key)) {
1174 		err = PTR_ERR(key);
1175 		goto err_put;
1176 	}
1177 
1178 	if (bpf_map_is_dev_bound(map)) {
1179 		err = bpf_map_offload_delete_elem(map, key);
1180 		goto out;
1181 	} else if (IS_FD_PROG_ARRAY(map) ||
1182 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1183 		/* These maps require sleepable context */
1184 		err = map->ops->map_delete_elem(map, key);
1185 		goto out;
1186 	}
1187 
1188 	bpf_disable_instrumentation();
1189 	rcu_read_lock();
1190 	err = map->ops->map_delete_elem(map, key);
1191 	rcu_read_unlock();
1192 	bpf_enable_instrumentation();
1193 	maybe_wait_bpf_programs(map);
1194 out:
1195 	kfree(key);
1196 err_put:
1197 	bpf_map_write_active_dec(map);
1198 	fdput(f);
1199 	return err;
1200 }
1201 
1202 /* last field in 'union bpf_attr' used by this command */
1203 #define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key
1204 
1205 static int map_get_next_key(union bpf_attr *attr)
1206 {
1207 	void __user *ukey = u64_to_user_ptr(attr->key);
1208 	void __user *unext_key = u64_to_user_ptr(attr->next_key);
1209 	int ufd = attr->map_fd;
1210 	struct bpf_map *map;
1211 	void *key, *next_key;
1212 	struct fd f;
1213 	int err;
1214 
1215 	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
1216 		return -EINVAL;
1217 
1218 	f = fdget(ufd);
1219 	map = __bpf_map_get(f);
1220 	if (IS_ERR(map))
1221 		return PTR_ERR(map);
1222 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
1223 		err = -EPERM;
1224 		goto err_put;
1225 	}
1226 
1227 	if (ukey) {
1228 		key = __bpf_copy_key(ukey, map->key_size);
1229 		if (IS_ERR(key)) {
1230 			err = PTR_ERR(key);
1231 			goto err_put;
1232 		}
1233 	} else {
1234 		key = NULL;
1235 	}
1236 
1237 	err = -ENOMEM;
1238 	next_key = kmalloc(map->key_size, GFP_USER);
1239 	if (!next_key)
1240 		goto free_key;
1241 
1242 	if (bpf_map_is_dev_bound(map)) {
1243 		err = bpf_map_offload_get_next_key(map, key, next_key);
1244 		goto out;
1245 	}
1246 
1247 	rcu_read_lock();
1248 	err = map->ops->map_get_next_key(map, key, next_key);
1249 	rcu_read_unlock();
1250 out:
1251 	if (err)
1252 		goto free_next_key;
1253 
1254 	err = -EFAULT;
1255 	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
1256 		goto free_next_key;
1257 
1258 	err = 0;
1259 
1260 free_next_key:
1261 	kfree(next_key);
1262 free_key:
1263 	kfree(key);
1264 err_put:
1265 	fdput(f);
1266 	return err;
1267 }
1268 
1269 int generic_map_delete_batch(struct bpf_map *map,
1270 			     const union bpf_attr *attr,
1271 			     union bpf_attr __user *uattr)
1272 {
1273 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1274 	u32 cp, max_count;
1275 	int err = 0;
1276 	void *key;
1277 
1278 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1279 		return -EINVAL;
1280 
1281 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1282 	    !map_value_has_spin_lock(map)) {
1283 		return -EINVAL;
1284 	}
1285 
1286 	max_count = attr->batch.count;
1287 	if (!max_count)
1288 		return 0;
1289 
1290 	if (put_user(0, &uattr->batch.count))
1291 		return -EFAULT;
1292 
1293 	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1294 	if (!key)
1295 		return -ENOMEM;
1296 
1297 	for (cp = 0; cp < max_count; cp++) {
1298 		err = -EFAULT;
1299 		if (copy_from_user(key, keys + cp * map->key_size,
1300 				   map->key_size))
1301 			break;
1302 
1303 		if (bpf_map_is_dev_bound(map)) {
1304 			err = bpf_map_offload_delete_elem(map, key);
1305 			break;
1306 		}
1307 
1308 		bpf_disable_instrumentation();
1309 		rcu_read_lock();
1310 		err = map->ops->map_delete_elem(map, key);
1311 		rcu_read_unlock();
1312 		bpf_enable_instrumentation();
1313 		maybe_wait_bpf_programs(map);
1314 		if (err)
1315 			break;
1316 		cond_resched();
1317 	}
1318 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1319 		err = -EFAULT;
1320 
1321 	kfree(key);
1322 	return err;
1323 }
1324 
1325 int generic_map_update_batch(struct bpf_map *map,
1326 			     const union bpf_attr *attr,
1327 			     union bpf_attr __user *uattr)
1328 {
1329 	void __user *values = u64_to_user_ptr(attr->batch.values);
1330 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1331 	u32 value_size, cp, max_count;
1332 	int ufd = attr->batch.map_fd;
1333 	void *key, *value;
1334 	struct fd f;
1335 	int err = 0;
1336 
1337 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1338 		return -EINVAL;
1339 
1340 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1341 	    !map_value_has_spin_lock(map)) {
1342 		return -EINVAL;
1343 	}
1344 
1345 	value_size = bpf_map_value_size(map);
1346 
1347 	max_count = attr->batch.count;
1348 	if (!max_count)
1349 		return 0;
1350 
1351 	if (put_user(0, &uattr->batch.count))
1352 		return -EFAULT;
1353 
1354 	key = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1355 	if (!key)
1356 		return -ENOMEM;
1357 
1358 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1359 	if (!value) {
1360 		kfree(key);
1361 		return -ENOMEM;
1362 	}
1363 
1364 	f = fdget(ufd); /* bpf_map_do_batch() guarantees ufd is valid */
1365 	for (cp = 0; cp < max_count; cp++) {
1366 		err = -EFAULT;
1367 		if (copy_from_user(key, keys + cp * map->key_size,
1368 		    map->key_size) ||
1369 		    copy_from_user(value, values + cp * value_size, value_size))
1370 			break;
1371 
1372 		err = bpf_map_update_value(map, f, key, value,
1373 					   attr->batch.elem_flags);
1374 
1375 		if (err)
1376 			break;
1377 		cond_resched();
1378 	}
1379 
1380 	if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
1381 		err = -EFAULT;
1382 
1383 	kfree(value);
1384 	kfree(key);
1385 	fdput(f);
1386 	return err;
1387 }
1388 
1389 #define MAP_LOOKUP_RETRIES 3
1390 
1391 int generic_map_lookup_batch(struct bpf_map *map,
1392 				    const union bpf_attr *attr,
1393 				    union bpf_attr __user *uattr)
1394 {
1395 	void __user *uobatch = u64_to_user_ptr(attr->batch.out_batch);
1396 	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1397 	void __user *values = u64_to_user_ptr(attr->batch.values);
1398 	void __user *keys = u64_to_user_ptr(attr->batch.keys);
1399 	void *buf, *buf_prevkey, *prev_key, *key, *value;
1400 	int err, retry = MAP_LOOKUP_RETRIES;
1401 	u32 value_size, cp, max_count;
1402 
1403 	if (attr->batch.elem_flags & ~BPF_F_LOCK)
1404 		return -EINVAL;
1405 
1406 	if ((attr->batch.elem_flags & BPF_F_LOCK) &&
1407 	    !map_value_has_spin_lock(map))
1408 		return -EINVAL;
1409 
1410 	value_size = bpf_map_value_size(map);
1411 
1412 	max_count = attr->batch.count;
1413 	if (!max_count)
1414 		return 0;
1415 
1416 	if (put_user(0, &uattr->batch.count))
1417 		return -EFAULT;
1418 
1419 	buf_prevkey = kmalloc(map->key_size, GFP_USER | __GFP_NOWARN);
1420 	if (!buf_prevkey)
1421 		return -ENOMEM;
1422 
1423 	buf = kmalloc(map->key_size + value_size, GFP_USER | __GFP_NOWARN);
1424 	if (!buf) {
1425 		kfree(buf_prevkey);
1426 		return -ENOMEM;
1427 	}
1428 
1429 	err = -EFAULT;
1430 	prev_key = NULL;
1431 	if (ubatch && copy_from_user(buf_prevkey, ubatch, map->key_size))
1432 		goto free_buf;
1433 	key = buf;
1434 	value = key + map->key_size;
1435 	if (ubatch)
1436 		prev_key = buf_prevkey;
1437 
1438 	for (cp = 0; cp < max_count;) {
1439 		rcu_read_lock();
1440 		err = map->ops->map_get_next_key(map, prev_key, key);
1441 		rcu_read_unlock();
1442 		if (err)
1443 			break;
1444 		err = bpf_map_copy_value(map, key, value,
1445 					 attr->batch.elem_flags);
1446 
1447 		if (err == -ENOENT) {
1448 			if (retry) {
1449 				retry--;
1450 				continue;
1451 			}
1452 			err = -EINTR;
1453 			break;
1454 		}
1455 
1456 		if (err)
1457 			goto free_buf;
1458 
1459 		if (copy_to_user(keys + cp * map->key_size, key,
1460 				 map->key_size)) {
1461 			err = -EFAULT;
1462 			goto free_buf;
1463 		}
1464 		if (copy_to_user(values + cp * value_size, value, value_size)) {
1465 			err = -EFAULT;
1466 			goto free_buf;
1467 		}
1468 
1469 		if (!prev_key)
1470 			prev_key = buf_prevkey;
1471 
1472 		swap(prev_key, key);
1473 		retry = MAP_LOOKUP_RETRIES;
1474 		cp++;
1475 		cond_resched();
1476 	}
1477 
1478 	if (err == -EFAULT)
1479 		goto free_buf;
1480 
1481 	if ((copy_to_user(&uattr->batch.count, &cp, sizeof(cp)) ||
1482 		    (cp && copy_to_user(uobatch, prev_key, map->key_size))))
1483 		err = -EFAULT;
1484 
1485 free_buf:
1486 	kfree(buf_prevkey);
1487 	kfree(buf);
1488 	return err;
1489 }
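/* Usage note (added comment): a caller typically drains a map in chunks by
 * feeding the out_batch cursor of one BPF_MAP_LOOKUP_BATCH call back in as
 * in_batch of the next call, until -ENOENT signals the end; batch.count is
 * updated on return with the number of elements actually copied out.
 */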
1490 
1491 #define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value
1492 
1493 static int map_lookup_and_delete_elem(union bpf_attr *attr)
1494 {
1495 	void __user *ukey = u64_to_user_ptr(attr->key);
1496 	void __user *uvalue = u64_to_user_ptr(attr->value);
1497 	int ufd = attr->map_fd;
1498 	struct bpf_map *map;
1499 	void *key, *value;
1500 	u32 value_size;
1501 	struct fd f;
1502 	int err;
1503 
1504 	if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM))
1505 		return -EINVAL;
1506 
1507 	f = fdget(ufd);
1508 	map = __bpf_map_get(f);
1509 	if (IS_ERR(map))
1510 		return PTR_ERR(map);
1511 	bpf_map_write_active_inc(map);
1512 	if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) ||
1513 	    !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
1514 		err = -EPERM;
1515 		goto err_put;
1516 	}
1517 
1518 	key = __bpf_copy_key(ukey, map->key_size);
1519 	if (IS_ERR(key)) {
1520 		err = PTR_ERR(key);
1521 		goto err_put;
1522 	}
1523 
1524 	value_size = map->value_size;
1525 
1526 	err = -ENOMEM;
1527 	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
1528 	if (!value)
1529 		goto free_key;
1530 
1531 	if (map->map_type == BPF_MAP_TYPE_QUEUE ||
1532 	    map->map_type == BPF_MAP_TYPE_STACK) {
1533 		err = map->ops->map_pop_elem(map, value);
1534 	} else {
1535 		err = -ENOTSUPP;
1536 	}
1537 
1538 	if (err)
1539 		goto free_value;
1540 
1541 	if (copy_to_user(uvalue, value, value_size) != 0) {
1542 		err = -EFAULT;
1543 		goto free_value;
1544 	}
1545 
1546 	err = 0;
1547 
1548 free_value:
1549 	kfree(value);
1550 free_key:
1551 	kfree(key);
1552 err_put:
1553 	bpf_map_write_active_dec(map);
1554 	fdput(f);
1555 	return err;
1556 }
1557 
1558 #define BPF_MAP_FREEZE_LAST_FIELD map_fd
1559 
1560 static int map_freeze(const union bpf_attr *attr)
1561 {
1562 	int err = 0, ufd = attr->map_fd;
1563 	struct bpf_map *map;
1564 	struct fd f;
1565 
1566 	if (CHECK_ATTR(BPF_MAP_FREEZE))
1567 		return -EINVAL;
1568 
1569 	f = fdget(ufd);
1570 	map = __bpf_map_get(f);
1571 	if (IS_ERR(map))
1572 		return PTR_ERR(map);
1573 
1574 	if (map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
1575 		fdput(f);
1576 		return -ENOTSUPP;
1577 	}
1578 
1579 	mutex_lock(&map->freeze_mutex);
1580 	if (bpf_map_write_active(map)) {
1581 		err = -EBUSY;
1582 		goto err_put;
1583 	}
1584 	if (READ_ONCE(map->frozen)) {
1585 		err = -EBUSY;
1586 		goto err_put;
1587 	}
1588 	if (!bpf_capable()) {
1589 		err = -EPERM;
1590 		goto err_put;
1591 	}
1592 
1593 	WRITE_ONCE(map->frozen, true);
1594 err_put:
1595 	mutex_unlock(&map->freeze_mutex);
1596 	fdput(f);
1597 	return err;
1598 }
1599 
1600 static const struct bpf_prog_ops * const bpf_prog_types[] = {
1601 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
1602 	[_id] = & _name ## _prog_ops,
1603 #define BPF_MAP_TYPE(_id, _ops)
1604 #define BPF_LINK_TYPE(_id, _name)
1605 #include <linux/bpf_types.h>
1606 #undef BPF_PROG_TYPE
1607 #undef BPF_MAP_TYPE
1608 #undef BPF_LINK_TYPE
1609 };
1610 
1611 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
1612 {
1613 	const struct bpf_prog_ops *ops;
1614 
1615 	if (type >= ARRAY_SIZE(bpf_prog_types))
1616 		return -EINVAL;
1617 	type = array_index_nospec(type, ARRAY_SIZE(bpf_prog_types));
1618 	ops = bpf_prog_types[type];
1619 	if (!ops)
1620 		return -EINVAL;
1621 
1622 	if (!bpf_prog_is_dev_bound(prog->aux))
1623 		prog->aux->ops = ops;
1624 	else
1625 		prog->aux->ops = &bpf_offload_prog_ops;
1626 	prog->type = type;
1627 	return 0;
1628 }
1629 
1630 enum bpf_audit {
1631 	BPF_AUDIT_LOAD,
1632 	BPF_AUDIT_UNLOAD,
1633 	BPF_AUDIT_MAX,
1634 };
1635 
1636 static const char * const bpf_audit_str[BPF_AUDIT_MAX] = {
1637 	[BPF_AUDIT_LOAD]   = "LOAD",
1638 	[BPF_AUDIT_UNLOAD] = "UNLOAD",
1639 };
1640 
1641 static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
1642 {
1643 	struct audit_context *ctx = NULL;
1644 	struct audit_buffer *ab;
1645 
1646 	if (WARN_ON_ONCE(op >= BPF_AUDIT_MAX))
1647 		return;
1648 	if (audit_enabled == AUDIT_OFF)
1649 		return;
1650 	if (op == BPF_AUDIT_LOAD)
1651 		ctx = audit_context();
1652 	ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
1653 	if (unlikely(!ab))
1654 		return;
1655 	audit_log_format(ab, "prog-id=%u op=%s",
1656 			 prog->aux->id, bpf_audit_str[op]);
1657 	audit_log_end(ab);
1658 }
1659 
1660 int __bpf_prog_charge(struct user_struct *user, u32 pages)
1661 {
1662 	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1663 	unsigned long user_bufs;
1664 
1665 	if (user) {
1666 		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
1667 		if (user_bufs > memlock_limit) {
1668 			atomic_long_sub(pages, &user->locked_vm);
1669 			return -EPERM;
1670 		}
1671 	}
1672 
1673 	return 0;
1674 }
1675 
1676 void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
1677 {
1678 	if (user)
1679 		atomic_long_sub(pages, &user->locked_vm);
1680 }
1681 
1682 static int bpf_prog_charge_memlock(struct bpf_prog *prog)
1683 {
1684 	struct user_struct *user = get_current_user();
1685 	int ret;
1686 
1687 	ret = __bpf_prog_charge(user, prog->pages);
1688 	if (ret) {
1689 		free_uid(user);
1690 		return ret;
1691 	}
1692 
1693 	prog->aux->user = user;
1694 	return 0;
1695 }
1696 
1697 static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
1698 {
1699 	struct user_struct *user = prog->aux->user;
1700 
1701 	__bpf_prog_uncharge(user, prog->pages);
1702 	free_uid(user);
1703 }
1704 
1705 static int bpf_prog_alloc_id(struct bpf_prog *prog)
1706 {
1707 	int id;
1708 
1709 	idr_preload(GFP_KERNEL);
1710 	spin_lock_bh(&prog_idr_lock);
1711 	id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
1712 	if (id > 0)
1713 		prog->aux->id = id;
1714 	spin_unlock_bh(&prog_idr_lock);
1715 	idr_preload_end();
1716 
1717 	/* id is in [1, INT_MAX) */
1718 	if (WARN_ON_ONCE(!id))
1719 		return -ENOSPC;
1720 
1721 	return id > 0 ? 0 : id;
1722 }
1723 
1724 void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
1725 {
1726 	/* cBPF to eBPF migrations are currently not in the idr store.
1727 	 * Offloaded programs are removed from the store when their device
1728 	 * disappears - even if someone grabs an fd to them they are unusable,
1729 	 * simply waiting for refcnt to drop to be freed.
1730 	 */
1731 	if (!prog->aux->id)
1732 		return;
1733 
1734 	if (do_idr_lock)
1735 		spin_lock_bh(&prog_idr_lock);
1736 	else
1737 		__acquire(&prog_idr_lock);
1738 
1739 	idr_remove(&prog_idr, prog->aux->id);
1740 	prog->aux->id = 0;
1741 
1742 	if (do_idr_lock)
1743 		spin_unlock_bh(&prog_idr_lock);
1744 	else
1745 		__release(&prog_idr_lock);
1746 }
1747 
1748 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
1749 {
1750 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
1751 
1752 	kvfree(aux->func_info);
1753 	kfree(aux->func_info_aux);
1754 	bpf_prog_uncharge_memlock(aux->prog);
1755 	security_bpf_prog_free(aux);
1756 	bpf_prog_free(aux->prog);
1757 }
1758 
1759 static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
1760 {
1761 	bpf_prog_kallsyms_del_all(prog);
1762 	btf_put(prog->aux->btf);
1763 	bpf_prog_free_linfo(prog);
1764 
1765 	if (deferred) {
1766 		if (prog->aux->sleepable)
1767 			call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
1768 		else
1769 			call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
1770 	} else {
1771 		__bpf_prog_put_rcu(&prog->aux->rcu);
1772 	}
1773 }
1774 
1775 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
1776 {
1777 	if (atomic64_dec_and_test(&prog->aux->refcnt)) {
1778 		perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
1779 		bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
1780 		/* bpf_prog_free_id() must be called first */
1781 		bpf_prog_free_id(prog, do_idr_lock);
1782 		__bpf_prog_put_noref(prog, true);
1783 	}
1784 }
1785 
1786 void bpf_prog_put(struct bpf_prog *prog)
1787 {
1788 	__bpf_prog_put(prog, true);
1789 }
1790 EXPORT_SYMBOL_GPL(bpf_prog_put);
1791 
1792 static int bpf_prog_release(struct inode *inode, struct file *filp)
1793 {
1794 	struct bpf_prog *prog = filp->private_data;
1795 
1796 	bpf_prog_put(prog);
1797 	return 0;
1798 }
1799 
1800 static void bpf_prog_get_stats(const struct bpf_prog *prog,
1801 			       struct bpf_prog_stats *stats)
1802 {
1803 	u64 nsecs = 0, cnt = 0;
1804 	int cpu;
1805 
1806 	for_each_possible_cpu(cpu) {
1807 		const struct bpf_prog_stats *st;
1808 		unsigned int start;
1809 		u64 tnsecs, tcnt;
1810 
1811 		st = per_cpu_ptr(prog->aux->stats, cpu);
1812 		do {
1813 			start = u64_stats_fetch_begin_irq(&st->syncp);
1814 			tnsecs = st->nsecs;
1815 			tcnt = st->cnt;
1816 		} while (u64_stats_fetch_retry_irq(&st->syncp, start));
1817 		nsecs += tnsecs;
1818 		cnt += tcnt;
1819 	}
1820 	stats->nsecs = nsecs;
1821 	stats->cnt = cnt;
1822 }
1823 
1824 #ifdef CONFIG_PROC_FS
1825 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
1826 {
1827 	const struct bpf_prog *prog = filp->private_data;
1828 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
1829 	struct bpf_prog_stats stats;
1830 
1831 	bpf_prog_get_stats(prog, &stats);
1832 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
1833 	seq_printf(m,
1834 		   "prog_type:\t%u\n"
1835 		   "prog_jited:\t%u\n"
1836 		   "prog_tag:\t%s\n"
1837 		   "memlock:\t%llu\n"
1838 		   "prog_id:\t%u\n"
1839 		   "run_time_ns:\t%llu\n"
1840 		   "run_cnt:\t%llu\n",
1841 		   prog->type,
1842 		   prog->jited,
1843 		   prog_tag,
1844 		   prog->pages * 1ULL << PAGE_SHIFT,
1845 		   prog->aux->id,
1846 		   stats.nsecs,
1847 		   stats.cnt);
1848 }
1849 #endif
1850 
1851 const struct file_operations bpf_prog_fops = {
1852 #ifdef CONFIG_PROC_FS
1853 	.show_fdinfo	= bpf_prog_show_fdinfo,
1854 #endif
1855 	.release	= bpf_prog_release,
1856 	.read		= bpf_dummy_read,
1857 	.write		= bpf_dummy_write,
1858 };
1859 
1860 int bpf_prog_new_fd(struct bpf_prog *prog)
1861 {
1862 	int ret;
1863 
1864 	ret = security_bpf_prog(prog);
1865 	if (ret < 0)
1866 		return ret;
1867 
1868 	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
1869 				O_RDWR | O_CLOEXEC);
1870 }
1871 
1872 static struct bpf_prog *____bpf_prog_get(struct fd f)
1873 {
1874 	if (!f.file)
1875 		return ERR_PTR(-EBADF);
1876 	if (f.file->f_op != &bpf_prog_fops) {
1877 		fdput(f);
1878 		return ERR_PTR(-EINVAL);
1879 	}
1880 
1881 	return f.file->private_data;
1882 }
1883 
1884 void bpf_prog_add(struct bpf_prog *prog, int i)
1885 {
1886 	atomic64_add(i, &prog->aux->refcnt);
1887 }
1888 EXPORT_SYMBOL_GPL(bpf_prog_add);
1889 
1890 void bpf_prog_sub(struct bpf_prog *prog, int i)
1891 {
1892 	/* Only to be used for undoing previous bpf_prog_add() in some
1893 	 * error path. We still know that another entity in our call
1894 	 * path holds a reference to the program, thus atomic_sub() can
1895 	 * be safely used in such cases!
1896 	 */
1897 	WARN_ON(atomic64_sub_return(i, &prog->aux->refcnt) == 0);
1898 }
1899 EXPORT_SYMBOL_GPL(bpf_prog_sub);
1900 
1901 void bpf_prog_inc(struct bpf_prog *prog)
1902 {
1903 	atomic64_inc(&prog->aux->refcnt);
1904 }
1905 EXPORT_SYMBOL_GPL(bpf_prog_inc);
1906 
1907 /* prog_idr_lock should have been held */
1908 struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
1909 {
1910 	int refold;
1911 
1912 	refold = atomic64_fetch_add_unless(&prog->aux->refcnt, 1, 0);
1913 
1914 	if (!refold)
1915 		return ERR_PTR(-ENOENT);
1916 
1917 	return prog;
1918 }
1919 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
1920 
1921 bool bpf_prog_get_ok(struct bpf_prog *prog,
1922 			    enum bpf_prog_type *attach_type, bool attach_drv)
1923 {
1924 	/* not an attachment, just a refcount inc, always allow */
1925 	if (!attach_type)
1926 		return true;
1927 
1928 	if (prog->type != *attach_type)
1929 		return false;
1930 	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
1931 		return false;
1932 
1933 	return true;
1934 }
1935 
1936 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
1937 				       bool attach_drv)
1938 {
1939 	struct fd f = fdget(ufd);
1940 	struct bpf_prog *prog;
1941 
1942 	prog = ____bpf_prog_get(f);
1943 	if (IS_ERR(prog))
1944 		return prog;
1945 	if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
1946 		prog = ERR_PTR(-EINVAL);
1947 		goto out;
1948 	}
1949 
1950 	bpf_prog_inc(prog);
1951 out:
1952 	fdput(f);
1953 	return prog;
1954 }
1955 
1956 struct bpf_prog *bpf_prog_get(u32 ufd)
1957 {
1958 	return __bpf_prog_get(ufd, NULL, false);
1959 }
1960 
1961 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1962 				       bool attach_drv)
1963 {
1964 	return __bpf_prog_get(ufd, &type, attach_drv);
1965 }
1966 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
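
/* A minimal sketch, not taken from this file, of how a driver-side path
 * might use the helpers above; example_setup_xdp() and its arguments are
 * hypothetical, while bpf_prog_get_type_dev()/bpf_prog_put() are the real
 * interfaces:
 *
 *	static int example_setup_xdp(struct net_device *dev, int prog_fd)
 *	{
 *		struct bpf_prog *prog;
 *
 *		prog = bpf_prog_get_type_dev(prog_fd, BPF_PROG_TYPE_XDP, true);
 *		if (IS_ERR(prog))
 *			return PTR_ERR(prog);
 *
 *		// ... install prog on the device; the reference taken here
 *		// stays held for as long as the program remains attached ...
 *		return 0;
 *	}
 *
 * with the matching teardown path calling bpf_prog_put(prog) once the
 * program is detached.
 */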
1967 
1968 /* Initially all BPF programs could be loaded w/o specifying
1969  * expected_attach_type. Later for some of them specifying expected_attach_type
1970  * at load time became required so that the program could be validated properly.
1971  * Programs of types that are allowed to be loaded both w/ and w/o (for
1972  * backward compatibility) expected_attach_type should have the default attach
1973  * type assigned to expected_attach_type for the latter case, so that it can be
1974  * validated later at attach time.
1975  *
1976  * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if
1977  * the prog type requires it but has some attach types that have to be backward
1978  * compatible.
1979  */
1980 static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr)
1981 {
1982 	switch (attr->prog_type) {
1983 	case BPF_PROG_TYPE_CGROUP_SOCK:
1984 		/* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't
1985 		 * exist so checking for non-zero is the way to go here.
1986 		 */
1987 		if (!attr->expected_attach_type)
1988 			attr->expected_attach_type =
1989 				BPF_CGROUP_INET_SOCK_CREATE;
1990 		break;
1991 	}
1992 }
1993 
1994 static int
1995 bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
1996 			   enum bpf_attach_type expected_attach_type,
1997 			   u32 btf_id, u32 prog_fd)
1998 {
1999 	if (btf_id) {
2000 		if (btf_id > BTF_MAX_TYPE)
2001 			return -EINVAL;
2002 
2003 		switch (prog_type) {
2004 		case BPF_PROG_TYPE_TRACING:
2005 		case BPF_PROG_TYPE_LSM:
2006 		case BPF_PROG_TYPE_STRUCT_OPS:
2007 		case BPF_PROG_TYPE_EXT:
2008 			break;
2009 		default:
2010 			return -EINVAL;
2011 		}
2012 	}
2013 
2014 	if (prog_fd && prog_type != BPF_PROG_TYPE_TRACING &&
2015 	    prog_type != BPF_PROG_TYPE_EXT)
2016 		return -EINVAL;
2017 
2018 	switch (prog_type) {
2019 	case BPF_PROG_TYPE_CGROUP_SOCK:
2020 		switch (expected_attach_type) {
2021 		case BPF_CGROUP_INET_SOCK_CREATE:
2022 		case BPF_CGROUP_INET_SOCK_RELEASE:
2023 		case BPF_CGROUP_INET4_POST_BIND:
2024 		case BPF_CGROUP_INET6_POST_BIND:
2025 			return 0;
2026 		default:
2027 			return -EINVAL;
2028 		}
2029 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2030 		switch (expected_attach_type) {
2031 		case BPF_CGROUP_INET4_BIND:
2032 		case BPF_CGROUP_INET6_BIND:
2033 		case BPF_CGROUP_INET4_CONNECT:
2034 		case BPF_CGROUP_INET6_CONNECT:
2035 		case BPF_CGROUP_INET4_GETPEERNAME:
2036 		case BPF_CGROUP_INET6_GETPEERNAME:
2037 		case BPF_CGROUP_INET4_GETSOCKNAME:
2038 		case BPF_CGROUP_INET6_GETSOCKNAME:
2039 		case BPF_CGROUP_UDP4_SENDMSG:
2040 		case BPF_CGROUP_UDP6_SENDMSG:
2041 		case BPF_CGROUP_UDP4_RECVMSG:
2042 		case BPF_CGROUP_UDP6_RECVMSG:
2043 			return 0;
2044 		default:
2045 			return -EINVAL;
2046 		}
2047 	case BPF_PROG_TYPE_CGROUP_SKB:
2048 		switch (expected_attach_type) {
2049 		case BPF_CGROUP_INET_INGRESS:
2050 		case BPF_CGROUP_INET_EGRESS:
2051 			return 0;
2052 		default:
2053 			return -EINVAL;
2054 		}
2055 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2056 		switch (expected_attach_type) {
2057 		case BPF_CGROUP_SETSOCKOPT:
2058 		case BPF_CGROUP_GETSOCKOPT:
2059 			return 0;
2060 		default:
2061 			return -EINVAL;
2062 		}
2063 	case BPF_PROG_TYPE_SK_LOOKUP:
2064 		if (expected_attach_type == BPF_SK_LOOKUP)
2065 			return 0;
2066 		return -EINVAL;
2067 	case BPF_PROG_TYPE_EXT:
2068 		if (expected_attach_type)
2069 			return -EINVAL;
2070 		fallthrough;
2071 	default:
2072 		return 0;
2073 	}
2074 }
2075 
2076 static bool is_net_admin_prog_type(enum bpf_prog_type prog_type)
2077 {
2078 	switch (prog_type) {
2079 	case BPF_PROG_TYPE_SCHED_CLS:
2080 	case BPF_PROG_TYPE_SCHED_ACT:
2081 	case BPF_PROG_TYPE_XDP:
2082 	case BPF_PROG_TYPE_LWT_IN:
2083 	case BPF_PROG_TYPE_LWT_OUT:
2084 	case BPF_PROG_TYPE_LWT_XMIT:
2085 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
2086 	case BPF_PROG_TYPE_SK_SKB:
2087 	case BPF_PROG_TYPE_SK_MSG:
2088 	case BPF_PROG_TYPE_LIRC_MODE2:
2089 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
2090 	case BPF_PROG_TYPE_CGROUP_DEVICE:
2091 	case BPF_PROG_TYPE_CGROUP_SOCK:
2092 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2093 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2094 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
2095 	case BPF_PROG_TYPE_SOCK_OPS:
2096 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2097 		return true;
2098 	case BPF_PROG_TYPE_CGROUP_SKB:
2099 		/* always unpriv */
2100 	case BPF_PROG_TYPE_SK_REUSEPORT:
2101 		/* equivalent to SOCKET_FILTER. need CAP_BPF only */
2102 	default:
2103 		return false;
2104 	}
2105 }
2106 
2107 static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
2108 {
2109 	switch (prog_type) {
2110 	case BPF_PROG_TYPE_KPROBE:
2111 	case BPF_PROG_TYPE_TRACEPOINT:
2112 	case BPF_PROG_TYPE_PERF_EVENT:
2113 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2114 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2115 	case BPF_PROG_TYPE_TRACING:
2116 	case BPF_PROG_TYPE_LSM:
2117 	case BPF_PROG_TYPE_STRUCT_OPS: /* has access to struct sock */
2118 	case BPF_PROG_TYPE_EXT: /* extends any prog */
2119 		return true;
2120 	default:
2121 		return false;
2122 	}
2123 }
2124 
2125 /* last field in 'union bpf_attr' used by this command */
2126 #define	BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
2127 
2128 static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
2129 {
2130 	enum bpf_prog_type type = attr->prog_type;
2131 	struct bpf_prog *prog;
2132 	int err;
2133 	char license[128];
2134 	bool is_gpl;
2135 
2136 	if (CHECK_ATTR(BPF_PROG_LOAD))
2137 		return -EINVAL;
2138 
2139 	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
2140 				 BPF_F_ANY_ALIGNMENT |
2141 				 BPF_F_TEST_STATE_FREQ |
2142 				 BPF_F_SLEEPABLE |
2143 				 BPF_F_TEST_RND_HI32))
2144 		return -EINVAL;
2145 
2146 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
2147 	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
2148 	    !bpf_capable())
2149 		return -EPERM;
2150 
2151 	/* copy eBPF program license from user space */
2152 	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
2153 			      sizeof(license) - 1) < 0)
2154 		return -EFAULT;
2155 	license[sizeof(license) - 1] = 0;
2156 
2157 	/* eBPF programs must be GPL compatible to use GPL-ed functions */
2158 	is_gpl = license_is_gpl_compatible(license);
2159 
2160 	if (attr->insn_cnt == 0 ||
2161 	    attr->insn_cnt > (bpf_capable() ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS))
2162 		return -E2BIG;
2163 	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
2164 	    type != BPF_PROG_TYPE_CGROUP_SKB &&
2165 	    !bpf_capable())
2166 		return -EPERM;
2167 
2168 	if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
2169 		return -EPERM;
2170 	if (is_perfmon_prog_type(type) && !perfmon_capable())
2171 		return -EPERM;
2172 
2173 	bpf_prog_load_fixup_attach_type(attr);
2174 	if (bpf_prog_load_check_attach(type, attr->expected_attach_type,
2175 				       attr->attach_btf_id,
2176 				       attr->attach_prog_fd))
2177 		return -EINVAL;
2178 
2179 	/* plain bpf_prog allocation */
2180 	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
2181 	if (!prog)
2182 		return -ENOMEM;
2183 
2184 	prog->expected_attach_type = attr->expected_attach_type;
2185 	prog->aux->attach_btf_id = attr->attach_btf_id;
2186 	if (attr->attach_prog_fd) {
2187 		struct bpf_prog *dst_prog;
2188 
2189 		dst_prog = bpf_prog_get(attr->attach_prog_fd);
2190 		if (IS_ERR(dst_prog)) {
2191 			err = PTR_ERR(dst_prog);
2192 			goto free_prog_nouncharge;
2193 		}
2194 		prog->aux->dst_prog = dst_prog;
2195 	}
2196 
2197 	prog->aux->offload_requested = !!attr->prog_ifindex;
2198 	prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
2199 
2200 	err = security_bpf_prog_alloc(prog->aux);
2201 	if (err)
2202 		goto free_prog_nouncharge;
2203 
2204 	err = bpf_prog_charge_memlock(prog);
2205 	if (err)
2206 		goto free_prog_sec;
2207 
2208 	prog->len = attr->insn_cnt;
2209 
2210 	err = -EFAULT;
2211 	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
2212 			   bpf_prog_insn_size(prog)) != 0)
2213 		goto free_prog;
2214 
2215 	prog->orig_prog = NULL;
2216 	prog->jited = 0;
2217 
2218 	atomic64_set(&prog->aux->refcnt, 1);
2219 	prog->gpl_compatible = is_gpl ? 1 : 0;
2220 
2221 	if (bpf_prog_is_dev_bound(prog->aux)) {
2222 		err = bpf_prog_offload_init(prog, attr);
2223 		if (err)
2224 			goto free_prog;
2225 	}
2226 
2227 	/* find program type: socket_filter vs tracing_filter */
2228 	err = find_prog_type(type, prog);
2229 	if (err < 0)
2230 		goto free_prog;
2231 
2232 	prog->aux->load_time = ktime_get_boottime_ns();
2233 	err = bpf_obj_name_cpy(prog->aux->name, attr->prog_name,
2234 			       sizeof(attr->prog_name));
2235 	if (err < 0)
2236 		goto free_prog;
2237 
2238 	/* run eBPF verifier */
2239 	err = bpf_check(&prog, attr, uattr);
2240 	if (err < 0)
2241 		goto free_used_maps;
2242 
2243 	prog = bpf_prog_select_runtime(prog, &err);
2244 	if (err < 0)
2245 		goto free_used_maps;
2246 
2247 	err = bpf_prog_alloc_id(prog);
2248 	if (err)
2249 		goto free_used_maps;
2250 
2251 	/* Upon success of bpf_prog_alloc_id(), the BPF prog is
2252 	 * effectively publicly exposed. However, retrieving via
2253 	 * bpf_prog_get_fd_by_id() will take another reference,
2254 	 * so it cannot go away underneath us.
2255 	 *
2256 	 * Only for the time /after/ successful bpf_prog_new_fd()
2257 	 * and before returning to userspace, we might just hold
2258 	 * one reference and any parallel close on that fd could
2259 	 * rip everything out. Hence, below notifications must
2260 	 * happen before bpf_prog_new_fd().
2261 	 *
2262 	 * Also, any failure handling from this point onwards must
2263 	 * be using bpf_prog_put() given the program is exposed.
2264 	 */
2265 	bpf_prog_kallsyms_add(prog);
2266 	perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
2267 	bpf_audit_prog(prog, BPF_AUDIT_LOAD);
2268 
2269 	err = bpf_prog_new_fd(prog);
2270 	if (err < 0)
2271 		bpf_prog_put(prog);
2272 	return err;
2273 
2274 free_used_maps:
2275 	/* In case we have subprogs, we need to wait for a grace
2276 	 * period before we can tear down JIT memory since symbols
2277 	 * are already exposed under kallsyms.
2278 	 */
2279 	__bpf_prog_put_noref(prog, prog->aux->func_cnt);
2280 	return err;
2281 free_prog:
2282 	bpf_prog_uncharge_memlock(prog);
2283 free_prog_sec:
2284 	security_bpf_prog_free(prog->aux);
2285 free_prog_nouncharge:
2286 	bpf_prog_free(prog);
2287 	return err;
2288 }
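
/* Userspace view, as a hedged sketch: loading a minimal "return 0" socket
 * filter through the raw bpf(2) syscall that lands in bpf_prog_load()
 * above. Field names come from include/uapi/linux/bpf.h; error handling
 * and the optional verifier log are omitted:
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int load_return0_prog(void)
 *	{
 *		struct bpf_insn insns[] = {
 *			{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *			  .dst_reg = BPF_REG_0, .imm = 0 },	// r0 = 0
 *			{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *		};
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *		attr.insns     = (unsigned long)insns;
 *		attr.insn_cnt  = 2;
 *		attr.license   = (unsigned long)"GPL";
 *
 *		return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *	}
 */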
2289 
2290 #define BPF_OBJ_LAST_FIELD file_flags
2291 
2292 static int bpf_obj_pin(const union bpf_attr *attr)
2293 {
2294 	if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0)
2295 		return -EINVAL;
2296 
2297 	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
2298 }
2299 
2300 static int bpf_obj_get(const union bpf_attr *attr)
2301 {
2302 	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 ||
2303 	    attr->file_flags & ~BPF_OBJ_FLAG_MASK)
2304 		return -EINVAL;
2305 
2306 	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname),
2307 				attr->file_flags);
2308 }
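
/* Hedged userspace sketch of the two commands handled above: pinning an
 * already-loaded program fd into a bpffs path and reopening it later. The
 * path and prog_fd are examples and assume bpffs is mounted at /sys/fs/bpf:
 *
 *	union bpf_attr attr;
 *	int err, new_fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = (unsigned long)"/sys/fs/bpf/my_prog";
 *	attr.bpf_fd   = prog_fd;
 *	err = syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.pathname = (unsigned long)"/sys/fs/bpf/my_prog";
 *	new_fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */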
2309 
2310 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
2311 		   const struct bpf_link_ops *ops, struct bpf_prog *prog)
2312 {
2313 	atomic64_set(&link->refcnt, 1);
2314 	link->type = type;
2315 	link->id = 0;
2316 	link->ops = ops;
2317 	link->prog = prog;
2318 }
2319 
2320 static void bpf_link_free_id(int id)
2321 {
2322 	if (!id)
2323 		return;
2324 
2325 	spin_lock_bh(&link_idr_lock);
2326 	idr_remove(&link_idr, id);
2327 	spin_unlock_bh(&link_idr_lock);
2328 }
2329 
2330 /* Clean up bpf_link and corresponding anon_inode file and FD. After
2331  * anon_inode is created, bpf_link can't be just kfree()'d due to deferred
2332  * anon_inode's release() call. This helper marks bpf_link as
2333  * defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
2334  * is not decremented, it's the responsibility of the calling code that failed
2335  * to complete bpf_link initialization.
2336  */
2337 void bpf_link_cleanup(struct bpf_link_primer *primer)
2338 {
2339 	primer->link->prog = NULL;
2340 	bpf_link_free_id(primer->id);
2341 	fput(primer->file);
2342 	put_unused_fd(primer->fd);
2343 }
2344 
2345 void bpf_link_inc(struct bpf_link *link)
2346 {
2347 	atomic64_inc(&link->refcnt);
2348 }
2349 
2350 /* bpf_link_free is guaranteed to be called from process context */
2351 static void bpf_link_free(struct bpf_link *link)
2352 {
2353 	bpf_link_free_id(link->id);
2354 	if (link->prog) {
2355 		/* detach BPF program, clean up used resources */
2356 		link->ops->release(link);
2357 		bpf_prog_put(link->prog);
2358 	}
2359 	/* free bpf_link and its containing memory */
2360 	link->ops->dealloc(link);
2361 }
2362 
2363 static void bpf_link_put_deferred(struct work_struct *work)
2364 {
2365 	struct bpf_link *link = container_of(work, struct bpf_link, work);
2366 
2367 	bpf_link_free(link);
2368 }
2369 
2370 /* bpf_link_put can be called from atomic context, but ensures that resources
2371  * are freed from process context
2372  */
2373 void bpf_link_put(struct bpf_link *link)
2374 {
2375 	if (!atomic64_dec_and_test(&link->refcnt))
2376 		return;
2377 
2378 	if (in_atomic()) {
2379 		INIT_WORK(&link->work, bpf_link_put_deferred);
2380 		schedule_work(&link->work);
2381 	} else {
2382 		bpf_link_free(link);
2383 	}
2384 }
2385 
2386 static int bpf_link_release(struct inode *inode, struct file *filp)
2387 {
2388 	struct bpf_link *link = filp->private_data;
2389 
2390 	bpf_link_put(link);
2391 	return 0;
2392 }
2393 
2394 #ifdef CONFIG_PROC_FS
2395 #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
2396 #define BPF_MAP_TYPE(_id, _ops)
2397 #define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
2398 static const char *bpf_link_type_strs[] = {
2399 	[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
2400 #include <linux/bpf_types.h>
2401 };
2402 #undef BPF_PROG_TYPE
2403 #undef BPF_MAP_TYPE
2404 #undef BPF_LINK_TYPE
2405 
2406 static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
2407 {
2408 	const struct bpf_link *link = filp->private_data;
2409 	const struct bpf_prog *prog = link->prog;
2410 	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
2411 
2412 	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
2413 	seq_printf(m,
2414 		   "link_type:\t%s\n"
2415 		   "link_id:\t%u\n"
2416 		   "prog_tag:\t%s\n"
2417 		   "prog_id:\t%u\n",
2418 		   bpf_link_type_strs[link->type],
2419 		   link->id,
2420 		   prog_tag,
2421 		   prog->aux->id);
2422 	if (link->ops->show_fdinfo)
2423 		link->ops->show_fdinfo(link, m);
2424 }
2425 #endif
2426 
2427 static const struct file_operations bpf_link_fops = {
2428 #ifdef CONFIG_PROC_FS
2429 	.show_fdinfo	= bpf_link_show_fdinfo,
2430 #endif
2431 	.release	= bpf_link_release,
2432 	.read		= bpf_dummy_read,
2433 	.write		= bpf_dummy_write,
2434 };
2435 
2436 static int bpf_link_alloc_id(struct bpf_link *link)
2437 {
2438 	int id;
2439 
2440 	idr_preload(GFP_KERNEL);
2441 	spin_lock_bh(&link_idr_lock);
2442 	id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
2443 	spin_unlock_bh(&link_idr_lock);
2444 	idr_preload_end();
2445 
2446 	return id;
2447 }
2448 
2449 /* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
2450  * reserving unused FD and allocating ID from link_idr. This is to be paired
2451  * with bpf_link_settle() to install FD and ID and expose bpf_link to
2452  * user-space, if bpf_link is successfully attached. If not, bpf_link and
2453  * pre-allocated resources are to be freed with bpf_link_cleanup() call. All the
2454  * transient state is passed around in struct bpf_link_primer.
2455  * This is the preferred way to create and initialize bpf_link, especially when
2456  * there are complicated and expensive operations in between creating bpf_link
2457  * itself and attaching it to a BPF hook. By using bpf_link_prime() and
2458  * bpf_link_settle(), kernel code using bpf_link doesn't have to perform
2459  * expensive (and potentially failing) roll back operations in the rare case
2460  * that the file, FD, or ID can't be allocated.
2461  */
2462 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
2463 {
2464 	struct file *file;
2465 	int fd, id;
2466 
2467 	fd = get_unused_fd_flags(O_CLOEXEC);
2468 	if (fd < 0)
2469 		return fd;
2470 
2472 	id = bpf_link_alloc_id(link);
2473 	if (id < 0) {
2474 		put_unused_fd(fd);
2475 		return id;
2476 	}
2477 
2478 	file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
2479 	if (IS_ERR(file)) {
2480 		bpf_link_free_id(id);
2481 		put_unused_fd(fd);
2482 		return PTR_ERR(file);
2483 	}
2484 
2485 	primer->link = link;
2486 	primer->file = file;
2487 	primer->fd = fd;
2488 	primer->id = id;
2489 	return 0;
2490 }
2491 
2492 int bpf_link_settle(struct bpf_link_primer *primer)
2493 {
2494 	/* make bpf_link fetchable by ID */
2495 	spin_lock_bh(&link_idr_lock);
2496 	primer->link->id = primer->id;
2497 	spin_unlock_bh(&link_idr_lock);
2498 	/* make bpf_link fetchable by FD */
2499 	fd_install(primer->fd, primer->file);
2500 	/* pass through installed FD */
2501 	return primer->fd;
2502 }
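
/* A sketch of the intended calling sequence around bpf_link_prime() and
 * bpf_link_settle(), mirroring what bpf_raw_tracepoint_open() below does;
 * the raw_tp names are borrowed from that case and the attach step stands
 * in for whatever hook-specific registration a caller performs:
 *
 *	struct bpf_link_primer primer;
 *	int err;
 *
 *	bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
 *		      &bpf_raw_tp_link_lops, prog);
 *	err = bpf_link_prime(&link->link, &primer);
 *	if (err) {
 *		kfree(link);			// not yet exposed, plain free
 *		return err;
 *	}
 *	err = bpf_probe_register(link->btp, prog);	// hook-specific attach
 *	if (err) {
 *		bpf_link_cleanup(&primer);	// drops FD/ID/file, not prog
 *		return err;
 *	}
 *	return bpf_link_settle(&primer);	// install FD/ID, return the fd
 */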
2503 
2504 int bpf_link_new_fd(struct bpf_link *link)
2505 {
2506 	return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
2507 }
2508 
2509 struct bpf_link *bpf_link_get_from_fd(u32 ufd)
2510 {
2511 	struct fd f = fdget(ufd);
2512 	struct bpf_link *link;
2513 
2514 	if (!f.file)
2515 		return ERR_PTR(-EBADF);
2516 	if (f.file->f_op != &bpf_link_fops) {
2517 		fdput(f);
2518 		return ERR_PTR(-EINVAL);
2519 	}
2520 
2521 	link = f.file->private_data;
2522 	bpf_link_inc(link);
2523 	fdput(f);
2524 
2525 	return link;
2526 }
2527 
2528 struct bpf_tracing_link {
2529 	struct bpf_link link;
2530 	enum bpf_attach_type attach_type;
2531 	struct bpf_trampoline *trampoline;
2532 	struct bpf_prog *tgt_prog;
2533 };
2534 
2535 static void bpf_tracing_link_release(struct bpf_link *link)
2536 {
2537 	struct bpf_tracing_link *tr_link =
2538 		container_of(link, struct bpf_tracing_link, link);
2539 
2540 	WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog,
2541 						tr_link->trampoline));
2542 
2543 	bpf_trampoline_put(tr_link->trampoline);
2544 
2545 	/* tgt_prog is NULL if target is a kernel function */
2546 	if (tr_link->tgt_prog)
2547 		bpf_prog_put(tr_link->tgt_prog);
2548 }
2549 
2550 static void bpf_tracing_link_dealloc(struct bpf_link *link)
2551 {
2552 	struct bpf_tracing_link *tr_link =
2553 		container_of(link, struct bpf_tracing_link, link);
2554 
2555 	kfree(tr_link);
2556 }
2557 
2558 static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
2559 					 struct seq_file *seq)
2560 {
2561 	struct bpf_tracing_link *tr_link =
2562 		container_of(link, struct bpf_tracing_link, link);
2563 
2564 	seq_printf(seq,
2565 		   "attach_type:\t%d\n",
2566 		   tr_link->attach_type);
2567 }
2568 
2569 static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
2570 					   struct bpf_link_info *info)
2571 {
2572 	struct bpf_tracing_link *tr_link =
2573 		container_of(link, struct bpf_tracing_link, link);
2574 
2575 	info->tracing.attach_type = tr_link->attach_type;
2576 
2577 	return 0;
2578 }
2579 
2580 static const struct bpf_link_ops bpf_tracing_link_lops = {
2581 	.release = bpf_tracing_link_release,
2582 	.dealloc = bpf_tracing_link_dealloc,
2583 	.show_fdinfo = bpf_tracing_link_show_fdinfo,
2584 	.fill_link_info = bpf_tracing_link_fill_link_info,
2585 };
2586 
2587 static int bpf_tracing_prog_attach(struct bpf_prog *prog,
2588 				   int tgt_prog_fd,
2589 				   u32 btf_id)
2590 {
2591 	struct bpf_link_primer link_primer;
2592 	struct bpf_prog *tgt_prog = NULL;
2593 	struct bpf_trampoline *tr = NULL;
2594 	struct bpf_tracing_link *link;
2595 	u64 key = 0;
2596 	int err;
2597 
2598 	switch (prog->type) {
2599 	case BPF_PROG_TYPE_TRACING:
2600 		if (prog->expected_attach_type != BPF_TRACE_FENTRY &&
2601 		    prog->expected_attach_type != BPF_TRACE_FEXIT &&
2602 		    prog->expected_attach_type != BPF_MODIFY_RETURN) {
2603 			err = -EINVAL;
2604 			goto out_put_prog;
2605 		}
2606 		break;
2607 	case BPF_PROG_TYPE_EXT:
2608 		if (prog->expected_attach_type != 0) {
2609 			err = -EINVAL;
2610 			goto out_put_prog;
2611 		}
2612 		break;
2613 	case BPF_PROG_TYPE_LSM:
2614 		if (prog->expected_attach_type != BPF_LSM_MAC) {
2615 			err = -EINVAL;
2616 			goto out_put_prog;
2617 		}
2618 		break;
2619 	default:
2620 		err = -EINVAL;
2621 		goto out_put_prog;
2622 	}
2623 
2624 	if (!!tgt_prog_fd != !!btf_id) {
2625 		err = -EINVAL;
2626 		goto out_put_prog;
2627 	}
2628 
2629 	if (tgt_prog_fd) {
2630 		/* For now we only allow new targets for BPF_PROG_TYPE_EXT */
2631 		if (prog->type != BPF_PROG_TYPE_EXT) {
2632 			err = -EINVAL;
2633 			goto out_put_prog;
2634 		}
2635 
2636 		tgt_prog = bpf_prog_get(tgt_prog_fd);
2637 		if (IS_ERR(tgt_prog)) {
2638 			err = PTR_ERR(tgt_prog);
2639 			tgt_prog = NULL;
2640 			goto out_put_prog;
2641 		}
2642 
2643 		key = bpf_trampoline_compute_key(tgt_prog, btf_id);
2644 	}
2645 
2646 	link = kzalloc(sizeof(*link), GFP_USER);
2647 	if (!link) {
2648 		err = -ENOMEM;
2649 		goto out_put_prog;
2650 	}
2651 	bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
2652 		      &bpf_tracing_link_lops, prog);
2653 	link->attach_type = prog->expected_attach_type;
2654 
2655 	mutex_lock(&prog->aux->dst_mutex);
2656 
2657 	/* There are a few possible cases here:
2658 	 *
2659 	 * - if prog->aux->dst_trampoline is set, the program was just loaded
2660 	 *   and not yet attached to anything, so we can use the values stored
2661 	 *   in prog->aux
2662 	 *
2663 	 * - if prog->aux->dst_trampoline is NULL, the program has already been
2664 	 *   attached to a target and its initial target was cleared (below)
2665 	 *
2666 	 * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
2667 	 *   target_btf_id using the link_create API.
2668 	 *
2669 	 * - if tgt_prog == NULL, this function was called using the old
2670 	 *   raw_tracepoint_open API, and we need a target from prog->aux
2671 	 *
2672 	 * The combination of no saved target in prog->aux and no target
2673 	 * specified on load is illegal, and we reject that here.
2674 	 */
2675 	if (!prog->aux->dst_trampoline && !tgt_prog) {
2676 		err = -ENOENT;
2677 		goto out_unlock;
2678 	}
2679 
2680 	if (!prog->aux->dst_trampoline ||
2681 	    (key && key != prog->aux->dst_trampoline->key)) {
2682 		/* If there is no saved target, or the specified target is
2683 		 * different from the destination specified at load time, we
2684 		 * need a new trampoline and a check for compatibility
2685 		 */
2686 		struct bpf_attach_target_info tgt_info = {};
2687 
2688 		err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
2689 					      &tgt_info);
2690 		if (err)
2691 			goto out_unlock;
2692 
2693 		tr = bpf_trampoline_get(key, &tgt_info);
2694 		if (!tr) {
2695 			err = -ENOMEM;
2696 			goto out_unlock;
2697 		}
2698 	} else {
2699 		/* The caller didn't specify a target, or the target was the
2700 		 * same as the destination supplied during program load. This
2701 		 * means we can reuse the trampoline and reference from program
2702 		 * load time, and there is no need to allocate a new one. This
2703 		 * can only happen once for any program, as the saved values in
2704 		 * prog->aux are cleared below.
2705 		 */
2706 		tr = prog->aux->dst_trampoline;
2707 		tgt_prog = prog->aux->dst_prog;
2708 	}
2709 
2710 	err = bpf_link_prime(&link->link, &link_primer);
2711 	if (err)
2712 		goto out_unlock;
2713 
2714 	err = bpf_trampoline_link_prog(prog, tr);
2715 	if (err) {
2716 		bpf_link_cleanup(&link_primer);
2717 		link = NULL;
2718 		goto out_unlock;
2719 	}
2720 
2721 	link->tgt_prog = tgt_prog;
2722 	link->trampoline = tr;
2723 
2724 	/* Always clear the trampoline and target prog from prog->aux to make
2725 	 * sure the original attach destination is not kept alive after a
2726 	 * program is (re-)attached to another target.
2727 	 */
2728 	if (prog->aux->dst_prog &&
2729 	    (tgt_prog_fd || tr != prog->aux->dst_trampoline))
2730 		/* got extra prog ref from syscall, or attaching to different prog */
2731 		bpf_prog_put(prog->aux->dst_prog);
2732 	if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
2733 		/* we allocated a new trampoline, so free the old one */
2734 		bpf_trampoline_put(prog->aux->dst_trampoline);
2735 
2736 	prog->aux->dst_prog = NULL;
2737 	prog->aux->dst_trampoline = NULL;
2738 	mutex_unlock(&prog->aux->dst_mutex);
2739 
2740 	return bpf_link_settle(&link_primer);
2741 out_unlock:
2742 	if (tr && tr != prog->aux->dst_trampoline)
2743 		bpf_trampoline_put(tr);
2744 	mutex_unlock(&prog->aux->dst_mutex);
2745 	kfree(link);
2746 out_put_prog:
2747 	if (tgt_prog_fd && tgt_prog)
2748 		bpf_prog_put(tgt_prog);
2749 	return err;
2750 }
2751 
2752 struct bpf_raw_tp_link {
2753 	struct bpf_link link;
2754 	struct bpf_raw_event_map *btp;
2755 };
2756 
2757 static void bpf_raw_tp_link_release(struct bpf_link *link)
2758 {
2759 	struct bpf_raw_tp_link *raw_tp =
2760 		container_of(link, struct bpf_raw_tp_link, link);
2761 
2762 	bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
2763 	bpf_put_raw_tracepoint(raw_tp->btp);
2764 }
2765 
2766 static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
2767 {
2768 	struct bpf_raw_tp_link *raw_tp =
2769 		container_of(link, struct bpf_raw_tp_link, link);
2770 
2771 	kfree(raw_tp);
2772 }
2773 
2774 static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
2775 					struct seq_file *seq)
2776 {
2777 	struct bpf_raw_tp_link *raw_tp_link =
2778 		container_of(link, struct bpf_raw_tp_link, link);
2779 
2780 	seq_printf(seq,
2781 		   "tp_name:\t%s\n",
2782 		   raw_tp_link->btp->tp->name);
2783 }
2784 
2785 static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
2786 					  struct bpf_link_info *info)
2787 {
2788 	struct bpf_raw_tp_link *raw_tp_link =
2789 		container_of(link, struct bpf_raw_tp_link, link);
2790 	char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
2791 	const char *tp_name = raw_tp_link->btp->tp->name;
2792 	u32 ulen = info->raw_tracepoint.tp_name_len;
2793 	size_t tp_len = strlen(tp_name);
2794 
2795 	if (!ulen ^ !ubuf)
2796 		return -EINVAL;
2797 
2798 	info->raw_tracepoint.tp_name_len = tp_len + 1;
2799 
2800 	if (!ubuf)
2801 		return 0;
2802 
2803 	if (ulen >= tp_len + 1) {
2804 		if (copy_to_user(ubuf, tp_name, tp_len + 1))
2805 			return -EFAULT;
2806 	} else {
2807 		char zero = '\0';
2808 
2809 		if (copy_to_user(ubuf, tp_name, ulen - 1))
2810 			return -EFAULT;
2811 		if (put_user(zero, ubuf + ulen - 1))
2812 			return -EFAULT;
2813 		return -ENOSPC;
2814 	}
2815 
2816 	return 0;
2817 }
2818 
2819 static const struct bpf_link_ops bpf_raw_tp_link_lops = {
2820 	.release = bpf_raw_tp_link_release,
2821 	.dealloc = bpf_raw_tp_link_dealloc,
2822 	.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
2823 	.fill_link_info = bpf_raw_tp_link_fill_link_info,
2824 };
2825 
2826 #define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
2827 
2828 static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
2829 {
2830 	struct bpf_link_primer link_primer;
2831 	struct bpf_raw_tp_link *link;
2832 	struct bpf_raw_event_map *btp;
2833 	struct bpf_prog *prog;
2834 	const char *tp_name;
2835 	char buf[128];
2836 	int err;
2837 
2838 	if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
2839 		return -EINVAL;
2840 
2841 	prog = bpf_prog_get(attr->raw_tracepoint.prog_fd);
2842 	if (IS_ERR(prog))
2843 		return PTR_ERR(prog);
2844 
2845 	switch (prog->type) {
2846 	case BPF_PROG_TYPE_TRACING:
2847 	case BPF_PROG_TYPE_EXT:
2848 	case BPF_PROG_TYPE_LSM:
2849 		if (attr->raw_tracepoint.name) {
2850 			/* The attach point for this category of programs
2851 			 * should be specified via btf_id during program load.
2852 			 */
2853 			err = -EINVAL;
2854 			goto out_put_prog;
2855 		}
2856 		if (prog->type == BPF_PROG_TYPE_TRACING &&
2857 		    prog->expected_attach_type == BPF_TRACE_RAW_TP) {
2858 			tp_name = prog->aux->attach_func_name;
2859 			break;
2860 		}
2861 		err = bpf_tracing_prog_attach(prog, 0, 0);
2862 		if (err >= 0)
2863 			return err;
2864 		goto out_put_prog;
2865 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
2866 	case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
2867 		if (strncpy_from_user(buf,
2868 				      u64_to_user_ptr(attr->raw_tracepoint.name),
2869 				      sizeof(buf) - 1) < 0) {
2870 			err = -EFAULT;
2871 			goto out_put_prog;
2872 		}
2873 		buf[sizeof(buf) - 1] = 0;
2874 		tp_name = buf;
2875 		break;
2876 	default:
2877 		err = -EINVAL;
2878 		goto out_put_prog;
2879 	}
2880 
2881 	btp = bpf_get_raw_tracepoint(tp_name);
2882 	if (!btp) {
2883 		err = -ENOENT;
2884 		goto out_put_prog;
2885 	}
2886 
2887 	link = kzalloc(sizeof(*link), GFP_USER);
2888 	if (!link) {
2889 		err = -ENOMEM;
2890 		goto out_put_btp;
2891 	}
2892 	bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
2893 		      &bpf_raw_tp_link_lops, prog);
2894 	link->btp = btp;
2895 
2896 	err = bpf_link_prime(&link->link, &link_primer);
2897 	if (err) {
2898 		kfree(link);
2899 		goto out_put_btp;
2900 	}
2901 
2902 	err = bpf_probe_register(link->btp, prog);
2903 	if (err) {
2904 		bpf_link_cleanup(&link_primer);
2905 		goto out_put_btp;
2906 	}
2907 
2908 	return bpf_link_settle(&link_primer);
2909 
2910 out_put_btp:
2911 	bpf_put_raw_tracepoint(btp);
2912 out_put_prog:
2913 	bpf_prog_put(prog);
2914 	return err;
2915 }
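
/* Hedged userspace sketch of the command above: attaching an already loaded
 * BPF_PROG_TYPE_RAW_TRACEPOINT program to a named raw tracepoint. The
 * tracepoint name "sched_switch" and prog_fd are examples only:
 *
 *	union bpf_attr attr;
 *	int link_fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.raw_tracepoint.name    = (unsigned long)"sched_switch";
 *	attr.raw_tracepoint.prog_fd = prog_fd;
 *	link_fd = syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN,
 *			  &attr, sizeof(attr));
 *	// closing link_fd detaches the program again
 */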
2916 
2917 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
2918 					     enum bpf_attach_type attach_type)
2919 {
2920 	switch (prog->type) {
2921 	case BPF_PROG_TYPE_CGROUP_SOCK:
2922 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
2923 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
2924 	case BPF_PROG_TYPE_SK_LOOKUP:
2925 		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
2926 	case BPF_PROG_TYPE_CGROUP_SKB:
2927 		if (!capable(CAP_NET_ADMIN))
2928 			/* cg-skb progs can be loaded by unpriv user.
2929 			 * check permissions at attach time.
2930 			 */
2931 			return -EPERM;
2932 		return prog->enforce_expected_attach_type &&
2933 			prog->expected_attach_type != attach_type ?
2934 			-EINVAL : 0;
2935 	default:
2936 		return 0;
2937 	}
2938 }
2939 
2940 static enum bpf_prog_type
2941 attach_type_to_prog_type(enum bpf_attach_type attach_type)
2942 {
2943 	switch (attach_type) {
2944 	case BPF_CGROUP_INET_INGRESS:
2945 	case BPF_CGROUP_INET_EGRESS:
2946 		return BPF_PROG_TYPE_CGROUP_SKB;
2947 	case BPF_CGROUP_INET_SOCK_CREATE:
2948 	case BPF_CGROUP_INET_SOCK_RELEASE:
2949 	case BPF_CGROUP_INET4_POST_BIND:
2950 	case BPF_CGROUP_INET6_POST_BIND:
2951 		return BPF_PROG_TYPE_CGROUP_SOCK;
2952 	case BPF_CGROUP_INET4_BIND:
2953 	case BPF_CGROUP_INET6_BIND:
2954 	case BPF_CGROUP_INET4_CONNECT:
2955 	case BPF_CGROUP_INET6_CONNECT:
2956 	case BPF_CGROUP_INET4_GETPEERNAME:
2957 	case BPF_CGROUP_INET6_GETPEERNAME:
2958 	case BPF_CGROUP_INET4_GETSOCKNAME:
2959 	case BPF_CGROUP_INET6_GETSOCKNAME:
2960 	case BPF_CGROUP_UDP4_SENDMSG:
2961 	case BPF_CGROUP_UDP6_SENDMSG:
2962 	case BPF_CGROUP_UDP4_RECVMSG:
2963 	case BPF_CGROUP_UDP6_RECVMSG:
2964 		return BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
2965 	case BPF_CGROUP_SOCK_OPS:
2966 		return BPF_PROG_TYPE_SOCK_OPS;
2967 	case BPF_CGROUP_DEVICE:
2968 		return BPF_PROG_TYPE_CGROUP_DEVICE;
2969 	case BPF_SK_MSG_VERDICT:
2970 		return BPF_PROG_TYPE_SK_MSG;
2971 	case BPF_SK_SKB_STREAM_PARSER:
2972 	case BPF_SK_SKB_STREAM_VERDICT:
2973 		return BPF_PROG_TYPE_SK_SKB;
2974 	case BPF_LIRC_MODE2:
2975 		return BPF_PROG_TYPE_LIRC_MODE2;
2976 	case BPF_FLOW_DISSECTOR:
2977 		return BPF_PROG_TYPE_FLOW_DISSECTOR;
2978 	case BPF_CGROUP_SYSCTL:
2979 		return BPF_PROG_TYPE_CGROUP_SYSCTL;
2980 	case BPF_CGROUP_GETSOCKOPT:
2981 	case BPF_CGROUP_SETSOCKOPT:
2982 		return BPF_PROG_TYPE_CGROUP_SOCKOPT;
2983 	case BPF_TRACE_ITER:
2984 		return BPF_PROG_TYPE_TRACING;
2985 	case BPF_SK_LOOKUP:
2986 		return BPF_PROG_TYPE_SK_LOOKUP;
2987 	case BPF_XDP:
2988 		return BPF_PROG_TYPE_XDP;
2989 	default:
2990 		return BPF_PROG_TYPE_UNSPEC;
2991 	}
2992 }
2993 
2994 #define BPF_PROG_ATTACH_LAST_FIELD replace_bpf_fd
2995 
2996 #define BPF_F_ATTACH_MASK \
2997 	(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI | BPF_F_REPLACE)
2998 
2999 static int bpf_prog_attach(const union bpf_attr *attr)
3000 {
3001 	enum bpf_prog_type ptype;
3002 	struct bpf_prog *prog;
3003 	int ret;
3004 
3005 	if (CHECK_ATTR(BPF_PROG_ATTACH))
3006 		return -EINVAL;
3007 
3008 	if (attr->attach_flags & ~BPF_F_ATTACH_MASK)
3009 		return -EINVAL;
3010 
3011 	ptype = attach_type_to_prog_type(attr->attach_type);
3012 	if (ptype == BPF_PROG_TYPE_UNSPEC)
3013 		return -EINVAL;
3014 
3015 	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
3016 	if (IS_ERR(prog))
3017 		return PTR_ERR(prog);
3018 
3019 	if (bpf_prog_attach_check_attach_type(prog, attr->attach_type)) {
3020 		bpf_prog_put(prog);
3021 		return -EINVAL;
3022 	}
3023 
3024 	switch (ptype) {
3025 	case BPF_PROG_TYPE_SK_SKB:
3026 	case BPF_PROG_TYPE_SK_MSG:
3027 		ret = sock_map_get_from_fd(attr, prog);
3028 		break;
3029 	case BPF_PROG_TYPE_LIRC_MODE2:
3030 		ret = lirc_prog_attach(attr, prog);
3031 		break;
3032 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3033 		ret = netns_bpf_prog_attach(attr, prog);
3034 		break;
3035 	case BPF_PROG_TYPE_CGROUP_DEVICE:
3036 	case BPF_PROG_TYPE_CGROUP_SKB:
3037 	case BPF_PROG_TYPE_CGROUP_SOCK:
3038 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3039 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3040 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3041 	case BPF_PROG_TYPE_SOCK_OPS:
3042 		ret = cgroup_bpf_prog_attach(attr, ptype, prog);
3043 		break;
3044 	default:
3045 		ret = -EINVAL;
3046 	}
3047 
3048 	if (ret)
3049 		bpf_prog_put(prog);
3050 	return ret;
3051 }
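
/* Hedged userspace sketch of BPF_PROG_ATTACH for the cgroup case handled by
 * cgroup_bpf_prog_attach() above: attach a BPF_PROG_TYPE_CGROUP_SKB program
 * to the egress hook of a cgroup whose directory fd was opened beforehand.
 * The fds and flags are examples; BPF_PROG_DETACH takes the same target_fd
 * and attach_type to undo this:
 *
 *	union bpf_attr attr;
 *	int err;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.target_fd     = cgroup_fd;	// e.g. open("/sys/fs/cgroup/...", O_RDONLY)
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */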
3052 
3053 #define BPF_PROG_DETACH_LAST_FIELD attach_type
3054 
3055 static int bpf_prog_detach(const union bpf_attr *attr)
3056 {
3057 	enum bpf_prog_type ptype;
3058 
3059 	if (CHECK_ATTR(BPF_PROG_DETACH))
3060 		return -EINVAL;
3061 
3062 	ptype = attach_type_to_prog_type(attr->attach_type);
3063 
3064 	switch (ptype) {
3065 	case BPF_PROG_TYPE_SK_MSG:
3066 	case BPF_PROG_TYPE_SK_SKB:
3067 		return sock_map_prog_detach(attr, ptype);
3068 	case BPF_PROG_TYPE_LIRC_MODE2:
3069 		return lirc_prog_detach(attr);
3070 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
3071 		return netns_bpf_prog_detach(attr, ptype);
3072 	case BPF_PROG_TYPE_CGROUP_DEVICE:
3073 	case BPF_PROG_TYPE_CGROUP_SKB:
3074 	case BPF_PROG_TYPE_CGROUP_SOCK:
3075 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
3076 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
3077 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
3078 	case BPF_PROG_TYPE_SOCK_OPS:
3079 		return cgroup_bpf_prog_detach(attr, ptype);
3080 	default:
3081 		return -EINVAL;
3082 	}
3083 }
3084 
3085 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
3086 
3087 static int bpf_prog_query(const union bpf_attr *attr,
3088 			  union bpf_attr __user *uattr)
3089 {
3090 	if (!capable(CAP_NET_ADMIN))
3091 		return -EPERM;
3092 	if (CHECK_ATTR(BPF_PROG_QUERY))
3093 		return -EINVAL;
3094 	if (attr->query.query_flags & ~BPF_F_QUERY_EFFECTIVE)
3095 		return -EINVAL;
3096 
3097 	switch (attr->query.attach_type) {
3098 	case BPF_CGROUP_INET_INGRESS:
3099 	case BPF_CGROUP_INET_EGRESS:
3100 	case BPF_CGROUP_INET_SOCK_CREATE:
3101 	case BPF_CGROUP_INET_SOCK_RELEASE:
3102 	case BPF_CGROUP_INET4_BIND:
3103 	case BPF_CGROUP_INET6_BIND:
3104 	case BPF_CGROUP_INET4_POST_BIND:
3105 	case BPF_CGROUP_INET6_POST_BIND:
3106 	case BPF_CGROUP_INET4_CONNECT:
3107 	case BPF_CGROUP_INET6_CONNECT:
3108 	case BPF_CGROUP_INET4_GETPEERNAME:
3109 	case BPF_CGROUP_INET6_GETPEERNAME:
3110 	case BPF_CGROUP_INET4_GETSOCKNAME:
3111 	case BPF_CGROUP_INET6_GETSOCKNAME:
3112 	case BPF_CGROUP_UDP4_SENDMSG:
3113 	case BPF_CGROUP_UDP6_SENDMSG:
3114 	case BPF_CGROUP_UDP4_RECVMSG:
3115 	case BPF_CGROUP_UDP6_RECVMSG:
3116 	case BPF_CGROUP_SOCK_OPS:
3117 	case BPF_CGROUP_DEVICE:
3118 	case BPF_CGROUP_SYSCTL:
3119 	case BPF_CGROUP_GETSOCKOPT:
3120 	case BPF_CGROUP_SETSOCKOPT:
3121 		return cgroup_bpf_prog_query(attr, uattr);
3122 	case BPF_LIRC_MODE2:
3123 		return lirc_prog_query(attr, uattr);
3124 	case BPF_FLOW_DISSECTOR:
3125 	case BPF_SK_LOOKUP:
3126 		return netns_bpf_prog_query(attr, uattr);
3127 	default:
3128 		return -EINVAL;
3129 	}
3130 }
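
/* Hedged sketch of BPF_PROG_QUERY from userspace (CAP_NET_ADMIN required, as
 * checked above): list the program IDs attached to a cgroup's egress hook.
 * The array size and cgroup_fd are arbitrary examples:
 *
 *	__u32 ids[16];
 *	union bpf_attr attr;
 *	int err;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.query.target_fd   = cgroup_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
 *	attr.query.prog_ids    = (unsigned long)ids;
 *	attr.query.prog_cnt    = 16;
 *	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 *	// on success attr.query.prog_cnt holds the number of attached programs
 */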
3131 
3132 #define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu
3133 
3134 static int bpf_prog_test_run(const union bpf_attr *attr,
3135 			     union bpf_attr __user *uattr)
3136 {
3137 	struct bpf_prog *prog;
3138 	int ret = -ENOTSUPP;
3139 
3140 	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
3141 		return -EINVAL;
3142 
3143 	if ((attr->test.ctx_size_in && !attr->test.ctx_in) ||
3144 	    (!attr->test.ctx_size_in && attr->test.ctx_in))
3145 		return -EINVAL;
3146 
3147 	if ((attr->test.ctx_size_out && !attr->test.ctx_out) ||
3148 	    (!attr->test.ctx_size_out && attr->test.ctx_out))
3149 		return -EINVAL;
3150 
3151 	prog = bpf_prog_get(attr->test.prog_fd);
3152 	if (IS_ERR(prog))
3153 		return PTR_ERR(prog);
3154 
3155 	if (prog->aux->ops->test_run)
3156 		ret = prog->aux->ops->test_run(prog, attr, uattr);
3157 
3158 	bpf_prog_put(prog);
3159 	return ret;
3160 }
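
/* Hedged userspace sketch of BPF_PROG_TEST_RUN for a program type whose ops
 * implement test_run (e.g. socket filters): data_in carries an input packet,
 * data_out receives the possibly modified result. Buffer sizes are examples:
 *
 *	unsigned char in[64] = { 0 }, out[256];
 *	union bpf_attr attr;
 *	int err;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.test.prog_fd       = prog_fd;
 *	attr.test.data_in       = (unsigned long)in;
 *	attr.test.data_size_in  = sizeof(in);
 *	attr.test.data_out      = (unsigned long)out;
 *	attr.test.data_size_out = sizeof(out);
 *	attr.test.repeat        = 1;
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *	// on success attr.test.retval and attr.test.duration are filled in
 */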
3161 
3162 #define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
3163 
3164 static int bpf_obj_get_next_id(const union bpf_attr *attr,
3165 			       union bpf_attr __user *uattr,
3166 			       struct idr *idr,
3167 			       spinlock_t *lock)
3168 {
3169 	u32 next_id = attr->start_id;
3170 	int err = 0;
3171 
3172 	if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
3173 		return -EINVAL;
3174 
3175 	if (!capable(CAP_SYS_ADMIN))
3176 		return -EPERM;
3177 
3178 	next_id++;
3179 	spin_lock_bh(lock);
3180 	if (!idr_get_next(idr, &next_id))
3181 		err = -ENOENT;
3182 	spin_unlock_bh(lock);
3183 
3184 	if (!err)
3185 		err = put_user(next_id, &uattr->next_id);
3186 
3187 	return err;
3188 }
3189 
3190 struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
3191 {
3192 	struct bpf_map *map;
3193 
3194 	spin_lock_bh(&map_idr_lock);
3195 again:
3196 	map = idr_get_next(&map_idr, id);
3197 	if (map) {
3198 		map = __bpf_map_inc_not_zero(map, false);
3199 		if (IS_ERR(map)) {
3200 			(*id)++;
3201 			goto again;
3202 		}
3203 	}
3204 	spin_unlock_bh(&map_idr_lock);
3205 
3206 	return map;
3207 }
3208 
3209 struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id)
3210 {
3211 	struct bpf_prog *prog;
3212 
3213 	spin_lock_bh(&prog_idr_lock);
3214 again:
3215 	prog = idr_get_next(&prog_idr, id);
3216 	if (prog) {
3217 		prog = bpf_prog_inc_not_zero(prog);
3218 		if (IS_ERR(prog)) {
3219 			(*id)++;
3220 			goto again;
3221 		}
3222 	}
3223 	spin_unlock_bh(&prog_idr_lock);
3224 
3225 	return prog;
3226 }
3227 
3228 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
3229 
3230 struct bpf_prog *bpf_prog_by_id(u32 id)
3231 {
3232 	struct bpf_prog *prog;
3233 
3234 	if (!id)
3235 		return ERR_PTR(-ENOENT);
3236 
3237 	spin_lock_bh(&prog_idr_lock);
3238 	prog = idr_find(&prog_idr, id);
3239 	if (prog)
3240 		prog = bpf_prog_inc_not_zero(prog);
3241 	else
3242 		prog = ERR_PTR(-ENOENT);
3243 	spin_unlock_bh(&prog_idr_lock);
3244 	return prog;
3245 }
3246 
3247 static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
3248 {
3249 	struct bpf_prog *prog;
3250 	u32 id = attr->prog_id;
3251 	int fd;
3252 
3253 	if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
3254 		return -EINVAL;
3255 
3256 	if (!capable(CAP_SYS_ADMIN))
3257 		return -EPERM;
3258 
3259 	prog = bpf_prog_by_id(id);
3260 	if (IS_ERR(prog))
3261 		return PTR_ERR(prog);
3262 
3263 	fd = bpf_prog_new_fd(prog);
3264 	if (fd < 0)
3265 		bpf_prog_put(prog);
3266 
3267 	return fd;
3268 }
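
/* Hedged sketch of how the ID interfaces above are typically combined from
 * userspace (CAP_SYS_ADMIN required), the way tools such as bpftool iterate
 * over every loaded program:
 *
 *	__u32 id = 0;
 *	union bpf_attr attr;
 *	int fd;
 *
 *	for (;;) {
 *		memset(&attr, 0, sizeof(attr));
 *		attr.start_id = id;
 *		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)))
 *			break;			// no more programs
 *		id = attr.next_id;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.prog_id = id;
 *		fd = syscall(__NR_bpf, BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
 *		if (fd < 0)
 *			continue;		// program may have gone away
 *		// ... inspect fd (e.g. BPF_OBJ_GET_INFO_BY_FD), then close(fd)
 *	}
 */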
3269 
3270 #define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags
3271 
3272 static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
3273 {
3274 	struct bpf_map *map;
3275 	u32 id = attr->map_id;
3276 	int f_flags;
3277 	int fd;
3278 
3279 	if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) ||
3280 	    attr->open_flags & ~BPF_OBJ_FLAG_MASK)
3281 		return -EINVAL;
3282 
3283 	if (!capable(CAP_SYS_ADMIN))
3284 		return -EPERM;
3285 
3286 	f_flags = bpf_get_file_flag(attr->open_flags);
3287 	if (f_flags < 0)
3288 		return f_flags;
3289 
3290 	spin_lock_bh(&map_idr_lock);
3291 	map = idr_find(&map_idr, id);
3292 	if (map)
3293 		map = __bpf_map_inc_not_zero(map, true);
3294 	else
3295 		map = ERR_PTR(-ENOENT);
3296 	spin_unlock_bh(&map_idr_lock);
3297 
3298 	if (IS_ERR(map))
3299 		return PTR_ERR(map);
3300 
3301 	fd = bpf_map_new_fd(map, f_flags);
3302 	if (fd < 0)
3303 		bpf_map_put_with_uref(map);
3304 
3305 	return fd;
3306 }
3307 
3308 static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
3309 					      unsigned long addr, u32 *off,
3310 					      u32 *type)
3311 {
3312 	const struct bpf_map *map;
3313 	int i;
3314 
3315 	mutex_lock(&prog->aux->used_maps_mutex);
3316 	for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
3317 		map = prog->aux->used_maps[i];
3318 		if (map == (void *)addr) {
3319 			*type = BPF_PSEUDO_MAP_FD;
3320 			goto out;
3321 		}
3322 		if (!map->ops->map_direct_value_meta)
3323 			continue;
3324 		if (!map->ops->map_direct_value_meta(map, addr, off)) {
3325 			*type = BPF_PSEUDO_MAP_VALUE;
3326 			goto out;
3327 		}
3328 	}
3329 	map = NULL;
3330 
3331 out:
3332 	mutex_unlock(&prog->aux->used_maps_mutex);
3333 	return map;
3334 }
3335 
3336 static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
3337 					      const struct cred *f_cred)
3338 {
3339 	const struct bpf_map *map;
3340 	struct bpf_insn *insns;
3341 	u32 off, type;
3342 	u64 imm;
3343 	u8 code;
3344 	int i;
3345 
3346 	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
3347 			GFP_USER);
3348 	if (!insns)
3349 		return insns;
3350 
3351 	for (i = 0; i < prog->len; i++) {
3352 		code = insns[i].code;
3353 
3354 		if (code == (BPF_JMP | BPF_TAIL_CALL)) {
3355 			insns[i].code = BPF_JMP | BPF_CALL;
3356 			insns[i].imm = BPF_FUNC_tail_call;
3357 			/* fall-through */
3358 		}
3359 		if (code == (BPF_JMP | BPF_CALL) ||
3360 		    code == (BPF_JMP | BPF_CALL_ARGS)) {
3361 			if (code == (BPF_JMP | BPF_CALL_ARGS))
3362 				insns[i].code = BPF_JMP | BPF_CALL;
3363 			if (!bpf_dump_raw_ok(f_cred))
3364 				insns[i].imm = 0;
3365 			continue;
3366 		}
3367 		if (BPF_CLASS(code) == BPF_LDX && BPF_MODE(code) == BPF_PROBE_MEM) {
3368 			insns[i].code = BPF_LDX | BPF_SIZE(code) | BPF_MEM;
3369 			continue;
3370 		}
3371 
3372 		if (code != (BPF_LD | BPF_IMM | BPF_DW))
3373 			continue;
3374 
3375 		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
3376 		map = bpf_map_from_imm(prog, imm, &off, &type);
3377 		if (map) {
3378 			insns[i].src_reg = type;
3379 			insns[i].imm = map->id;
3380 			insns[i + 1].imm = off;
3381 			continue;
3382 		}
3383 	}
3384 
3385 	return insns;
3386 }
3387 
3388 static int set_info_rec_size(struct bpf_prog_info *info)
3389 {
3390 	/*
3391 	 * Ensure info.*_rec_size is the same as kernel expected size
3392 	 *
3393 	 * or
3394 	 *
3395 	 * Only allow zero *_rec_size if both _rec_size and _cnt are
3396 	 * zero.  In this case, the kernel will set the expected
3397 	 * _rec_size back to the info.
3398 	 */
3399 
3400 	if ((info->nr_func_info || info->func_info_rec_size) &&
3401 	    info->func_info_rec_size != sizeof(struct bpf_func_info))
3402 		return -EINVAL;
3403 
3404 	if ((info->nr_line_info || info->line_info_rec_size) &&
3405 	    info->line_info_rec_size != sizeof(struct bpf_line_info))
3406 		return -EINVAL;
3407 
3408 	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
3409 	    info->jited_line_info_rec_size != sizeof(__u64))
3410 		return -EINVAL;
3411 
3412 	info->func_info_rec_size = sizeof(struct bpf_func_info);
3413 	info->line_info_rec_size = sizeof(struct bpf_line_info);
3414 	info->jited_line_info_rec_size = sizeof(__u64);
3415 
3416 	return 0;
3417 }
3418 
3419 static int bpf_prog_get_info_by_fd(struct file *file,
3420 				   struct bpf_prog *prog,
3421 				   const union bpf_attr *attr,
3422 				   union bpf_attr __user *uattr)
3423 {
3424 	struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3425 	struct bpf_prog_info info;
3426 	u32 info_len = attr->info.info_len;
3427 	struct bpf_prog_stats stats;
3428 	char __user *uinsns;
3429 	u32 ulen;
3430 	int err;
3431 
3432 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3433 	if (err)
3434 		return err;
3435 	info_len = min_t(u32, sizeof(info), info_len);
3436 
3437 	memset(&info, 0, sizeof(info));
3438 	if (copy_from_user(&info, uinfo, info_len))
3439 		return -EFAULT;
3440 
3441 	info.type = prog->type;
3442 	info.id = prog->aux->id;
3443 	info.load_time = prog->aux->load_time;
3444 	info.created_by_uid = from_kuid_munged(current_user_ns(),
3445 					       prog->aux->user->uid);
3446 	info.gpl_compatible = prog->gpl_compatible;
3447 
3448 	memcpy(info.tag, prog->tag, sizeof(prog->tag));
3449 	memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
3450 
3451 	mutex_lock(&prog->aux->used_maps_mutex);
3452 	ulen = info.nr_map_ids;
3453 	info.nr_map_ids = prog->aux->used_map_cnt;
3454 	ulen = min_t(u32, info.nr_map_ids, ulen);
3455 	if (ulen) {
3456 		u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids);
3457 		u32 i;
3458 
3459 		for (i = 0; i < ulen; i++)
3460 			if (put_user(prog->aux->used_maps[i]->id,
3461 				     &user_map_ids[i])) {
3462 				mutex_unlock(&prog->aux->used_maps_mutex);
3463 				return -EFAULT;
3464 			}
3465 	}
3466 	mutex_unlock(&prog->aux->used_maps_mutex);
3467 
3468 	err = set_info_rec_size(&info);
3469 	if (err)
3470 		return err;
3471 
3472 	bpf_prog_get_stats(prog, &stats);
3473 	info.run_time_ns = stats.nsecs;
3474 	info.run_cnt = stats.cnt;
3475 
3476 	if (!bpf_capable()) {
3477 		info.jited_prog_len = 0;
3478 		info.xlated_prog_len = 0;
3479 		info.nr_jited_ksyms = 0;
3480 		info.nr_jited_func_lens = 0;
3481 		info.nr_func_info = 0;
3482 		info.nr_line_info = 0;
3483 		info.nr_jited_line_info = 0;
3484 		goto done;
3485 	}
3486 
3487 	ulen = info.xlated_prog_len;
3488 	info.xlated_prog_len = bpf_prog_insn_size(prog);
3489 	if (info.xlated_prog_len && ulen) {
3490 		struct bpf_insn *insns_sanitized;
3491 		bool fault;
3492 
3493 		if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
3494 			info.xlated_prog_insns = 0;
3495 			goto done;
3496 		}
3497 		insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
3498 		if (!insns_sanitized)
3499 			return -ENOMEM;
3500 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
3501 		ulen = min_t(u32, info.xlated_prog_len, ulen);
3502 		fault = copy_to_user(uinsns, insns_sanitized, ulen);
3503 		kfree(insns_sanitized);
3504 		if (fault)
3505 			return -EFAULT;
3506 	}
3507 
3508 	if (bpf_prog_is_dev_bound(prog->aux)) {
3509 		err = bpf_prog_offload_info_fill(&info, prog);
3510 		if (err)
3511 			return err;
3512 		goto done;
3513 	}
3514 
3515 	/* NOTE: the following code is supposed to be skipped for offload.
3516 	 * bpf_prog_offload_info_fill() is the place to fill similar fields
3517 	 * for offload.
3518 	 */
3519 	ulen = info.jited_prog_len;
3520 	if (prog->aux->func_cnt) {
3521 		u32 i;
3522 
3523 		info.jited_prog_len = 0;
3524 		for (i = 0; i < prog->aux->func_cnt; i++)
3525 			info.jited_prog_len += prog->aux->func[i]->jited_len;
3526 	} else {
3527 		info.jited_prog_len = prog->jited_len;
3528 	}
3529 
3530 	if (info.jited_prog_len && ulen) {
3531 		if (bpf_dump_raw_ok(file->f_cred)) {
3532 			uinsns = u64_to_user_ptr(info.jited_prog_insns);
3533 			ulen = min_t(u32, info.jited_prog_len, ulen);
3534 
3535 			/* for multi-function programs, copy the JITed
3536 			 * instructions for all the functions
3537 			 */
3538 			if (prog->aux->func_cnt) {
3539 				u32 len, free, i;
3540 				u8 *img;
3541 
3542 				free = ulen;
3543 				for (i = 0; i < prog->aux->func_cnt; i++) {
3544 					len = prog->aux->func[i]->jited_len;
3545 					len = min_t(u32, len, free);
3546 					img = (u8 *) prog->aux->func[i]->bpf_func;
3547 					if (copy_to_user(uinsns, img, len))
3548 						return -EFAULT;
3549 					uinsns += len;
3550 					free -= len;
3551 					if (!free)
3552 						break;
3553 				}
3554 			} else {
3555 				if (copy_to_user(uinsns, prog->bpf_func, ulen))
3556 					return -EFAULT;
3557 			}
3558 		} else {
3559 			info.jited_prog_insns = 0;
3560 		}
3561 	}
3562 
3563 	ulen = info.nr_jited_ksyms;
3564 	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
3565 	if (ulen) {
3566 		if (bpf_dump_raw_ok(file->f_cred)) {
3567 			unsigned long ksym_addr;
3568 			u64 __user *user_ksyms;
3569 			u32 i;
3570 
3571 			/* copy the address of the kernel symbol
3572 			 * corresponding to each function
3573 			 */
3574 			ulen = min_t(u32, info.nr_jited_ksyms, ulen);
3575 			user_ksyms = u64_to_user_ptr(info.jited_ksyms);
3576 			if (prog->aux->func_cnt) {
3577 				for (i = 0; i < ulen; i++) {
3578 					ksym_addr = (unsigned long)
3579 						prog->aux->func[i]->bpf_func;
3580 					if (put_user((u64) ksym_addr,
3581 						     &user_ksyms[i]))
3582 						return -EFAULT;
3583 				}
3584 			} else {
3585 				ksym_addr = (unsigned long) prog->bpf_func;
3586 				if (put_user((u64) ksym_addr, &user_ksyms[0]))
3587 					return -EFAULT;
3588 			}
3589 		} else {
3590 			info.jited_ksyms = 0;
3591 		}
3592 	}
3593 
3594 	ulen = info.nr_jited_func_lens;
3595 	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
3596 	if (ulen) {
3597 		if (bpf_dump_raw_ok(file->f_cred)) {
3598 			u32 __user *user_lens;
3599 			u32 func_len, i;
3600 
3601 			/* copy the JITed image lengths for each function */
3602 			ulen = min_t(u32, info.nr_jited_func_lens, ulen);
3603 			user_lens = u64_to_user_ptr(info.jited_func_lens);
3604 			if (prog->aux->func_cnt) {
3605 				for (i = 0; i < ulen; i++) {
3606 					func_len =
3607 						prog->aux->func[i]->jited_len;
3608 					if (put_user(func_len, &user_lens[i]))
3609 						return -EFAULT;
3610 				}
3611 			} else {
3612 				func_len = prog->jited_len;
3613 				if (put_user(func_len, &user_lens[0]))
3614 					return -EFAULT;
3615 			}
3616 		} else {
3617 			info.jited_func_lens = 0;
3618 		}
3619 	}
3620 
3621 	if (prog->aux->btf)
3622 		info.btf_id = btf_id(prog->aux->btf);
3623 
3624 	ulen = info.nr_func_info;
3625 	info.nr_func_info = prog->aux->func_info_cnt;
3626 	if (info.nr_func_info && ulen) {
3627 		char __user *user_finfo;
3628 
3629 		user_finfo = u64_to_user_ptr(info.func_info);
3630 		ulen = min_t(u32, info.nr_func_info, ulen);
3631 		if (copy_to_user(user_finfo, prog->aux->func_info,
3632 				 info.func_info_rec_size * ulen))
3633 			return -EFAULT;
3634 	}
3635 
3636 	ulen = info.nr_line_info;
3637 	info.nr_line_info = prog->aux->nr_linfo;
3638 	if (info.nr_line_info && ulen) {
3639 		__u8 __user *user_linfo;
3640 
3641 		user_linfo = u64_to_user_ptr(info.line_info);
3642 		ulen = min_t(u32, info.nr_line_info, ulen);
3643 		if (copy_to_user(user_linfo, prog->aux->linfo,
3644 				 info.line_info_rec_size * ulen))
3645 			return -EFAULT;
3646 	}
3647 
3648 	ulen = info.nr_jited_line_info;
3649 	if (prog->aux->jited_linfo)
3650 		info.nr_jited_line_info = prog->aux->nr_linfo;
3651 	else
3652 		info.nr_jited_line_info = 0;
3653 	if (info.nr_jited_line_info && ulen) {
3654 		if (bpf_dump_raw_ok(file->f_cred)) {
3655 			__u64 __user *user_linfo;
3656 			u32 i;
3657 
3658 			user_linfo = u64_to_user_ptr(info.jited_line_info);
3659 			ulen = min_t(u32, info.nr_jited_line_info, ulen);
3660 			for (i = 0; i < ulen; i++) {
3661 				if (put_user((__u64)(long)prog->aux->jited_linfo[i],
3662 					     &user_linfo[i]))
3663 					return -EFAULT;
3664 			}
3665 		} else {
3666 			info.jited_line_info = 0;
3667 		}
3668 	}
3669 
3670 	ulen = info.nr_prog_tags;
3671 	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
3672 	if (ulen) {
3673 		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
3674 		u32 i;
3675 
3676 		user_prog_tags = u64_to_user_ptr(info.prog_tags);
3677 		ulen = min_t(u32, info.nr_prog_tags, ulen);
3678 		if (prog->aux->func_cnt) {
3679 			for (i = 0; i < ulen; i++) {
3680 				if (copy_to_user(user_prog_tags[i],
3681 						 prog->aux->func[i]->tag,
3682 						 BPF_TAG_SIZE))
3683 					return -EFAULT;
3684 			}
3685 		} else {
3686 			if (copy_to_user(user_prog_tags[0],
3687 					 prog->tag, BPF_TAG_SIZE))
3688 				return -EFAULT;
3689 		}
3690 	}
3691 
3692 done:
3693 	if (copy_to_user(uinfo, &info, info_len) ||
3694 	    put_user(info_len, &uattr->info.info_len))
3695 		return -EFAULT;
3696 
3697 	return 0;
3698 }
3699 
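/* Fill a bpf_map_info with the map's attributes (type, id, key/value
 * sizes, max_entries, flags, name and BTF type ids) and copy back as
 * much of it as the caller's info_len allows.
 */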
3700 static int bpf_map_get_info_by_fd(struct file *file,
3701 				  struct bpf_map *map,
3702 				  const union bpf_attr *attr,
3703 				  union bpf_attr __user *uattr)
3704 {
3705 	struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3706 	struct bpf_map_info info;
3707 	u32 info_len = attr->info.info_len;
3708 	int err;
3709 
3710 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3711 	if (err)
3712 		return err;
3713 	info_len = min_t(u32, sizeof(info), info_len);
3714 
3715 	memset(&info, 0, sizeof(info));
3716 	info.type = map->map_type;
3717 	info.id = map->id;
3718 	info.key_size = map->key_size;
3719 	info.value_size = map->value_size;
3720 	info.max_entries = map->max_entries;
3721 	info.map_flags = map->map_flags;
3722 	memcpy(info.name, map->name, sizeof(map->name));
3723 
3724 	if (map->btf) {
3725 		info.btf_id = btf_id(map->btf);
3726 		info.btf_key_type_id = map->btf_key_type_id;
3727 		info.btf_value_type_id = map->btf_value_type_id;
3728 	}
3729 	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
3730 
3731 	if (bpf_map_is_dev_bound(map)) {
3732 		err = bpf_map_offload_info_fill(&info, map);
3733 		if (err)
3734 			return err;
3735 	}
3736 
3737 	if (copy_to_user(uinfo, &info, info_len) ||
3738 	    put_user(info_len, &uattr->info.info_len))
3739 		return -EFAULT;
3740 
3741 	return 0;
3742 }
3743 
3744 static int bpf_btf_get_info_by_fd(struct file *file,
3745 				  struct btf *btf,
3746 				  const union bpf_attr *attr,
3747 				  union bpf_attr __user *uattr)
3748 {
3749 	struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3750 	u32 info_len = attr->info.info_len;
3751 	int err;
3752 
3753 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
3754 	if (err)
3755 		return err;
3756 
3757 	return btf_get_info_by_fd(btf, attr, uattr);
3758 }
3759 
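/* Report generic link info (type, id, owning prog id) and let the
 * link type fill in type-specific details via ->fill_link_info().
 */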
3760 static int bpf_link_get_info_by_fd(struct file *file,
3761 				  struct bpf_link *link,
3762 				  const union bpf_attr *attr,
3763 				  union bpf_attr __user *uattr)
3764 {
3765 	struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
3766 	struct bpf_link_info info;
3767 	u32 info_len = attr->info.info_len;
3768 	int err;
3769 
3770 	err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
3771 	if (err)
3772 		return err;
3773 	info_len = min_t(u32, sizeof(info), info_len);
3774 
3775 	memset(&info, 0, sizeof(info));
3776 	if (copy_from_user(&info, uinfo, info_len))
3777 		return -EFAULT;
3778 
3779 	info.type = link->type;
3780 	info.id = link->id;
3781 	info.prog_id = link->prog->aux->id;
3782 
3783 	if (link->ops->fill_link_info) {
3784 		err = link->ops->fill_link_info(link, &info);
3785 		if (err)
3786 			return err;
3787 	}
3788 
3789 	if (copy_to_user(uinfo, &info, info_len) ||
3790 	    put_user(info_len, &uattr->info.info_len))
3791 		return -EFAULT;
3792 
3793 	return 0;
3794 }
3795 
3796 
3797 #define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
3798 
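/* BPF_OBJ_GET_INFO_BY_FD: dispatch on the fd's file_operations to the
 * prog, map, BTF or link specific info handler.
 */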
3799 static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
3800 				  union bpf_attr __user *uattr)
3801 {
3802 	int ufd = attr->info.bpf_fd;
3803 	struct fd f;
3804 	int err;
3805 
3806 	if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
3807 		return -EINVAL;
3808 
3809 	f = fdget(ufd);
3810 	if (!f.file)
3811 		return -EBADFD;
3812 
3813 	if (f.file->f_op == &bpf_prog_fops)
3814 		err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
3815 					      uattr);
3816 	else if (f.file->f_op == &bpf_map_fops)
3817 		err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
3818 					     uattr);
3819 	else if (f.file->f_op == &btf_fops)
3820 		err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
3821 	else if (f.file->f_op == &bpf_link_fops)
3822 		err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
3823 					      attr, uattr);
3824 	else
3825 		err = -EINVAL;
3826 
3827 	fdput(f);
3828 	return err;
3829 }
3830 
3831 #define BPF_BTF_LOAD_LAST_FIELD btf_log_level
3832 
3833 static int bpf_btf_load(const union bpf_attr *attr)
3834 {
3835 	if (CHECK_ATTR(BPF_BTF_LOAD))
3836 		return -EINVAL;
3837 
3838 	if (!bpf_capable())
3839 		return -EPERM;
3840 
3841 	return btf_new_fd(attr);
3842 }
3843 
3844 #define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
3845 
3846 static int bpf_btf_get_fd_by_id(const union bpf_attr *attr)
3847 {
3848 	if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID))
3849 		return -EINVAL;
3850 
3851 	if (!capable(CAP_SYS_ADMIN))
3852 		return -EPERM;
3853 
3854 	return btf_get_fd_by_id(attr->btf_id);
3855 }
3856 
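/* Copy the queried name into the user buffer, truncating with a NUL
 * terminator and returning -ENOSPC when it does not fit, then report
 * prog_id, fd_type, probe_offset and probe_addr through uattr.
 */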
3857 static int bpf_task_fd_query_copy(const union bpf_attr *attr,
3858 				    union bpf_attr __user *uattr,
3859 				    u32 prog_id, u32 fd_type,
3860 				    const char *buf, u64 probe_offset,
3861 				    u64 probe_addr)
3862 {
3863 	char __user *ubuf = u64_to_user_ptr(attr->task_fd_query.buf);
3864 	u32 len = buf ? strlen(buf) : 0, input_len;
3865 	int err = 0;
3866 
3867 	if (put_user(len, &uattr->task_fd_query.buf_len))
3868 		return -EFAULT;
3869 	input_len = attr->task_fd_query.buf_len;
3870 	if (input_len && ubuf) {
3871 		if (!len) {
3872 			/* nothing to copy, just make ubuf NULL terminated */
3873 			char zero = '\0';
3874 
3875 			if (put_user(zero, ubuf))
3876 				return -EFAULT;
3877 		} else if (input_len >= len + 1) {
3878 			/* ubuf can hold the string with NULL terminator */
3879 			if (copy_to_user(ubuf, buf, len + 1))
3880 				return -EFAULT;
3881 		} else {
3882 			/* ubuf cannot hold the string with NULL terminator,
3883 			 * do a partial copy with NULL terminator.
3884 			 */
3885 			char zero = '\0';
3886 
3887 			err = -ENOSPC;
3888 			if (copy_to_user(ubuf, buf, input_len - 1))
3889 				return -EFAULT;
3890 			if (put_user(zero, ubuf + input_len - 1))
3891 				return -EFAULT;
3892 		}
3893 	}
3894 
3895 	if (put_user(prog_id, &uattr->task_fd_query.prog_id) ||
3896 	    put_user(fd_type, &uattr->task_fd_query.fd_type) ||
3897 	    put_user(probe_offset, &uattr->task_fd_query.probe_offset) ||
3898 	    put_user(probe_addr, &uattr->task_fd_query.probe_addr))
3899 		return -EFAULT;
3900 
3901 	return err;
3902 }
3903 
3904 #define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr
3905 
3906 static int bpf_task_fd_query(const union bpf_attr *attr,
3907 			     union bpf_attr __user *uattr)
3908 {
3909 	pid_t pid = attr->task_fd_query.pid;
3910 	u32 fd = attr->task_fd_query.fd;
3911 	const struct perf_event *event;
3912 	struct files_struct *files;
3913 	struct task_struct *task;
3914 	struct file *file;
3915 	int err;
3916 
3917 	if (CHECK_ATTR(BPF_TASK_FD_QUERY))
3918 		return -EINVAL;
3919 
3920 	if (!capable(CAP_SYS_ADMIN))
3921 		return -EPERM;
3922 
3923 	if (attr->task_fd_query.flags != 0)
3924 		return -EINVAL;
3925 
3926 	rcu_read_lock();
3927 	task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
3928 	rcu_read_unlock();
3929 	if (!task)
3930 		return -ENOENT;
3931 
3932 	files = get_files_struct(task);
3933 	put_task_struct(task);
3934 	if (!files)
3935 		return -ENOENT;
3936 
3937 	err = 0;
3938 	spin_lock(&files->file_lock);
3939 	file = fcheck_files(files, fd);
3940 	if (!file)
3941 		err = -EBADF;
3942 	else
3943 		get_file(file);
3944 	spin_unlock(&files->file_lock);
3945 	put_files_struct(files);
3946 
3947 	if (err)
3948 		goto out;
3949 
3950 	if (file->f_op == &bpf_link_fops) {
3951 		struct bpf_link *link = file->private_data;
3952 
3953 		if (link->ops == &bpf_raw_tp_link_lops) {
3954 			struct bpf_raw_tp_link *raw_tp =
3955 				container_of(link, struct bpf_raw_tp_link, link);
3956 			struct bpf_raw_event_map *btp = raw_tp->btp;
3957 
3958 			err = bpf_task_fd_query_copy(attr, uattr,
3959 						     raw_tp->link.prog->aux->id,
3960 						     BPF_FD_TYPE_RAW_TRACEPOINT,
3961 						     btp->tp->name, 0, 0);
3962 			goto put_file;
3963 		}
3964 		goto out_not_supp;
3965 	}
3966 
3967 	event = perf_get_event(file);
3968 	if (!IS_ERR(event)) {
3969 		u64 probe_offset, probe_addr;
3970 		u32 prog_id, fd_type;
3971 		const char *buf;
3972 
3973 		err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
3974 					      &buf, &probe_offset,
3975 					      &probe_addr);
3976 		if (!err)
3977 			err = bpf_task_fd_query_copy(attr, uattr, prog_id,
3978 						     fd_type, buf,
3979 						     probe_offset,
3980 						     probe_addr);
3981 		goto put_file;
3982 	}
3983 
3984 out_not_supp:
3985 	err = -ENOTSUPP;
3986 put_file:
3987 	fput(file);
3988 out:
3989 	return err;
3990 }
3991 
3992 #define BPF_MAP_BATCH_LAST_FIELD batch.flags
3993 
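/* Invoke a map's batch callback if the map type implements it,
 * otherwise fail the batch command with -ENOTSUPP.
 */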
3994 #define BPF_DO_BATCH(fn)			\
3995 	do {					\
3996 		if (!fn) {			\
3997 			err = -ENOTSUPP;	\
3998 			goto err_put;		\
3999 		}				\
4000 		err = fn(map, attr, uattr);	\
4001 	} while (0)
4002 
4003 static int bpf_map_do_batch(const union bpf_attr *attr,
4004 			    union bpf_attr __user *uattr,
4005 			    int cmd)
4006 {
4007 	bool has_read  = cmd == BPF_MAP_LOOKUP_BATCH ||
4008 			 cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH;
4009 	bool has_write = cmd != BPF_MAP_LOOKUP_BATCH;
4010 	struct bpf_map *map;
4011 	int err, ufd;
4012 	struct fd f;
4013 
4014 	if (CHECK_ATTR(BPF_MAP_BATCH))
4015 		return -EINVAL;
4016 
4017 	ufd = attr->batch.map_fd;
4018 	f = fdget(ufd);
4019 	map = __bpf_map_get(f);
4020 	if (IS_ERR(map))
4021 		return PTR_ERR(map);
4022 	if (has_write)
4023 		bpf_map_write_active_inc(map);
4024 	if (has_read && !(map_get_sys_perms(map, f) & FMODE_CAN_READ)) {
4025 		err = -EPERM;
4026 		goto err_put;
4027 	}
4028 	if (has_write && !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) {
4029 		err = -EPERM;
4030 		goto err_put;
4031 	}
4032 
4033 	if (cmd == BPF_MAP_LOOKUP_BATCH)
4034 		BPF_DO_BATCH(map->ops->map_lookup_batch);
4035 	else if (cmd == BPF_MAP_LOOKUP_AND_DELETE_BATCH)
4036 		BPF_DO_BATCH(map->ops->map_lookup_and_delete_batch);
4037 	else if (cmd == BPF_MAP_UPDATE_BATCH)
4038 		BPF_DO_BATCH(map->ops->map_update_batch);
4039 	else
4040 		BPF_DO_BATCH(map->ops->map_delete_batch);
4041 err_put:
4042 	if (has_write)
4043 		bpf_map_write_active_dec(map);
4044 	fdput(f);
4045 	return err;
4046 }
4047 
4048 static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
4049 {
4050 	if (attr->link_create.attach_type != prog->expected_attach_type)
4051 		return -EINVAL;
4052 
4053 	if (prog->expected_attach_type == BPF_TRACE_ITER)
4054 		return bpf_iter_link_attach(attr, prog);
4055 	else if (prog->type == BPF_PROG_TYPE_EXT)
4056 		return bpf_tracing_prog_attach(prog,
4057 					       attr->link_create.target_fd,
4058 					       attr->link_create.target_btf_id);
4059 	return -EINVAL;
4060 }
4061 
4062 #define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
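/* BPF_LINK_CREATE: check that the attach type is compatible with the
 * program, then create the corresponding kind of link (cgroup,
 * tracing, netns or XDP).
 */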
4063 static int link_create(union bpf_attr *attr)
4064 {
4065 	enum bpf_prog_type ptype;
4066 	struct bpf_prog *prog;
4067 	int ret;
4068 
4069 	if (CHECK_ATTR(BPF_LINK_CREATE))
4070 		return -EINVAL;
4071 
4072 	prog = bpf_prog_get(attr->link_create.prog_fd);
4073 	if (IS_ERR(prog))
4074 		return PTR_ERR(prog);
4075 
4076 	ret = bpf_prog_attach_check_attach_type(prog,
4077 						attr->link_create.attach_type);
4078 	if (ret)
4079 		goto out;
4080 
4081 	if (prog->type == BPF_PROG_TYPE_EXT) {
4082 		ret = tracing_bpf_link_attach(attr, prog);
4083 		goto out;
4084 	}
4085 
4086 	ptype = attach_type_to_prog_type(attr->link_create.attach_type);
4087 	if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
4088 		ret = -EINVAL;
4089 		goto out;
4090 	}
4091 
4092 	switch (ptype) {
4093 	case BPF_PROG_TYPE_CGROUP_SKB:
4094 	case BPF_PROG_TYPE_CGROUP_SOCK:
4095 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
4096 	case BPF_PROG_TYPE_SOCK_OPS:
4097 	case BPF_PROG_TYPE_CGROUP_DEVICE:
4098 	case BPF_PROG_TYPE_CGROUP_SYSCTL:
4099 	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
4100 		ret = cgroup_bpf_link_attach(attr, prog);
4101 		break;
4102 	case BPF_PROG_TYPE_TRACING:
4103 		ret = tracing_bpf_link_attach(attr, prog);
4104 		break;
4105 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
4106 	case BPF_PROG_TYPE_SK_LOOKUP:
4107 		ret = netns_bpf_link_create(attr, prog);
4108 		break;
4109 #ifdef CONFIG_NET
4110 	case BPF_PROG_TYPE_XDP:
4111 		ret = bpf_xdp_link_attach(attr, prog);
4112 		break;
4113 #endif
4114 	default:
4115 		ret = -EINVAL;
4116 	}
4117 
4118 out:
4119 	if (ret < 0)
4120 		bpf_prog_put(prog);
4121 	return ret;
4122 }
4123 
4124 #define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
4125 
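/* BPF_LINK_UPDATE: replace the program backing a link; with
 * BPF_F_REPLACE the link's update_prog callback is additionally given
 * old_prog_fd so it can verify the currently attached program.
 */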
4126 static int link_update(union bpf_attr *attr)
4127 {
4128 	struct bpf_prog *old_prog = NULL, *new_prog;
4129 	struct bpf_link *link;
4130 	u32 flags;
4131 	int ret;
4132 
4133 	if (CHECK_ATTR(BPF_LINK_UPDATE))
4134 		return -EINVAL;
4135 
4136 	flags = attr->link_update.flags;
4137 	if (flags & ~BPF_F_REPLACE)
4138 		return -EINVAL;
4139 
4140 	link = bpf_link_get_from_fd(attr->link_update.link_fd);
4141 	if (IS_ERR(link))
4142 		return PTR_ERR(link);
4143 
4144 	new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
4145 	if (IS_ERR(new_prog)) {
4146 		ret = PTR_ERR(new_prog);
4147 		goto out_put_link;
4148 	}
4149 
4150 	if (flags & BPF_F_REPLACE) {
4151 		old_prog = bpf_prog_get(attr->link_update.old_prog_fd);
4152 		if (IS_ERR(old_prog)) {
4153 			ret = PTR_ERR(old_prog);
4154 			old_prog = NULL;
4155 			goto out_put_progs;
4156 		}
4157 	} else if (attr->link_update.old_prog_fd) {
4158 		ret = -EINVAL;
4159 		goto out_put_progs;
4160 	}
4161 
4162 	if (link->ops->update_prog)
4163 		ret = link->ops->update_prog(link, new_prog, old_prog);
4164 	else
4165 		ret = -EINVAL;
4166 
4167 out_put_progs:
4168 	if (old_prog)
4169 		bpf_prog_put(old_prog);
4170 	if (ret)
4171 		bpf_prog_put(new_prog);
4172 out_put_link:
4173 	bpf_link_put(link);
4174 	return ret;
4175 }
4176 
4177 #define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd
4178 
4179 static int link_detach(union bpf_attr *attr)
4180 {
4181 	struct bpf_link *link;
4182 	int ret;
4183 
4184 	if (CHECK_ATTR(BPF_LINK_DETACH))
4185 		return -EINVAL;
4186 
4187 	link = bpf_link_get_from_fd(attr->link_detach.link_fd);
4188 	if (IS_ERR(link))
4189 		return PTR_ERR(link);
4190 
4191 	if (link->ops->detach)
4192 		ret = link->ops->detach(link);
4193 	else
4194 		ret = -EOPNOTSUPP;
4195 
4196 	bpf_link_put(link);
4197 	return ret;
4198 }
4199 
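/* Take a reference on a link unless its refcount has already dropped
 * to zero (i.e. the link is being freed).
 */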
4200 static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
4201 {
4202 	return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
4203 }
4204 
4205 struct bpf_link *bpf_link_by_id(u32 id)
4206 {
4207 	struct bpf_link *link;
4208 
4209 	if (!id)
4210 		return ERR_PTR(-ENOENT);
4211 
4212 	spin_lock_bh(&link_idr_lock);
4213 	/* before link is "settled", ID is 0, pretend it doesn't exist yet */
4214 	link = idr_find(&link_idr, id);
4215 	if (link) {
4216 		if (link->id)
4217 			link = bpf_link_inc_not_zero(link);
4218 		else
4219 			link = ERR_PTR(-EAGAIN);
4220 	} else {
4221 		link = ERR_PTR(-ENOENT);
4222 	}
4223 	spin_unlock_bh(&link_idr_lock);
4224 	return link;
4225 }
4226 
4227 #define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
4228 
4229 static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
4230 {
4231 	struct bpf_link *link;
4232 	u32 id = attr->link_id;
4233 	int fd;
4234 
4235 	if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
4236 		return -EINVAL;
4237 
4238 	if (!capable(CAP_SYS_ADMIN))
4239 		return -EPERM;
4240 
4241 	link = bpf_link_by_id(id);
4242 	if (IS_ERR(link))
4243 		return PTR_ERR(link);
4244 
4245 	fd = bpf_link_new_fd(link);
4246 	if (fd < 0)
4247 		bpf_link_put(link);
4248 
4249 	return fd;
4250 }
4251 
4252 DEFINE_MUTEX(bpf_stats_enabled_mutex);
4253 
4254 static int bpf_stats_release(struct inode *inode, struct file *file)
4255 {
4256 	mutex_lock(&bpf_stats_enabled_mutex);
4257 	static_key_slow_dec(&bpf_stats_enabled_key.key);
4258 	mutex_unlock(&bpf_stats_enabled_mutex);
4259 	return 0;
4260 }
4261 
4262 static const struct file_operations bpf_stats_fops = {
4263 	.release = bpf_stats_release,
4264 };
4265 
4266 static int bpf_enable_runtime_stats(void)
4267 {
4268 	int fd;
4269 
4270 	mutex_lock(&bpf_stats_enabled_mutex);
4271 
4272 	/* Set a very high limit to avoid overflow */
4273 	if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
4274 		mutex_unlock(&bpf_stats_enabled_mutex);
4275 		return -EBUSY;
4276 	}
4277 
4278 	fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
4279 	if (fd >= 0)
4280 		static_key_slow_inc(&bpf_stats_enabled_key.key);
4281 
4282 	mutex_unlock(&bpf_stats_enabled_mutex);
4283 	return fd;
4284 }
4285 
4286 #define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type
4287 
4288 static int bpf_enable_stats(union bpf_attr *attr)
4289 {
4290 
4291 	if (CHECK_ATTR(BPF_ENABLE_STATS))
4292 		return -EINVAL;
4293 
4294 	if (!capable(CAP_SYS_ADMIN))
4295 		return -EPERM;
4296 
4297 	switch (attr->enable_stats.type) {
4298 	case BPF_STATS_RUN_TIME:
4299 		return bpf_enable_runtime_stats();
4300 	default:
4301 		break;
4302 	}
4303 	return -EINVAL;
4304 }
4305 
4306 #define BPF_ITER_CREATE_LAST_FIELD iter_create.flags
4307 
4308 static int bpf_iter_create(union bpf_attr *attr)
4309 {
4310 	struct bpf_link *link;
4311 	int err;
4312 
4313 	if (CHECK_ATTR(BPF_ITER_CREATE))
4314 		return -EINVAL;
4315 
4316 	if (attr->iter_create.flags)
4317 		return -EINVAL;
4318 
4319 	link = bpf_link_get_from_fd(attr->iter_create.link_fd);
4320 	if (IS_ERR(link))
4321 		return PTR_ERR(link);
4322 
4323 	err = bpf_iter_new_fd(link);
4324 	bpf_link_put(link);
4325 
4326 	return err;
4327 }
4328 
4329 #define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
4330 
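/* BPF_PROG_BIND_MAP: add a map to the program's used_maps array so the
 * map stays alive for as long as the program does, even if the program
 * never references the map in its instructions.
 */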
4331 static int bpf_prog_bind_map(union bpf_attr *attr)
4332 {
4333 	struct bpf_prog *prog;
4334 	struct bpf_map *map;
4335 	struct bpf_map **used_maps_old, **used_maps_new;
4336 	int i, ret = 0;
4337 
4338 	if (CHECK_ATTR(BPF_PROG_BIND_MAP))
4339 		return -EINVAL;
4340 
4341 	if (attr->prog_bind_map.flags)
4342 		return -EINVAL;
4343 
4344 	prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
4345 	if (IS_ERR(prog))
4346 		return PTR_ERR(prog);
4347 
4348 	map = bpf_map_get(attr->prog_bind_map.map_fd);
4349 	if (IS_ERR(map)) {
4350 		ret = PTR_ERR(map);
4351 		goto out_prog_put;
4352 	}
4353 
4354 	mutex_lock(&prog->aux->used_maps_mutex);
4355 
4356 	used_maps_old = prog->aux->used_maps;
4357 
4358 	for (i = 0; i < prog->aux->used_map_cnt; i++)
4359 		if (used_maps_old[i] == map) {
4360 			bpf_map_put(map);
4361 			goto out_unlock;
4362 		}
4363 
4364 	used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
4365 				      sizeof(used_maps_new[0]),
4366 				      GFP_KERNEL);
4367 	if (!used_maps_new) {
4368 		ret = -ENOMEM;
4369 		goto out_unlock;
4370 	}
4371 
4372 	memcpy(used_maps_new, used_maps_old,
4373 	       sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
4374 	used_maps_new[prog->aux->used_map_cnt] = map;
4375 
4376 	prog->aux->used_map_cnt++;
4377 	prog->aux->used_maps = used_maps_new;
4378 
4379 	kfree(used_maps_old);
4380 
4381 out_unlock:
4382 	mutex_unlock(&prog->aux->used_maps_mutex);
4383 
4384 	if (ret)
4385 		bpf_map_put(map);
4386 out_prog_put:
4387 	bpf_prog_put(prog);
4388 	return ret;
4389 }
4390 
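/* The bpf(2) syscall entry point: enforce the unprivileged-BPF policy,
 * copy and sanity-check the attribute union from user space, consult
 * the LSM hook, then dispatch on cmd.
 */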
4391 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
4392 {
4393 	union bpf_attr attr;
4394 	int err;
4395 
4396 	if (sysctl_unprivileged_bpf_disabled && !bpf_capable())
4397 		return -EPERM;
4398 
4399 	err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size);
4400 	if (err)
4401 		return err;
4402 	size = min_t(u32, size, sizeof(attr));
4403 
4404 	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
4405 	memset(&attr, 0, sizeof(attr));
4406 	if (copy_from_user(&attr, uattr, size) != 0)
4407 		return -EFAULT;
4408 
4409 	trace_android_vh_check_bpf_syscall(cmd, &attr, size);
4410 
4411 	err = security_bpf(cmd, &attr, size);
4412 	if (err < 0)
4413 		return err;
4414 
4415 	switch (cmd) {
4416 	case BPF_MAP_CREATE:
4417 		err = map_create(&attr);
4418 		break;
4419 	case BPF_MAP_LOOKUP_ELEM:
4420 		err = map_lookup_elem(&attr);
4421 		break;
4422 	case BPF_MAP_UPDATE_ELEM:
4423 		err = map_update_elem(&attr);
4424 		break;
4425 	case BPF_MAP_DELETE_ELEM:
4426 		err = map_delete_elem(&attr);
4427 		break;
4428 	case BPF_MAP_GET_NEXT_KEY:
4429 		err = map_get_next_key(&attr);
4430 		break;
4431 	case BPF_MAP_FREEZE:
4432 		err = map_freeze(&attr);
4433 		break;
4434 	case BPF_PROG_LOAD:
4435 		err = bpf_prog_load(&attr, uattr);
4436 		break;
4437 	case BPF_OBJ_PIN:
4438 		err = bpf_obj_pin(&attr);
4439 		break;
4440 	case BPF_OBJ_GET:
4441 		err = bpf_obj_get(&attr);
4442 		break;
4443 	case BPF_PROG_ATTACH:
4444 		err = bpf_prog_attach(&attr);
4445 		break;
4446 	case BPF_PROG_DETACH:
4447 		err = bpf_prog_detach(&attr);
4448 		break;
4449 	case BPF_PROG_QUERY:
4450 		err = bpf_prog_query(&attr, uattr);
4451 		break;
4452 	case BPF_PROG_TEST_RUN:
4453 		err = bpf_prog_test_run(&attr, uattr);
4454 		break;
4455 	case BPF_PROG_GET_NEXT_ID:
4456 		err = bpf_obj_get_next_id(&attr, uattr,
4457 					  &prog_idr, &prog_idr_lock);
4458 		break;
4459 	case BPF_MAP_GET_NEXT_ID:
4460 		err = bpf_obj_get_next_id(&attr, uattr,
4461 					  &map_idr, &map_idr_lock);
4462 		break;
4463 	case BPF_BTF_GET_NEXT_ID:
4464 		err = bpf_obj_get_next_id(&attr, uattr,
4465 					  &btf_idr, &btf_idr_lock);
4466 		break;
4467 	case BPF_PROG_GET_FD_BY_ID:
4468 		err = bpf_prog_get_fd_by_id(&attr);
4469 		break;
4470 	case BPF_MAP_GET_FD_BY_ID:
4471 		err = bpf_map_get_fd_by_id(&attr);
4472 		break;
4473 	case BPF_OBJ_GET_INFO_BY_FD:
4474 		err = bpf_obj_get_info_by_fd(&attr, uattr);
4475 		break;
4476 	case BPF_RAW_TRACEPOINT_OPEN:
4477 		err = bpf_raw_tracepoint_open(&attr);
4478 		break;
4479 	case BPF_BTF_LOAD:
4480 		err = bpf_btf_load(&attr);
4481 		break;
4482 	case BPF_BTF_GET_FD_BY_ID:
4483 		err = bpf_btf_get_fd_by_id(&attr);
4484 		break;
4485 	case BPF_TASK_FD_QUERY:
4486 		err = bpf_task_fd_query(&attr, uattr);
4487 		break;
4488 	case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
4489 		err = map_lookup_and_delete_elem(&attr);
4490 		break;
4491 	case BPF_MAP_LOOKUP_BATCH:
4492 		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
4493 		break;
4494 	case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
4495 		err = bpf_map_do_batch(&attr, uattr,
4496 				       BPF_MAP_LOOKUP_AND_DELETE_BATCH);
4497 		break;
4498 	case BPF_MAP_UPDATE_BATCH:
4499 		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
4500 		break;
4501 	case BPF_MAP_DELETE_BATCH:
4502 		err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
4503 		break;
4504 	case BPF_LINK_CREATE:
4505 		err = link_create(&attr);
4506 		break;
4507 	case BPF_LINK_UPDATE:
4508 		err = link_update(&attr);
4509 		break;
4510 	case BPF_LINK_GET_FD_BY_ID:
4511 		err = bpf_link_get_fd_by_id(&attr);
4512 		break;
4513 	case BPF_LINK_GET_NEXT_ID:
4514 		err = bpf_obj_get_next_id(&attr, uattr,
4515 					  &link_idr, &link_idr_lock);
4516 		break;
4517 	case BPF_ENABLE_STATS:
4518 		err = bpf_enable_stats(&attr);
4519 		break;
4520 	case BPF_ITER_CREATE:
4521 		err = bpf_iter_create(&attr);
4522 		break;
4523 	case BPF_LINK_DETACH:
4524 		err = link_detach(&attr);
4525 		break;
4526 	case BPF_PROG_BIND_MAP:
4527 		err = bpf_prog_bind_map(&attr);
4528 		break;
4529 	default:
4530 		err = -EINVAL;
4531 		break;
4532 	}
4533 
4534 	return err;
4535 }
4536