// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <trace/hooks/memory.h>

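/* A struct_ops map's value moves through these states in order:
 * INIT after map creation, INUSE once map_update_elem() has
 * successfully registered the kernel struct with its subsystem,
 * and TOBEFREE once map_delete_elem() has unregistered it.
 */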
enum bpf_struct_ops_state {
	BPF_STRUCT_OPS_STATE_INIT,
	BPF_STRUCT_OPS_STATE_INUSE,
	BPF_STRUCT_OPS_STATE_TOBEFREE,
};

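/* Common header shared by the kernel-side value (kvalue) and the
 * userspace-visible value (uvalue): a refcount plus the state above,
 * followed by the subsystem struct itself.
 */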
#define BPF_STRUCT_OPS_COMMON_VALUE			\
	refcount_t refcnt;				\
	enum bpf_struct_ops_state state

struct bpf_struct_ops_value {
	BPF_STRUCT_OPS_COMMON_VALUE;
	char data[] ____cacheline_aligned_in_smp;
};

struct bpf_struct_ops_map {
	struct bpf_map map;
	const struct bpf_struct_ops *st_ops;
	/* protect map_update */
	struct mutex lock;
	/* progs holds the bpf_progs that are populated
	 * into the func ptrs of the kernel's struct
	 * (in kvalue.data).
	 */
	struct bpf_prog **progs;
	/* image is a page holding the trampolines
	 * that store the func args before calling the bpf_prog.
	 * A PAGE_SIZE "image" is enough to store all the
	 * trampolines for "progs[]".
	 */
	void *image;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) in a form that is more useful
	 * to userspace than the kvalue.  For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is
 * the map's value exposed to userspace, and its BTF type id is
 * stored in map->btf_vmlinux_value_type_id.
 */
#define BPF_STRUCT_OPS_TYPE(_name)				\
extern struct bpf_struct_ops bpf_##_name;			\
								\
struct bpf_struct_ops_##_name {					\
	BPF_STRUCT_OPS_COMMON_VALUE;				\
	struct _name data ____cacheline_aligned_in_smp;		\
};
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
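
/* For example, with tcp_congestion_ops listed in
 * bpf_struct_ops_types.h, the macro above expands to:
 *
 *	extern struct bpf_struct_ops bpf_tcp_congestion_ops;
 *
 *	struct bpf_struct_ops_tcp_congestion_ops {
 *		refcount_t refcnt;
 *		enum bpf_struct_ops_state state;
 *		struct tcp_congestion_ops data ____cacheline_aligned_in_smp;
 *	};
 */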

enum {
#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
	__NR_BPF_STRUCT_OPS_TYPE,
};

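/* Table indexed by the generated enum above, e.g.
 * bpf_struct_ops[BPF_STRUCT_OPS_TYPE_tcp_congestion_ops] points to
 * bpf_tcp_congestion_ops.
 */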
static struct bpf_struct_ops * const bpf_struct_ops[] = {
#define BPF_STRUCT_OPS_TYPE(_name)				\
	[BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
};

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
};

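/* BTF type of "struct module", cached at init time.  A struct_ops
 * member of this type is the subsystem's owner field and is filled
 * with BPF_MODULE_OWNER instead of a bpf_prog.
 */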
static const struct btf_type *module_type;

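/* Called once while parsing btf_vmlinux: for each registered
 * struct_ops, look up its BTF type and value type, build a func model
 * for every func ptr member, and run the subsystem's ->init() hook.
 */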
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
{
	s32 type_id, value_id, module_id;
	const struct btf_member *member;
	struct bpf_struct_ops *st_ops;
	const struct btf_type *t;
	char value_name[128];
	const char *mname;
	u32 i, j;

	/* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */
#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name);
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

	module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT);
	if (module_id < 0) {
		pr_warn("Cannot find struct module in btf_vmlinux\n");
		return;
	}
	module_type = btf_type_by_id(btf, module_id);

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		st_ops = bpf_struct_ops[i];

		if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
		    sizeof(value_name)) {
			pr_warn("struct_ops name %s is too long\n",
				st_ops->name);
			continue;
		}
		sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

		value_id = btf_find_by_name_kind(btf, value_name,
						 BTF_KIND_STRUCT);
		if (value_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				value_name);
			continue;
		}

		type_id = btf_find_by_name_kind(btf, st_ops->name,
						BTF_KIND_STRUCT);
		if (type_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				st_ops->name);
			continue;
		}
		t = btf_type_by_id(btf, type_id);
		if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
			pr_warn("Cannot support #%u members in struct %s\n",
				btf_type_vlen(t), st_ops->name);
			continue;
		}

		for_each_member(j, t, member) {
			const struct btf_type *func_proto;

			mname = btf_name_by_offset(btf, member->name_off);
			if (!*mname) {
				pr_warn("anon member in struct %s is not supported\n",
					st_ops->name);
				break;
			}

			if (btf_member_bitfield_size(t, member)) {
				pr_warn("bit field member %s in struct %s is not supported\n",
					mname, st_ops->name);
				break;
			}

			func_proto = btf_type_resolve_func_ptr(btf,
							       member->type,
							       NULL);
			if (func_proto &&
			    btf_distill_func_proto(log, btf,
						   func_proto, mname,
						   &st_ops->func_models[j])) {
				pr_warn("Error in parsing func ptr %s in struct %s\n",
					mname, st_ops->name);
				break;
			}
		}

		if (j == btf_type_vlen(t)) {
			if (st_ops->init(btf)) {
				pr_warn("Error in init bpf_struct_ops %s\n",
					st_ops->name);
			} else {
				st_ops->type_id = type_id;
				st_ops->type = t;
				st_ops->value_id = value_id;
				st_ops->value_type = btf_type_by_id(btf,
								    value_id);
			}
		}
	}
}

extern struct btf *btf_vmlinux;

static const struct bpf_struct_ops *
bpf_struct_ops_find_value(u32 value_id)
{
	unsigned int i;

	if (!value_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->value_id == value_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	unsigned int i;

	if (!type_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->type_id == type_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

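/* A struct_ops map has exactly one element, at key 0. */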
static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}

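/* Copy the single element to userspace: the uvalue contents plus a
 * snapshot of the kvalue's state and refcnt.
 */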
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed.  state and refcnt do not need
	 * to be updated together under atomic context.
	 */
	uvalue = (struct bpf_struct_ops_value *)value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->state = state;
	refcount_set(&uvalue->refcnt, refcount_read(&kvalue->refcnt));

	return 0;
}

static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	const struct btf_type *t = st_map->st_ops->type;
	u32 i;

	for (i = 0; i < btf_type_vlen(t); i++) {
		if (st_map->progs[i]) {
			bpf_prog_put(st_map->progs[i]);
			st_map->progs[i] = NULL;
		}
	}
}

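/* Reject a value in which any padding byte (a hole between members or
 * the tail padding) is non-zero, so userspace cannot pass unchecked
 * bytes into the kernel struct.
 */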
static int check_zero_holes(const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf_vmlinux, member->type);
		mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}

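/* Populate the kernel struct from the user-supplied value: validate
 * every member, build a trampoline for each func ptr prog, flip the
 * trampoline image to read-only+executable, and finally hand the
 * struct to its subsystem via st_ops->reg().
 */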
static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops *st_ops = st_map->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_member *member;
	const struct btf_type *t = st_ops->type;
	struct bpf_tramp_progs *tprogs = NULL;
	void *udata, *kdata;
	int prog_fd, err = 0;
	void *image;
	u32 i;

	if (flags)
		return -EINVAL;

	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_ops->value_type, value);
	if (err)
		return err;

	uvalue = (struct bpf_struct_ops_value *)value;
	err = check_zero_holes(t, uvalue->data);
	if (err)
		return err;

	if (uvalue->state || refcount_read(&uvalue->refcnt))
		return -EINVAL;

	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return -ENOMEM;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);

	if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);

	udata = &uvalue->data;
	kdata = &kvalue->data;
	image = st_map->image;

	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		u32 moff;
		u32 flags;

		moff = btf_member_bit_offset(t, member) / 8;
		ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
		if (ptype == module_type) {
			if (*(void **)(udata + moff))
				goto reset_unlock;
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;

		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zeroed members
		 * here.  Reject everything else.
		 */

		/* All non-func-ptr members must be 0 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			u32 msize;

			mtype = btf_type_by_id(btf_vmlinux, member->type);
			mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}

			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}

			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check as the attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}
		st_map->progs[i] = prog;

		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops->type_id ||
		    prog->expected_attach_type != i) {
			err = -EINVAL;
			goto reset_unlock;
		}

		tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
		tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
		flags = st_ops->func_models[i].ret_size > 0 ?
			BPF_TRAMP_F_RET_FENTRY_RET : 0;
		err = arch_prepare_bpf_trampoline(NULL, image,
						  st_map->image + PAGE_SIZE,
						  &st_ops->func_models[i],
						  flags, tprogs, NULL);
		if (err < 0)
			goto reset_unlock;

		*(void **)(kdata + moff) = image;
		image += err;

		/* store the prog id in udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;
	}

	refcount_set(&kvalue->refcnt, 1);
	bpf_map_inc(map);

	set_memory_ro((long)st_map->image, 1);
	trace_android_vh_set_memory_ro((unsigned long)st_map->image, 1);
	set_memory_x((long)st_map->image, 1);
	trace_android_vh_set_memory_x((unsigned long)st_map->image, 1);
	err = st_ops->reg(kdata);
	if (likely(!err)) {
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* Error during st_ops->reg().  It is very unlikely since
	 * the above init_member() should have caught it earlier
	 * before reg().  The only possibility is if there was a race
	 * in registering the struct_ops (under the same name) to
	 * a sub-system through different struct_ops's maps.
	 */
	set_memory_nx((long)st_map->image, 1);
	set_memory_rw((long)st_map->image, 1);
	bpf_map_put(map);

reset_unlock:
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);
unlock:
	kfree(tprogs);
	mutex_unlock(&st_map->lock);
	return err;
}

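/* Transition INUSE -> TOBEFREE and unregister from the subsystem.
 * The map reference taken at update time is only dropped once the
 * kvalue refcnt falls to zero.
 */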
static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	prev_state = cmpxchg(&st_map->kvalue.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	switch (prev_state) {
	case BPF_STRUCT_OPS_STATE_INUSE:
		st_map->st_ops->unreg(&st_map->kvalue.data);
		if (refcount_dec_and_test(&st_map->kvalue.refcnt))
			bpf_map_put(map);
		return 0;
	case BPF_STRUCT_OPS_STATE_TOBEFREE:
		return -EINPROGRESS;
	case BPF_STRUCT_OPS_STATE_INIT:
		return -ENOENT;
	default:
		WARN_ON_ONCE(1);
		/* Should never happen.  Treat it as not found. */
		return -ENOENT;
	}
}

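/* Show the map's single element as BTF-formatted text, e.g. when a
 * pinned struct_ops map is read back through bpffs.
 */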
static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
				  value, m);
		seq_puts(m, "\n");
	}

	kfree(value);
}

static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->progs)
		bpf_struct_ops_map_put_progs(st_map);
	bpf_map_area_free(st_map->progs);
	trace_android_vh_set_memory_rw((unsigned long)st_map->image, 1);
	trace_android_vh_set_memory_nx((unsigned long)st_map->image, 1);
	bpf_jit_free_exec(st_map->image);
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}

static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    attr->map_flags || !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}

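/* Allocate the map plus its three side allocations: uvalue (the
 * userspace-visible copy of the value), the progs array, and one
 * executable page for the trampoline image.
 */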
static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops *st_ops;
	size_t map_total_size, st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct bpf_map_memory mem;
	struct bpf_map *map;
	int err;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
	if (!st_ops)
		return ERR_PTR(-ENOTSUPP);

	vt = st_ops->value_type;
	if (attr->value_size != vt->size)
		return ERR_PTR(-EINVAL);

	t = st_ops->type;

	st_map_size = sizeof(*st_map) +
		/* kvalue stores the
		 * struct bpf_struct_ops_tcp_congestion_ops
		 */
		(vt->size - sizeof(struct bpf_struct_ops_value));
	map_total_size = st_map_size +
		/* uvalue */
		vt->size +
		/* struct bpf_prog **progs */
		btf_type_vlen(t) * sizeof(struct bpf_prog *);
	err = bpf_map_charge_init(&mem, map_total_size);
	if (err < 0)
		return ERR_PTR(err);

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}
	st_map->st_ops = st_ops;
	map = &st_map->map;

	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->progs =
		bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_prog *),
				   NUMA_NO_NODE);
	st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!st_map->uvalue || !st_map->progs || !st_map->image) {
		bpf_struct_ops_map_free(map);
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&st_map->lock);
	set_vm_flush_reset_perms(st_map->image);
	bpf_map_init_from_attr(map, attr);
	bpf_map_charge_move(&map->memory, &mem);

	return map;
}

static int bpf_struct_ops_map_btf_id;
const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
	.map_btf_name = "bpf_struct_ops_map",
	.map_btf_id = &bpf_struct_ops_map_btf_id,
};

/* "const void *" because some subsystems pass in a const
 * (e.g. const struct tcp_congestion_ops *).
 */
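/* A subsystem takes a ref on the struct_ops value while it is in use
 * (e.g. tcp takes one for each socket running a bpf congestion ops)
 * and drops it with bpf_struct_ops_put() when done.
 */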
bool bpf_struct_ops_get(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);

	return refcount_inc_not_zero(&kvalue->refcnt);
}

void bpf_struct_ops_put(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	if (refcount_dec_and_test(&kvalue->refcnt)) {
		struct bpf_struct_ops_map *st_map;

		st_map = container_of(kvalue, struct bpf_struct_ops_map,
				      kvalue);
		bpf_map_put(&st_map->map);
	}
}