// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>

enum bpf_struct_ops_state {
	BPF_STRUCT_OPS_STATE_INIT,
	BPF_STRUCT_OPS_STATE_INUSE,
	BPF_STRUCT_OPS_STATE_TOBEFREE,
};

#define BPF_STRUCT_OPS_COMMON_VALUE			\
	refcount_t refcnt;				\
	enum bpf_struct_ops_state state

struct bpf_struct_ops_value {
	BPF_STRUCT_OPS_COMMON_VALUE;
	char data[] ____cacheline_aligned_in_smp;
};

struct bpf_struct_ops_map {
	struct bpf_map map;
	const struct bpf_struct_ops *st_ops;
	/* protect map_update */
	struct mutex lock;
	/* progs holds all the bpf_progs that are populated
	 * into the func ptrs of the kernel's struct
	 * (in kvalue.data).
	 */
	struct bpf_prog **progs;
	/* image is a page that holds all the trampolines
	 * that store the func args before calling the bpf_prog.
	 * A PAGE_SIZE "image" is enough to store all the
	 * trampolines for "progs[]".
	 */
	void *image;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) that is more useful
	 * to userspace than the kvalue.  For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel's struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is
 * the map's value exposed to userspace and its btf-type-id is
 * stored at map->btf_vmlinux_value_type_id.
 */
#define BPF_STRUCT_OPS_TYPE(_name)				\
extern struct bpf_struct_ops bpf_##_name;			\
								\
struct bpf_struct_ops_##_name {					\
	BPF_STRUCT_OPS_COMMON_VALUE;				\
	struct _name data ____cacheline_aligned_in_smp;		\
};
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

enum {
#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
	__NR_BPF_STRUCT_OPS_TYPE,
};

static struct bpf_struct_ops * const bpf_struct_ops[] = {
#define BPF_STRUCT_OPS_TYPE(_name)				\
	[BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
};

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
};

static const struct btf_type *module_type;

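/* Scan btf_vmlinux once at boot: for each struct_ops listed in
 * bpf_struct_ops_types.h, locate its kernel struct type and its
 * "bpf_struct_ops_<name>" value type, distill a func_model for every
 * func ptr member, and let the subsystem's ->init() finish the setup.
 * A type that fails any check is skipped with a warning.
 */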
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
{
	s32 type_id, value_id, module_id;
	const struct btf_member *member;
	struct bpf_struct_ops *st_ops;
	const struct btf_type *t;
	char value_name[128];
	const char *mname;
	u32 i, j;

	/* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */
#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name);
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

	module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT);
	if (module_id < 0) {
		pr_warn("Cannot find struct module in btf_vmlinux\n");
		return;
	}
	module_type = btf_type_by_id(btf, module_id);

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		st_ops = bpf_struct_ops[i];

		if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
		    sizeof(value_name)) {
			pr_warn("struct_ops name %s is too long\n",
				st_ops->name);
			continue;
		}
		sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

		value_id = btf_find_by_name_kind(btf, value_name,
						 BTF_KIND_STRUCT);
		if (value_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				value_name);
			continue;
		}

		type_id = btf_find_by_name_kind(btf, st_ops->name,
						BTF_KIND_STRUCT);
		if (type_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				st_ops->name);
			continue;
		}
		t = btf_type_by_id(btf, type_id);
		if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
			pr_warn("Cannot support #%u members in struct %s\n",
				btf_type_vlen(t), st_ops->name);
			continue;
		}

		for_each_member(j, t, member) {
			const struct btf_type *func_proto;

			mname = btf_name_by_offset(btf, member->name_off);
			if (!*mname) {
				pr_warn("anon member in struct %s is not supported\n",
					st_ops->name);
				break;
			}

			if (btf_member_bitfield_size(t, member)) {
				pr_warn("bit field member %s in struct %s is not supported\n",
					mname, st_ops->name);
				break;
			}

			func_proto = btf_type_resolve_func_ptr(btf,
							       member->type,
							       NULL);
			if (func_proto &&
			    btf_distill_func_proto(log, btf,
						   func_proto, mname,
						   &st_ops->func_models[j])) {
				pr_warn("Error in parsing func ptr %s in struct %s\n",
					mname, st_ops->name);
				break;
			}
		}

		if (j == btf_type_vlen(t)) {
			if (st_ops->init(btf)) {
				pr_warn("Error in init bpf_struct_ops %s\n",
					st_ops->name);
			} else {
				st_ops->type_id = type_id;
				st_ops->type = t;
				st_ops->value_id = value_id;
				st_ops->value_type = btf_type_by_id(btf,
								    value_id);
			}
		}
	}
}

extern struct btf *btf_vmlinux;

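/* Map a "bpf_struct_ops_<name>" value type's BTF id back to its
 * struct_ops.  Returns NULL if btf_vmlinux is not available or no
 * registered struct_ops uses this value type.
 */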
static const struct bpf_struct_ops *
bpf_struct_ops_find_value(u32 value_id)
{
	unsigned int i;

	if (!value_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->value_id == value_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

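/* Same lookup, but keyed by the kernel struct's own BTF type id
 * (e.g. the id of "tcp_congestion_ops") instead of the value type's id.
 */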
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	unsigned int i;

	if (!type_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->type_id == type_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

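/* A struct_ops map has exactly one element at key 0, so iteration
 * either returns key 0 or reports the end of the map.
 */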
static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}

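/* Lookup path used by the bpf(2) syscall: copy out the
 * userspace-facing uvalue together with a snapshot of the state and
 * refcnt taken from the kernel-side kvalue.
 */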
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed.  state and refcnt do not need
	 * to be updated together under atomic context.
	 */
	uvalue = (struct bpf_struct_ops_value *)value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->state = state;
	refcount_set(&uvalue->refcnt, refcount_read(&kvalue->refcnt));

	return 0;
}

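/* Lookup from a bpf prog is not supported; only the syscall-side
 * bpf_struct_ops_map_sys_lookup_elem() above can read the map.
 */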
static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

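/* Drop the references on all bpf_progs that have been plugged into
 * the struct's func ptrs so far.
 */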
static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	const struct btf_type *t = st_map->st_ops->type;
	u32 i;

	for (i = 0; i < btf_type_vlen(t); i++) {
		if (st_map->progs[i]) {
			bpf_prog_put(st_map->progs[i]);
			st_map->progs[i] = NULL;
		}
	}
}

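/* Walk the members of "t" and verify that every padding hole between
 * (and after) them is zero in "data", so no unknown bytes can be
 * smuggled in through compiler-inserted padding.
 */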
static int check_zero_holes(const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf_vmlinux, member->type);
		mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}

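/* Populate the kernel struct from the userspace-provided value: let
 * the subsystem's ->init_member() handle subsystem-specific members,
 * require all other non-func-ptr members to be zero, generate one
 * trampoline per func ptr bpf_prog, and finally hand the filled-in
 * kdata to the subsystem via ->reg().  Only one update per map can
 * ever succeed.
 */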
static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops *st_ops = st_map->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_member *member;
	const struct btf_type *t = st_ops->type;
	struct bpf_tramp_progs *tprogs = NULL;
	void *udata, *kdata;
	int prog_fd, err = 0;
	void *image;
	u32 i;

	if (flags)
		return -EINVAL;

	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_ops->value_type, value);
	if (err)
		return err;

	uvalue = (struct bpf_struct_ops_value *)value;
	err = check_zero_holes(t, uvalue->data);
	if (err)
		return err;

	if (uvalue->state || refcount_read(&uvalue->refcnt))
		return -EINVAL;

	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return -ENOMEM;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);

	if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);

	udata = &uvalue->data;
	kdata = &kvalue->data;
	image = st_map->image;

	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		u32 moff;
		u32 flags;

		moff = btf_member_bit_offset(t, member) / 8;
		ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
		if (ptype == module_type) {
			if (*(void **)(udata + moff))
				goto reset_unlock;
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;

		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zeroed members
		 * here.  Reject everything else.
		 */

		/* All non-func-ptr members must be 0 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			u32 msize;

			mtype = btf_type_by_id(btf_vmlinux, member->type);
			mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}

			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}

			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check as the attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}
		st_map->progs[i] = prog;

		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops->type_id ||
		    prog->expected_attach_type != i) {
			err = -EINVAL;
			goto reset_unlock;
		}

		tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
		tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
		flags = st_ops->func_models[i].ret_size > 0 ?
			BPF_TRAMP_F_RET_FENTRY_RET : 0;
		err = arch_prepare_bpf_trampoline(NULL, image,
						  st_map->image + PAGE_SIZE,
						  &st_ops->func_models[i],
						  flags, tprogs, NULL);
		if (err < 0)
			goto reset_unlock;

		*(void **)(kdata + moff) = image;
		image += err;

		/* store the prog id in udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;
	}

	refcount_set(&kvalue->refcnt, 1);
	bpf_map_inc(map);

	set_memory_ro((long)st_map->image, 1);
	set_memory_x((long)st_map->image, 1);
	err = st_ops->reg(kdata);
	if (likely(!err)) {
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* Error during st_ops->reg().  It is very unlikely since
	 * the above init_member() should have caught it earlier
	 * before reg().  The only possibility is if there was a race
	 * in registering the struct_ops (under the same name) to
	 * a sub-system through different struct_ops's maps.
	 */
	set_memory_nx((long)st_map->image, 1);
	set_memory_rw((long)st_map->image, 1);
	bpf_map_put(map);

reset_unlock:
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);
unlock:
	kfree(tprogs);
	mutex_unlock(&st_map->lock);
	return err;
}

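/* Deleting the element unregisters the struct from the subsystem.
 * The map itself is only freed once the kernel-side reference taken
 * by a successful update is also dropped.
 */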
static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	prev_state = cmpxchg(&st_map->kvalue.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	switch (prev_state) {
	case BPF_STRUCT_OPS_STATE_INUSE:
		st_map->st_ops->unreg(&st_map->kvalue.data);
		if (refcount_dec_and_test(&st_map->kvalue.refcnt))
			bpf_map_put(map);
		return 0;
	case BPF_STRUCT_OPS_STATE_TOBEFREE:
		return -EINPROGRESS;
	case BPF_STRUCT_OPS_STATE_INIT:
		return -ENOENT;
	default:
		WARN_ON_ONCE(1);
		/* Should never happen.  Treat it as not found. */
		return -ENOENT;
	}
}

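/* Show the map's only element through BTF, e.g. when the map is
 * pinned in bpffs and read back from there.
 */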
static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
				  value, m);
		seq_puts(m, "\n");
	}

	kfree(value);
}

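/* Called when the last map reference is gone: release any remaining
 * prog references and free the trampoline image and value buffers.
 */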
static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->progs)
		bpf_struct_ops_map_put_progs(st_map);
	bpf_map_area_free(st_map->progs);
	bpf_jit_free_exec(st_map->image);
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}

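/* A struct_ops map must have a u32 key, a single entry, no map flags,
 * and a BTF value type taken from vmlinux.
 */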
static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    attr->map_flags || !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}

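/* Allocate the map together with its kvalue tail, the separate uvalue
 * buffer, the progs[] array, and one executable page for the
 * trampolines.
 */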
static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops *st_ops;
	size_t map_total_size, st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct bpf_map_memory mem;
	struct bpf_map *map;
	int err;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
	if (!st_ops)
		return ERR_PTR(-ENOTSUPP);

	vt = st_ops->value_type;
	if (attr->value_size != vt->size)
		return ERR_PTR(-EINVAL);

	t = st_ops->type;

	st_map_size = sizeof(*st_map) +
		/* kvalue stores the
		 * struct bpf_struct_ops_tcp_congestion_ops
		 */
		(vt->size - sizeof(struct bpf_struct_ops_value));
	map_total_size = st_map_size +
		/* uvalue */
		sizeof(vt->size) +
		/* struct bpf_prog **progs */
		btf_type_vlen(t) * sizeof(struct bpf_prog *);
	err = bpf_map_charge_init(&mem, map_total_size);
	if (err < 0)
		return ERR_PTR(err);

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}
	st_map->st_ops = st_ops;
	map = &st_map->map;

	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->progs =
		bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_prog *),
				   NUMA_NO_NODE);
	st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!st_map->uvalue || !st_map->progs || !st_map->image) {
		bpf_struct_ops_map_free(map);
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&st_map->lock);
	set_vm_flush_reset_perms(st_map->image);
	bpf_map_init_from_attr(map, attr);
	bpf_map_charge_move(&map->memory, &mem);

	return map;
}

static int bpf_struct_ops_map_btf_id;
const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
	.map_btf_name = "bpf_struct_ops_map",
	.map_btf_id = &bpf_struct_ops_map_btf_id,
};

/* "const void *" because some subsystems pass in a const pointer
 * (e.g. const struct tcp_congestion_ops *)
 */
bool bpf_struct_ops_get(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);

	return refcount_inc_not_zero(&kvalue->refcnt);
}

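/* Drop a reference taken by bpf_struct_ops_get().  Dropping the last
 * one also releases the map reference held since the successful
 * ->reg() in map_update.
 */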
void bpf_struct_ops_put(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	if (refcount_dec_and_test(&kvalue->refcnt)) {
		struct bpf_struct_ops_map *st_map;

		st_map = container_of(kvalue, struct bpf_struct_ops_map,
				      kvalue);
		bpf_map_put(&st_map->map);
	}
}