// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>

enum bpf_struct_ops_state {
	BPF_STRUCT_OPS_STATE_INIT,
	BPF_STRUCT_OPS_STATE_INUSE,
	BPF_STRUCT_OPS_STATE_TOBEFREE,
};

#define BPF_STRUCT_OPS_COMMON_VALUE			\
	refcount_t refcnt;				\
	enum bpf_struct_ops_state state

struct bpf_struct_ops_value {
	BPF_STRUCT_OPS_COMMON_VALUE;
	char data[] ____cacheline_aligned_in_smp;
};

struct bpf_struct_ops_map {
	struct bpf_map map;
	struct rcu_head rcu;
	const struct bpf_struct_ops *st_ops;
	/* protect map_update */
	struct mutex lock;
	/* progs has all the bpf_progs that are populated
	 * to the func ptrs of the kernel's struct
	 * (in kvalue.data).
	 */
	struct bpf_prog **progs;
	/* image is a page that has all the trampolines
	 * that store the func args before calling the bpf_prog.
	 * A PAGE_SIZE "image" is enough to store all trampolines
	 * for "progs[]".
	 */
	void *image;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) that is more useful
	 * to userspace than the kvalue.  For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel's struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

/* bpf_struct_ops_##_name (e.g. bpf_struct_ops_tcp_congestion_ops) is
 * the map's value exposed to userspace and its btf-type-id is
 * stored at map->btf_vmlinux_value_type_id.
 */
#define BPF_STRUCT_OPS_TYPE(_name)				\
extern struct bpf_struct_ops bpf_##_name;			\
								\
struct bpf_struct_ops_##_name {					\
	BPF_STRUCT_OPS_COMMON_VALUE;				\
	struct _name data ____cacheline_aligned_in_smp;	\
};
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

enum {
#define BPF_STRUCT_OPS_TYPE(_name) BPF_STRUCT_OPS_TYPE_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
	__NR_BPF_STRUCT_OPS_TYPE,
};

static struct bpf_struct_ops * const bpf_struct_ops[] = {
#define BPF_STRUCT_OPS_TYPE(_name)				\
	[BPF_STRUCT_OPS_TYPE_##_name] = &bpf_##_name,
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE
};

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
};

static const struct btf_type *module_type;

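/* Scan btf_vmlinux for every registered struct_ops type: find the
 * kernel struct and its bpf_struct_ops_* value struct, distill a func
 * model for each func ptr member, and call the type's init() hook.
 * A type that fails any check is skipped with a warning.
 */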
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log)
{
	s32 type_id, value_id, module_id;
	const struct btf_member *member;
	struct bpf_struct_ops *st_ops;
	const struct btf_type *t;
	char value_name[128];
	const char *mname;
	u32 i, j;

	/* Ensure BTF type is emitted for "struct bpf_struct_ops_##_name" */
#define BPF_STRUCT_OPS_TYPE(_name) BTF_TYPE_EMIT(struct bpf_struct_ops_##_name);
#include "bpf_struct_ops_types.h"
#undef BPF_STRUCT_OPS_TYPE

	module_id = btf_find_by_name_kind(btf, "module", BTF_KIND_STRUCT);
	if (module_id < 0) {
		pr_warn("Cannot find struct module in btf_vmlinux\n");
		return;
	}
	module_type = btf_type_by_id(btf, module_id);

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		st_ops = bpf_struct_ops[i];

		if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
		    sizeof(value_name)) {
			pr_warn("struct_ops name %s is too long\n",
				st_ops->name);
			continue;
		}
		sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

		value_id = btf_find_by_name_kind(btf, value_name,
						 BTF_KIND_STRUCT);
		if (value_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				value_name);
			continue;
		}

		type_id = btf_find_by_name_kind(btf, st_ops->name,
						BTF_KIND_STRUCT);
		if (type_id < 0) {
			pr_warn("Cannot find struct %s in btf_vmlinux\n",
				st_ops->name);
			continue;
		}
		t = btf_type_by_id(btf, type_id);
		if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
			pr_warn("Cannot support #%u members in struct %s\n",
				btf_type_vlen(t), st_ops->name);
			continue;
		}

		for_each_member(j, t, member) {
			const struct btf_type *func_proto;

			mname = btf_name_by_offset(btf, member->name_off);
			if (!*mname) {
				pr_warn("anon member in struct %s is not supported\n",
					st_ops->name);
				break;
			}

			if (btf_member_bitfield_size(t, member)) {
				pr_warn("bit field member %s in struct %s is not supported\n",
					mname, st_ops->name);
				break;
			}

			func_proto = btf_type_resolve_func_ptr(btf,
							       member->type,
							       NULL);
			if (func_proto &&
			    btf_distill_func_proto(log, btf,
						   func_proto, mname,
						   &st_ops->func_models[j])) {
				pr_warn("Error in parsing func ptr %s in struct %s\n",
					mname, st_ops->name);
				break;
			}
		}

		if (j == btf_type_vlen(t)) {
			if (st_ops->init(btf)) {
				pr_warn("Error in init bpf_struct_ops %s\n",
					st_ops->name);
			} else {
				st_ops->type_id = type_id;
				st_ops->type = t;
				st_ops->value_id = value_id;
				st_ops->value_type = btf_type_by_id(btf,
								    value_id);
			}
		}
	}
}

extern struct btf *btf_vmlinux;

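/* Find the struct_ops whose bpf_struct_ops_* value struct has the
 * given btf-type-id.  Returns NULL if not found.
 */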
static const struct bpf_struct_ops *
bpf_struct_ops_find_value(u32 value_id)
{
	unsigned int i;

	if (!value_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->value_id == value_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

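/* Find the struct_ops by the btf-type-id of the kernel struct itself
 * (e.g. tcp_congestion_ops).  Returns NULL if not found.
 */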
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	unsigned int i;

	if (!type_id || !btf_vmlinux)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(bpf_struct_ops); i++) {
		if (bpf_struct_ops[i]->type_id == type_id)
			return bpf_struct_ops[i];
	}

	return NULL;
}

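/* A struct_ops map has a single element at key 0, so the only
 * "next" key when none is given is 0, and nothing follows it.
 */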
static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}

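/* Copy the map's single element to userspace: a zeroed value while the
 * map is still in INIT state, otherwise uvalue plus the current state
 * and refcnt read from kvalue.
 */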
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed.  state and refcnt do not need
	 * to be updated together under atomic context.
	 */
	uvalue = (struct bpf_struct_ops_value *)value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->state = state;
	refcount_set(&uvalue->refcnt, refcount_read(&kvalue->refcnt));

	return 0;
}

static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

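/* Drop the references on all bpf_progs currently plugged into the
 * kernel struct's func ptr slots and clear the slots.
 */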
static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	const struct btf_type *t = st_map->st_ops->type;
	u32 i;

	for (i = 0; i < btf_type_vlen(t); i++) {
		if (st_map->progs[i]) {
			bpf_prog_put(st_map->progs[i]);
			st_map->progs[i] = NULL;
		}
	}
}

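/* Reject a value whose padding holes (between members and after the
 * last member) contain any non-zero byte.
 */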
static int check_zero_holes(const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf_vmlinux, member->type);
		mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}

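/* The only update a struct_ops map supports: take the user-supplied
 * value, resolve each func ptr member to a bpf_prog, build a
 * trampoline for it in st_map->image, and finally register the
 * filled-in kvalue.data with the subsystem via st_ops->reg().
 * Only one update (key 0, no flags) is allowed per map.
 */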
static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					  void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops *st_ops = st_map->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_member *member;
	const struct btf_type *t = st_ops->type;
	struct bpf_tramp_progs *tprogs = NULL;
	void *udata, *kdata;
	int prog_fd, err = 0;
	void *image;
	u32 i;

	if (flags)
		return -EINVAL;

	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_ops->value_type, value);
	if (err)
		return err;

	uvalue = (struct bpf_struct_ops_value *)value;
	err = check_zero_holes(t, uvalue->data);
	if (err)
		return err;

	if (uvalue->state || refcount_read(&uvalue->refcnt))
		return -EINVAL;

	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return -ENOMEM;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);

	if (kvalue->state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);

	udata = &uvalue->data;
	kdata = &kvalue->data;
	image = st_map->image;

	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		u32 moff;
		u32 flags;

		moff = btf_member_bit_offset(t, member) / 8;
		ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
		if (ptype == module_type) {
			if (*(void **)(udata + moff))
				goto reset_unlock;
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;

		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zero-ed members
		 * here. Reject everything else.
		 */

		/* All non func ptr members must be 0 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			u32 msize;

			mtype = btf_type_by_id(btf_vmlinux, member->type);
			mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}

			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}

			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check as the attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}
		st_map->progs[i] = prog;

		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops->type_id ||
		    prog->expected_attach_type != i) {
			err = -EINVAL;
			goto reset_unlock;
		}

		tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
		tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
		flags = st_ops->func_models[i].ret_size > 0 ?
			BPF_TRAMP_F_RET_FENTRY_RET : 0;
		err = arch_prepare_bpf_trampoline(NULL, image,
						  st_map->image + PAGE_SIZE,
						  &st_ops->func_models[i],
						  flags, tprogs, NULL);
		if (err < 0)
			goto reset_unlock;

		*(void **)(kdata + moff) = image;
		image += err;

		/* put prog_id to udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;
	}

	refcount_set(&kvalue->refcnt, 1);
	bpf_map_inc(map);

	set_memory_ro((long)st_map->image, 1);
	set_memory_x((long)st_map->image, 1);
	err = st_ops->reg(kdata);
	if (likely(!err)) {
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* Error during st_ops->reg().  It is very unlikely since
	 * the above init_member() should have caught it earlier
	 * before reg().  The only possibility is if there was a race
	 * in registering the struct_ops (under the same name) to
	 * a sub-system through different struct_ops's maps.
	 */
	set_memory_nx((long)st_map->image, 1);
	set_memory_rw((long)st_map->image, 1);
	bpf_map_put(map);

reset_unlock:
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);
unlock:
	kfree(tprogs);
	mutex_unlock(&st_map->lock);
	return err;
}

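/* "Deleting" the single element unregisters the kernel struct from the
 * subsystem.  The INUSE -> TOBEFREE transition is done with cmpxchg()
 * so that only one deleter wins, and the map reference taken at update
 * time is dropped here.
 */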
static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	prev_state = cmpxchg(&st_map->kvalue.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	switch (prev_state) {
	case BPF_STRUCT_OPS_STATE_INUSE:
		st_map->st_ops->unreg(&st_map->kvalue.data);
		if (refcount_dec_and_test(&st_map->kvalue.refcnt))
			bpf_map_put(map);
		return 0;
	case BPF_STRUCT_OPS_STATE_TOBEFREE:
		return -EINPROGRESS;
	case BPF_STRUCT_OPS_STATE_INIT:
		return -ENOENT;
	default:
		WARN_ON_ONCE(1);
		/* Should never happen.  Treat it as not found. */
		return -ENOENT;
	}
}

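/* Show the map's single element through BTF, e.g. when a pinned map
 * is dumped via bpffs.
 */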
static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(btf_vmlinux, map->btf_vmlinux_value_type_id,
				  value, m);
		seq_puts(m, "\n");
	}

	kfree(value);
}

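/* Called when the last map reference is gone: drop any remaining prog
 * references and free the progs array, the trampoline image page, the
 * uvalue buffer and the map itself.
 */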
static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->progs)
		bpf_struct_ops_map_put_progs(st_map);
	bpf_map_area_free(st_map->progs);
	bpf_jit_free_exec(st_map->image);
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}

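/* A struct_ops map must have a u32 key, exactly one entry, no map
 * flags and a valid btf_vmlinux_value_type_id.
 */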
static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    attr->map_flags || !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}

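/* Allocate the struct_ops map: the map struct with kvalue embedded at
 * its tail, a separate uvalue buffer, the progs array and one
 * executable page for the trampolines.
 */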
static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops *st_ops;
	size_t st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct bpf_map *map;

	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	st_ops = bpf_struct_ops_find_value(attr->btf_vmlinux_value_type_id);
	if (!st_ops)
		return ERR_PTR(-ENOTSUPP);

	vt = st_ops->value_type;
	if (attr->value_size != vt->size)
		return ERR_PTR(-EINVAL);

	t = st_ops->type;

	st_map_size = sizeof(*st_map) +
		/* kvalue stores the
		 * struct bpf_struct_ops_tcp_congestion_ops
		 */
		(vt->size - sizeof(struct bpf_struct_ops_value));

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map)
		return ERR_PTR(-ENOMEM);

	st_map->st_ops = st_ops;
	map = &st_map->map;

	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->progs =
		bpf_map_area_alloc(btf_type_vlen(t) * sizeof(struct bpf_prog *),
				   NUMA_NO_NODE);
	st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!st_map->uvalue || !st_map->progs || !st_map->image) {
		bpf_struct_ops_map_free(map);
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&st_map->lock);
	set_vm_flush_reset_perms(st_map->image);
	bpf_map_init_from_attr(map, attr);

	return map;
}

static int bpf_struct_ops_map_btf_id;
const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
	.map_btf_name = "bpf_struct_ops_map",
	.map_btf_id = &bpf_struct_ops_map_btf_id,
};

/* "const void *" because some subsystem is
 * passing a const (e.g. const struct tcp_congestion_ops *)
 */
bool bpf_struct_ops_get(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);

	return refcount_inc_not_zero(&kvalue->refcnt);
}

static void bpf_struct_ops_put_rcu(struct rcu_head *head)
{
	struct bpf_struct_ops_map *st_map;

	st_map = container_of(head, struct bpf_struct_ops_map, rcu);
	bpf_map_put(&st_map->map);
}

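/* Drop a reference taken by bpf_struct_ops_get().  The final
 * bpf_map_put() is deferred to an RCU callback; see the comment below.
 */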
void bpf_struct_ops_put(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	if (refcount_dec_and_test(&kvalue->refcnt)) {
		struct bpf_struct_ops_map *st_map;

		st_map = container_of(kvalue, struct bpf_struct_ops_map,
				      kvalue);
		/* The struct_ops's function may switch to another struct_ops.
		 *
		 * For example, bpf_tcp_cc_x->init() may switch to
		 * another tcp_cc_y by calling
		 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
		 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
		 * and its map->refcnt may reach 0 which then frees its
		 * trampoline image while tcp_cc_x is still running.
		 *
		 * Thus, an RCU grace period is needed here.
		 */
		call_rcu(&st_map->rcu, bpf_struct_ops_put_rcu);
	}
}