// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <linux/seq_file.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/btf_ids.h>
#include <linux/rcupdate_wait.h>
#include <linux/poll.h>

struct bpf_struct_ops_value {
	struct bpf_struct_ops_common_value common;
	char data[] ____cacheline_aligned_in_smp;
};

#define MAX_TRAMP_IMAGE_PAGES 8

struct bpf_struct_ops_map {
	struct bpf_map map;
	struct rcu_head rcu;
	const struct bpf_struct_ops_desc *st_ops_desc;
	/* protect map_update */
	struct mutex lock;
	/* links holds all the bpf_links that are populated
	 * to the func ptrs of the kernel's struct
	 * (in kvalue.data).
	 */
	struct bpf_link **links;
	/* ksyms for bpf trampolines */
	struct bpf_ksym **ksyms;
	u32 funcs_cnt;
	u32 image_pages_cnt;
	/* image_pages is an array of pages that has all the trampolines
	 * that store the func args before calling the bpf_prog.
	 */
	void *image_pages[MAX_TRAMP_IMAGE_PAGES];
	/* The owner module's btf. */
	struct btf *btf;
	/* uvalue->data stores the kernel struct
	 * (e.g. tcp_congestion_ops) in a form that is more useful
	 * to userspace than the kvalue. For example,
	 * the bpf_prog's id is stored instead of the kernel
	 * address of a func ptr.
	 */
	struct bpf_struct_ops_value *uvalue;
	/* kvalue.data stores the actual kernel's struct
	 * (e.g. tcp_congestion_ops) that will be
	 * registered to the kernel subsystem.
	 */
	struct bpf_struct_ops_value kvalue;
};

struct bpf_struct_ops_link {
	struct bpf_link link;
	struct bpf_map __rcu *map;
	wait_queue_head_t wait_hup;
};

static DEFINE_MUTEX(update_mutex);

#define VALUE_PREFIX "bpf_struct_ops_"
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)

const struct bpf_verifier_ops bpf_struct_ops_verifier_ops = {
};

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
#ifdef CONFIG_NET
	.test_run = bpf_struct_ops_test_run,
#endif
};

BTF_ID_LIST(st_ops_ids)
BTF_ID(struct, module)
BTF_ID(struct, bpf_struct_ops_common_value)

enum {
	IDX_MODULE_ID,
	IDX_ST_OPS_COMMON_VALUE_ID,
};

extern struct btf *btf_vmlinux;

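/* A "bpf_struct_ops_<name>" value type is expected to look like
 *
 *	struct bpf_struct_ops_<name> {
 *		struct bpf_struct_ops_common_value common;
 *		struct <name> data ____cacheline_aligned_in_smp;
 *	};
 *
 * i.e. exactly two members: the common header first, followed by the
 * wrapped kernel struct itself. (Sketch of the layout checked below.)
 */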
static bool is_valid_value_type(struct btf *btf, s32 value_id,
				const struct btf_type *type,
				const char *value_name)
{
	const struct btf_type *common_value_type;
	const struct btf_member *member;
	const struct btf_type *vt, *mt;

	vt = btf_type_by_id(btf, value_id);
	if (btf_vlen(vt) != 2) {
		pr_warn("The number of %s's members should be 2, but we get %d\n",
			value_name, btf_vlen(vt));
		return false;
	}
	member = btf_type_member(vt);
	mt = btf_type_by_id(btf, member->type);
	common_value_type = btf_type_by_id(btf_vmlinux,
					   st_ops_ids[IDX_ST_OPS_COMMON_VALUE_ID]);
	if (mt != common_value_type) {
		pr_warn("The first member of %s should be bpf_struct_ops_common_value\n",
			value_name);
		return false;
	}
	member++;
	mt = btf_type_by_id(btf, member->type);
	if (mt != type) {
		pr_warn("The second member of %s should be %s\n",
			value_name, btf_name_by_offset(btf, type->name_off));
		return false;
	}

	return true;
}

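/* Trampoline images are allocated and JIT-memory-charged one page at a
 * time; bpf_struct_ops_image_free() undoes both steps.
 */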
static void *bpf_struct_ops_image_alloc(void)
{
	void *image;
	int err;

	err = bpf_jit_charge_modmem(PAGE_SIZE);
	if (err)
		return ERR_PTR(err);
	image = arch_alloc_bpf_trampoline(PAGE_SIZE);
	if (!image) {
		bpf_jit_uncharge_modmem(PAGE_SIZE);
		return ERR_PTR(-ENOMEM);
	}

	return image;
}

void bpf_struct_ops_image_free(void *image)
{
	if (image) {
		arch_free_bpf_trampoline(image, PAGE_SIZE);
		bpf_jit_uncharge_modmem(PAGE_SIZE);
	}
}

#define MAYBE_NULL_SUFFIX "__nullable"

/* Prepare argument info for every nullable argument of a member of a
 * struct_ops type.
 *
 * Initialize a struct bpf_struct_ops_arg_info according to type info of
 * the arguments of a stub function. (Check kCFI for more information about
 * stub functions.)
 *
 * Each member in the struct_ops type has a struct bpf_struct_ops_arg_info
 * to provide an array of struct bpf_ctx_arg_aux, which in turn provides
 * the information used by the verifier to check the arguments of the
 * BPF struct_ops program assigned to the member. Here, we only care about
 * the arguments that are marked as __nullable.
 *
 * The array of struct bpf_ctx_arg_aux is eventually assigned to
 * prog->aux->ctx_arg_info of BPF struct_ops programs and passed to the
 * verifier. (See check_struct_ops_btf_id())
 *
 * On success, arg_info->info will be the list of struct bpf_ctx_arg_aux.
 * On failure, arg_info is left untouched.
 */
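/* For example, a subsystem stub such as
 *
 *	void st_ops__some_member(struct sk_buff *skb__nullable) { }
 *
 * marks arg#0 of that member as nullable, so the verifier will require a
 * NULL check before the BPF prog may dereference the pointer.
 * (Hypothetical stub name, for illustration only.)
 */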
static int prepare_arg_info(struct btf *btf,
			    const char *st_ops_name,
			    const char *member_name,
			    const struct btf_type *func_proto, void *stub_func_addr,
			    struct bpf_struct_ops_arg_info *arg_info)
{
	const struct btf_type *stub_func_proto, *pointed_type;
	const struct btf_param *stub_args, *args;
	struct bpf_ctx_arg_aux *info, *info_buf;
	u32 nargs, arg_no, info_cnt = 0;
	char ksym[KSYM_SYMBOL_LEN];
	const char *stub_fname;
	s32 stub_func_id;
	u32 arg_btf_id;
	int offset;

	stub_fname = kallsyms_lookup((unsigned long)stub_func_addr, NULL, NULL, NULL, ksym);
	if (!stub_fname) {
		pr_warn("Cannot find the stub function name for the %s in struct %s\n",
			member_name, st_ops_name);
		return -ENOENT;
	}

	stub_func_id = btf_find_by_name_kind(btf, stub_fname, BTF_KIND_FUNC);
	if (stub_func_id < 0) {
		pr_warn("Cannot find the stub function %s in btf\n", stub_fname);
		return -ENOENT;
	}

	stub_func_proto = btf_type_by_id(btf, stub_func_id);
	stub_func_proto = btf_type_by_id(btf, stub_func_proto->type);

	/* Check if the number of arguments of the stub function is the same
	 * as the number of arguments of the function pointer.
	 */
	nargs = btf_type_vlen(func_proto);
	if (nargs != btf_type_vlen(stub_func_proto)) {
		pr_warn("the number of arguments of the stub function %s does not match the number of arguments of the member %s of struct %s\n",
			stub_fname, member_name, st_ops_name);
		return -EINVAL;
	}

	if (!nargs)
		return 0;

	args = btf_params(func_proto);
	stub_args = btf_params(stub_func_proto);

	info_buf = kcalloc(nargs, sizeof(*info_buf), GFP_KERNEL);
	if (!info_buf)
		return -ENOMEM;

	/* Prepare info for every nullable argument */
	info = info_buf;
	for (arg_no = 0; arg_no < nargs; arg_no++) {
		/* Skip arguments that are not suffixed with
		 * "__nullable".
		 */
		if (!btf_param_match_suffix(btf, &stub_args[arg_no],
					    MAYBE_NULL_SUFFIX))
			continue;

		/* Should be a pointer to struct */
		pointed_type = btf_type_resolve_ptr(btf,
						    args[arg_no].type,
						    &arg_btf_id);
		if (!pointed_type ||
		    !btf_type_is_struct(pointed_type)) {
			pr_warn("stub function %s has %s tagging to an unsupported type\n",
				stub_fname, MAYBE_NULL_SUFFIX);
			goto err_out;
		}

		offset = btf_ctx_arg_offset(btf, func_proto, arg_no);
		if (offset < 0) {
			pr_warn("stub function %s has an invalid trampoline ctx offset for arg#%u\n",
				stub_fname, arg_no);
			goto err_out;
		}

		if (args[arg_no].type != stub_args[arg_no].type) {
			pr_warn("arg#%u type in stub function %s does not match with its original func_proto\n",
				arg_no, stub_fname);
			goto err_out;
		}

		/* Fill the information of the new argument */
		info->reg_type =
			PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
		info->btf_id = arg_btf_id;
		info->btf = btf;
		info->offset = offset;

		info++;
		info_cnt++;
	}

	if (info_cnt) {
		arg_info->info = info_buf;
		arg_info->cnt = info_cnt;
	} else {
		kfree(info_buf);
	}

	return 0;

err_out:
	kfree(info_buf);

	return -EINVAL;
}

/* Clean up the arg_info in a struct bpf_struct_ops_desc. */
void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc)
{
	struct bpf_struct_ops_arg_info *arg_info;
	int i;

	arg_info = st_ops_desc->arg_info;
	for (i = 0; i < btf_type_vlen(st_ops_desc->type); i++)
		kfree(arg_info[i].info);

	kfree(arg_info);
}

static bool is_module_member(const struct btf *btf, u32 id)
{
	const struct btf_type *t;

	t = btf_type_resolve_ptr(btf, id, NULL);
	if (!t)
		return false;

	if (!__btf_type_is_struct(t) && !btf_type_is_fwd(t))
		return false;

	return !strcmp(btf_name_by_offset(btf, t->name_off), "module");
}

int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff)
{
	void *func_ptr = *(void **)(st_ops->cfi_stubs + moff);

	return func_ptr ? 0 : -ENOTSUPP;
}

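/* Build the bpf_struct_ops_desc for one struct_ops type: resolve the
 * struct and its "bpf_struct_ops_<name>" value type in BTF, distill a
 * func model for every supported func-ptr member, and prepare nullable
 * argument info from the cfi stub functions.
 */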
int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc,
			     struct btf *btf,
			     struct bpf_verifier_log *log)
{
	struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
	struct bpf_struct_ops_arg_info *arg_info;
	const struct btf_member *member;
	const struct btf_type *t;
	s32 type_id, value_id;
	char value_name[128];
	const char *mname;
	int i, err;

	if (strlen(st_ops->name) + VALUE_PREFIX_LEN >=
	    sizeof(value_name)) {
		pr_warn("struct_ops name %s is too long\n",
			st_ops->name);
		return -EINVAL;
	}
	sprintf(value_name, "%s%s", VALUE_PREFIX, st_ops->name);

	if (!st_ops->cfi_stubs) {
		pr_warn("struct_ops for %s has no cfi_stubs\n", st_ops->name);
		return -EINVAL;
	}

	type_id = btf_find_by_name_kind(btf, st_ops->name,
					BTF_KIND_STRUCT);
	if (type_id < 0) {
		pr_warn("Cannot find struct %s in %s\n",
			st_ops->name, btf_get_name(btf));
		return -EINVAL;
	}
	t = btf_type_by_id(btf, type_id);
	if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {
		pr_warn("Cannot support #%u members in struct %s\n",
			btf_type_vlen(t), st_ops->name);
		return -EINVAL;
	}

	value_id = btf_find_by_name_kind(btf, value_name,
					 BTF_KIND_STRUCT);
	if (value_id < 0) {
		pr_warn("Cannot find struct %s in %s\n",
			value_name, btf_get_name(btf));
		return -EINVAL;
	}
	if (!is_valid_value_type(btf, value_id, t, value_name))
		return -EINVAL;

	arg_info = kcalloc(btf_type_vlen(t), sizeof(*arg_info),
			   GFP_KERNEL);
	if (!arg_info)
		return -ENOMEM;

	st_ops_desc->arg_info = arg_info;
	st_ops_desc->type = t;
	st_ops_desc->type_id = type_id;
	st_ops_desc->value_id = value_id;
	st_ops_desc->value_type = btf_type_by_id(btf, value_id);

	for_each_member(i, t, member) {
		const struct btf_type *func_proto;
		void **stub_func_addr;
		u32 moff;

		moff = __btf_member_bit_offset(t, member) / 8;
		mname = btf_name_by_offset(btf, member->name_off);
		if (!*mname) {
			pr_warn("anon member in struct %s is not supported\n",
				st_ops->name);
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (__btf_member_bitfield_size(t, member)) {
			pr_warn("bit field member %s in struct %s is not supported\n",
				mname, st_ops->name);
			err = -EOPNOTSUPP;
			goto errout;
		}

		if (!st_ops_ids[IDX_MODULE_ID] && is_module_member(btf, member->type)) {
			pr_warn("'struct module' btf id not found. Is CONFIG_MODULES enabled? bpf_struct_ops '%s' needs module support.\n",
				st_ops->name);
			err = -EOPNOTSUPP;
			goto errout;
		}

		func_proto = btf_type_resolve_func_ptr(btf,
						       member->type,
						       NULL);

		/* The member is not a function pointer or
		 * the function pointer is not supported.
		 */
		if (!func_proto || bpf_struct_ops_supported(st_ops, moff))
			continue;

		if (btf_distill_func_proto(log, btf,
					   func_proto, mname,
					   &st_ops->func_models[i])) {
			pr_warn("Error in parsing func ptr %s in struct %s\n",
				mname, st_ops->name);
			err = -EINVAL;
			goto errout;
		}

		stub_func_addr = *(void **)(st_ops->cfi_stubs + moff);
		err = prepare_arg_info(btf, st_ops->name, mname,
				       func_proto, stub_func_addr,
				       arg_info + i);
		if (err)
			goto errout;
	}

	if (st_ops->init(btf)) {
		pr_warn("Error in init bpf_struct_ops %s\n",
			st_ops->name);
		err = -EINVAL;
		goto errout;
	}

	return 0;

errout:
	bpf_struct_ops_desc_release(st_ops_desc);

	return err;
}

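/* A struct_ops map holds exactly one element at key 0, so iteration
 * yields that single key and then stops with -ENOENT.
 */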
static int bpf_struct_ops_map_get_next_key(struct bpf_map *map, void *key,
					   void *next_key)
{
	if (key && *(u32 *)key == 0)
		return -ENOENT;

	*(u32 *)next_key = 0;
	return 0;
}

int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	enum bpf_struct_ops_state state;
	s64 refcnt;

	if (unlikely(*(u32 *)key != 0))
		return -ENOENT;

	kvalue = &st_map->kvalue;
	/* Pair with smp_store_release() during map_update */
	state = smp_load_acquire(&kvalue->common.state);
	if (state == BPF_STRUCT_OPS_STATE_INIT) {
		memset(value, 0, map->value_size);
		return 0;
	}

	/* No lock is needed. state and refcnt do not need
	 * to be updated together under atomic context.
	 */
	uvalue = value;
	memcpy(uvalue, st_map->uvalue, map->value_size);
	uvalue->common.state = state;

	/* This value offers the user space a general estimate of how
	 * many sockets are still utilizing this struct_ops for TCP
	 * congestion control. The number might not be exact, but it
	 * should sufficiently meet our present goals.
	 */
	refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
	refcount_set(&uvalue->common.refcnt, max_t(s64, refcnt, 0));

	return 0;
}

static void *bpf_struct_ops_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->links[i])
			break;
		bpf_link_put(st_map->links[i]);
		st_map->links[i] = NULL;
	}
}

static void bpf_struct_ops_map_free_image(struct bpf_struct_ops_map *st_map)
{
	int i;

	for (i = 0; i < st_map->image_pages_cnt; i++)
		bpf_struct_ops_image_free(st_map->image_pages[i]);
	st_map->image_pages_cnt = 0;
}

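/* Verify that all holes between (and after) the members of *data are
 * zeroed, so userspace cannot smuggle unknown bytes into the padding
 * of the kernel struct.
 */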
static int check_zero_holes(const struct btf *btf, const struct btf_type *t, void *data)
{
	const struct btf_member *member;
	u32 i, moff, msize, prev_mend = 0;
	const struct btf_type *mtype;

	for_each_member(i, t, member) {
		moff = __btf_member_bit_offset(t, member) / 8;
		if (moff > prev_mend &&
		    memchr_inv(data + prev_mend, 0, moff - prev_mend))
			return -EINVAL;

		mtype = btf_type_by_id(btf, member->type);
		mtype = btf_resolve_size(btf, mtype, &msize);
		if (IS_ERR(mtype))
			return PTR_ERR(mtype);
		prev_mend = moff + msize;
	}

	if (t->size > prev_mend &&
	    memchr_inv(data + prev_mend, 0, t->size - prev_mend))
		return -EINVAL;

	return 0;
}

static void bpf_struct_ops_link_release(struct bpf_link *link)
{
}

static void bpf_struct_ops_link_dealloc(struct bpf_link *link)
{
	struct bpf_tramp_link *tlink = container_of(link, struct bpf_tramp_link, link);

	kfree(tlink);
}

const struct bpf_link_ops bpf_struct_ops_link_lops = {
	.release = bpf_struct_ops_link_release,
	.dealloc = bpf_struct_ops_link_dealloc,
};

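/* JIT a trampoline for one func-ptr member. Trampolines are packed
 * back-to-back into PAGE_SIZE images: if the current image cannot fit
 * this trampoline (or no image exists yet), a fresh page is allocated,
 * which the caller bounds via allow_alloc (MAX_TRAMP_IMAGE_PAGES).
 */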
int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
				      struct bpf_tramp_link *link,
				      const struct btf_func_model *model,
				      void *stub_func,
				      void **_image, u32 *_image_off,
				      bool allow_alloc)
{
	u32 image_off = *_image_off, flags = BPF_TRAMP_F_INDIRECT;
	void *image = *_image;
	int size;

	tlinks[BPF_TRAMP_FENTRY].links[0] = link;
	tlinks[BPF_TRAMP_FENTRY].nr_links = 1;

	if (model->ret_size > 0)
		flags |= BPF_TRAMP_F_RET_FENTRY_RET;

	size = arch_bpf_trampoline_size(model, flags, tlinks, stub_func);
	if (size <= 0)
		return size ? : -EFAULT;

	/* Allocate image buffer if necessary */
	if (!image || size > PAGE_SIZE - image_off) {
		if (!allow_alloc)
			return -E2BIG;

		image = bpf_struct_ops_image_alloc();
		if (IS_ERR(image))
			return PTR_ERR(image);
		image_off = 0;
	}

	size = arch_prepare_bpf_trampoline(NULL, image + image_off,
					   image + image_off + size,
					   model, flags, tlinks, stub_func);
	if (size <= 0) {
		if (image != *_image)
			bpf_struct_ops_image_free(image);
		return size ? : -EFAULT;
	}

	*_image = image;
	*_image_off = image_off + size;
	return 0;
}

static void bpf_struct_ops_ksym_init(const char *tname, const char *mname,
				     void *image, unsigned int size,
				     struct bpf_ksym *ksym)
{
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf__%s_%s", tname, mname);
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	bpf_image_ksym_init(image, size, ksym);
}

static void bpf_struct_ops_map_add_ksyms(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->ksyms[i])
			break;
		bpf_image_ksym_add(st_map->ksyms[i]);
	}
}

static void bpf_struct_ops_map_del_ksyms(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->ksyms[i])
			break;
		bpf_image_ksym_del(st_map->ksyms[i]);
	}
}

static void bpf_struct_ops_map_free_ksyms(struct bpf_struct_ops_map *st_map)
{
	u32 i;

	for (i = 0; i < st_map->funcs_cnt; i++) {
		if (!st_map->ksyms[i])
			break;
		kfree(st_map->ksyms[i]);
		st_map->ksyms[i] = NULL;
	}
}

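/* Update is the "load" of a struct_ops map: copy the user-supplied value,
 * let the subsystem init each member, turn every func-ptr member that has
 * a prog fd into a trampoline that calls the BPF prog, and finally either
 * mark the map READY (BPF_F_LINK) or register it with the subsystem.
 */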
static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
					   void *value, u64 flags)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
	const struct bpf_struct_ops *st_ops = st_ops_desc->st_ops;
	struct bpf_struct_ops_value *uvalue, *kvalue;
	const struct btf_type *module_type;
	const struct btf_member *member;
	const struct btf_type *t = st_ops_desc->type;
	struct bpf_tramp_links *tlinks;
	void *udata, *kdata;
	int prog_fd, err;
	u32 i, trampoline_start, image_off = 0;
	void *cur_image = NULL, *image = NULL;
	struct bpf_link **plink;
	struct bpf_ksym **pksym;
	const char *tname, *mname;

	if (flags)
		return -EINVAL;

	if (*(u32 *)key != 0)
		return -E2BIG;

	err = check_zero_holes(st_map->btf, st_ops_desc->value_type, value);
	if (err)
		return err;

	uvalue = value;
	err = check_zero_holes(st_map->btf, t, uvalue->data);
	if (err)
		return err;

	if (uvalue->common.state || refcount_read(&uvalue->common.refcnt))
		return -EINVAL;

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks)
		return -ENOMEM;

	uvalue = (struct bpf_struct_ops_value *)st_map->uvalue;
	kvalue = (struct bpf_struct_ops_value *)&st_map->kvalue;

	mutex_lock(&st_map->lock);

	if (kvalue->common.state != BPF_STRUCT_OPS_STATE_INIT) {
		err = -EBUSY;
		goto unlock;
	}

	memcpy(uvalue, value, map->value_size);

	udata = &uvalue->data;
	kdata = &kvalue->data;

	plink = st_map->links;
	pksym = st_map->ksyms;
	tname = btf_name_by_offset(st_map->btf, t->name_off);
	module_type = btf_type_by_id(btf_vmlinux, st_ops_ids[IDX_MODULE_ID]);
	for_each_member(i, t, member) {
		const struct btf_type *mtype, *ptype;
		struct bpf_prog *prog;
		struct bpf_tramp_link *link;
		struct bpf_ksym *ksym;
		u32 moff;

		moff = __btf_member_bit_offset(t, member) / 8;
		mname = btf_name_by_offset(st_map->btf, member->name_off);
		ptype = btf_type_resolve_ptr(st_map->btf, member->type, NULL);
		if (ptype == module_type) {
			if (*(void **)(udata + moff))
				goto reset_unlock;
			*(void **)(kdata + moff) = BPF_MODULE_OWNER;
			continue;
		}

		err = st_ops->init_member(t, member, kdata, udata);
		if (err < 0)
			goto reset_unlock;

		/* The ->init_member() has handled this member */
		if (err > 0)
			continue;

		/* If st_ops->init_member does not handle it,
		 * we will only handle func ptrs and zero-ed members
		 * here. Reject everything else.
		 */

		/* All non-func-ptr members must be 0 */
		if (!ptype || !btf_type_is_func_proto(ptype)) {
			u32 msize;

			mtype = btf_type_by_id(st_map->btf, member->type);
			mtype = btf_resolve_size(st_map->btf, mtype, &msize);
			if (IS_ERR(mtype)) {
				err = PTR_ERR(mtype);
				goto reset_unlock;
			}

			if (memchr_inv(udata + moff, 0, msize)) {
				err = -EINVAL;
				goto reset_unlock;
			}

			continue;
		}

		prog_fd = (int)(*(unsigned long *)(udata + moff));
		/* Similar check as the attr->attach_prog_fd */
		if (!prog_fd)
			continue;

		prog = bpf_prog_get(prog_fd);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto reset_unlock;
		}

		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
		    prog->aux->attach_btf_id != st_ops_desc->type_id ||
		    prog->expected_attach_type != i) {
			bpf_prog_put(prog);
			err = -EINVAL;
			goto reset_unlock;
		}

		link = kzalloc(sizeof(*link), GFP_USER);
		if (!link) {
			bpf_prog_put(prog);
			err = -ENOMEM;
			goto reset_unlock;
		}
		bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS,
			      &bpf_struct_ops_link_lops, prog);
		*plink++ = &link->link;

		ksym = kzalloc(sizeof(*ksym), GFP_USER);
		if (!ksym) {
			err = -ENOMEM;
			goto reset_unlock;
		}
		*pksym++ = ksym;

		trampoline_start = image_off;
		err = bpf_struct_ops_prepare_trampoline(tlinks, link,
						&st_ops->func_models[i],
						*(void **)(st_ops->cfi_stubs + moff),
						&image, &image_off,
						st_map->image_pages_cnt < MAX_TRAMP_IMAGE_PAGES);
		if (err)
			goto reset_unlock;

		if (cur_image != image) {
			st_map->image_pages[st_map->image_pages_cnt++] = image;
			cur_image = image;
			trampoline_start = 0;
		}

		*(void **)(kdata + moff) = image + trampoline_start + cfi_get_offset();

		/* put prog_id to udata */
		*(unsigned long *)(udata + moff) = prog->aux->id;

		/* init ksym for this trampoline */
		bpf_struct_ops_ksym_init(tname, mname,
					 image + trampoline_start,
					 image_off - trampoline_start,
					 ksym);
	}

	if (st_ops->validate) {
		err = st_ops->validate(kdata);
		if (err)
			goto reset_unlock;
	}
	for (i = 0; i < st_map->image_pages_cnt; i++) {
		err = arch_protect_bpf_trampoline(st_map->image_pages[i],
						  PAGE_SIZE);
		if (err)
			goto reset_unlock;
	}

	if (st_map->map.map_flags & BPF_F_LINK) {
		err = 0;
		/* Let bpf_link handle registration & unregistration.
		 *
		 * Pair with smp_load_acquire() during lookup_elem().
		 */
		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_READY);
		goto unlock;
	}

	err = st_ops->reg(kdata, NULL);
	if (likely(!err)) {
		/* This refcnt increment on the map here after
		 * 'st_ops->reg()' is secure since the state of the
		 * map must be set to INIT at this moment, and thus
		 * bpf_struct_ops_map_delete_elem() can't unregister
		 * or transition it to TOBEFREE concurrently.
		 */
		bpf_map_inc(map);
		/* Pair with smp_load_acquire() during lookup_elem().
		 * It ensures the above udata updates (e.g. prog->aux->id)
		 * can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
		 */
		smp_store_release(&kvalue->common.state, BPF_STRUCT_OPS_STATE_INUSE);
		goto unlock;
	}

	/* Error during st_ops->reg(). Can happen if this struct_ops needs to be
	 * verified as a whole, after all init_member() calls. Can also happen if
	 * there was a race in registering the struct_ops (under the same name) to
	 * a sub-system through different struct_ops maps.
	 */

reset_unlock:
	bpf_struct_ops_map_free_ksyms(st_map);
	bpf_struct_ops_map_free_image(st_map);
	bpf_struct_ops_map_put_progs(st_map);
	memset(uvalue, 0, map->value_size);
	memset(kvalue, 0, map->value_size);
unlock:
	kfree(tlinks);
	mutex_unlock(&st_map->lock);
	if (!err)
		bpf_struct_ops_map_add_ksyms(st_map);
	return err;
}

static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
{
	enum bpf_struct_ops_state prev_state;
	struct bpf_struct_ops_map *st_map;

	st_map = (struct bpf_struct_ops_map *)map;
	if (st_map->map.map_flags & BPF_F_LINK)
		return -EOPNOTSUPP;

	prev_state = cmpxchg(&st_map->kvalue.common.state,
			     BPF_STRUCT_OPS_STATE_INUSE,
			     BPF_STRUCT_OPS_STATE_TOBEFREE);
	switch (prev_state) {
	case BPF_STRUCT_OPS_STATE_INUSE:
		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, NULL);
		bpf_map_put(map);
		return 0;
	case BPF_STRUCT_OPS_STATE_TOBEFREE:
		return -EINPROGRESS;
	case BPF_STRUCT_OPS_STATE_INIT:
		return -ENOENT;
	default:
		WARN_ON_ONCE(1);
		/* Should never happen. Treat it as not found. */
		return -ENOENT;
	}
}

static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
					     struct seq_file *m)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	void *value;
	int err;

	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		return;

	err = bpf_struct_ops_map_sys_lookup_elem(map, key, value);
	if (!err) {
		btf_type_seq_show(st_map->btf,
				  map->btf_vmlinux_value_type_id,
				  value, m);
		seq_putc(m, '\n');
	}

	kfree(value);
}

static void __bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	if (st_map->links)
		bpf_struct_ops_map_put_progs(st_map);
	if (st_map->ksyms)
		bpf_struct_ops_map_free_ksyms(st_map);
	bpf_map_area_free(st_map->links);
	bpf_map_area_free(st_map->ksyms);
	bpf_struct_ops_map_free_image(st_map);
	bpf_map_area_free(st_map->uvalue);
	bpf_map_area_free(st_map);
}

static void bpf_struct_ops_map_free(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	/* st_ops->owner was acquired during map_alloc to implicitly hold
	 * the btf's refcnt. The acquire was only done when btf_is_module(),
	 * so st_map->btf cannot be NULL here.
	 */
	if (btf_is_module(st_map->btf))
		module_put(st_map->st_ops_desc->st_ops->owner);

	bpf_struct_ops_map_del_ksyms(st_map);

	/* The struct_ops's function may switch to another struct_ops.
	 *
	 * For example, bpf_tcp_cc_x->init() may switch to
	 * another tcp_cc_y by calling
	 * setsockopt(TCP_CONGESTION, "tcp_cc_y").
	 * During the switch, bpf_struct_ops_put(tcp_cc_x) is called
	 * and its refcount may reach 0, which would then free its
	 * trampoline image while tcp_cc_x is still running.
	 *
	 * A vanilla rcu gp is to wait for all bpf-tcp-cc progs
	 * to finish. bpf-tcp-cc prog is non sleepable.
	 * A rcu_tasks gp is to wait for the last few insns
	 * in the trampoline image to finish before releasing
	 * the trampoline image.
	 */
	synchronize_rcu_mult(call_rcu, call_rcu_tasks);

	__bpf_struct_ops_map_free(map);
}

static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
{
	if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
	    (attr->map_flags & ~(BPF_F_LINK | BPF_F_VTYPE_BTF_OBJ_FD)) ||
	    !attr->btf_vmlinux_value_type_id)
		return -EINVAL;
	return 0;
}

static u32 count_func_ptrs(const struct btf *btf, const struct btf_type *t)
{
	int i;
	u32 count;
	const struct btf_member *member;

	count = 0;
	for_each_member(i, t, member)
		if (btf_type_resolve_func_ptr(btf, member->type, NULL))
			count++;
	return count;
}

static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
{
	const struct bpf_struct_ops_desc *st_ops_desc;
	size_t st_map_size;
	struct bpf_struct_ops_map *st_map;
	const struct btf_type *t, *vt;
	struct module *mod = NULL;
	struct bpf_map *map;
	struct btf *btf;
	int ret;

	if (attr->map_flags & BPF_F_VTYPE_BTF_OBJ_FD) {
		/* The map holds btf for its whole life time. */
		btf = btf_get_by_fd(attr->value_type_btf_obj_fd);
		if (IS_ERR(btf))
			return ERR_CAST(btf);
		if (!btf_is_module(btf)) {
			btf_put(btf);
			return ERR_PTR(-EINVAL);
		}

		mod = btf_try_get_module(btf);
		/* mod holds a refcnt to btf. We don't need an extra refcnt
		 * here.
		 */
		btf_put(btf);
		if (!mod)
			return ERR_PTR(-EINVAL);
	} else {
		btf = bpf_get_btf_vmlinux();
		if (IS_ERR(btf))
			return ERR_CAST(btf);
		if (!btf)
			return ERR_PTR(-ENOTSUPP);
	}

	st_ops_desc = bpf_struct_ops_find_value(btf, attr->btf_vmlinux_value_type_id);
	if (!st_ops_desc) {
		ret = -ENOTSUPP;
		goto errout;
	}

	vt = st_ops_desc->value_type;
	if (attr->value_size != vt->size) {
		ret = -EINVAL;
		goto errout;
	}

	t = st_ops_desc->type;

	st_map_size = sizeof(*st_map) +
		      /* kvalue stores the
		       * struct bpf_struct_ops_tcp_congestion_ops
		       */
		      (vt->size - sizeof(struct bpf_struct_ops_value));

	st_map = bpf_map_area_alloc(st_map_size, NUMA_NO_NODE);
	if (!st_map) {
		ret = -ENOMEM;
		goto errout;
	}

	st_map->st_ops_desc = st_ops_desc;
	map = &st_map->map;

	st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE);
	st_map->funcs_cnt = count_func_ptrs(btf, t);
	st_map->links =
		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_link *),
				   NUMA_NO_NODE);

	st_map->ksyms =
		bpf_map_area_alloc(st_map->funcs_cnt * sizeof(struct bpf_ksym *),
				   NUMA_NO_NODE);
	if (!st_map->uvalue || !st_map->links || !st_map->ksyms) {
		ret = -ENOMEM;
		goto errout_free;
	}
	st_map->btf = btf;

	mutex_init(&st_map->lock);
	bpf_map_init_from_attr(map, attr);

	return map;

errout_free:
	__bpf_struct_ops_map_free(map);
errout:
	module_put(mod);

	return ERR_PTR(ret);
}

static u64 bpf_struct_ops_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
	const struct bpf_struct_ops_desc *st_ops_desc = st_map->st_ops_desc;
	const struct btf_type *vt = st_ops_desc->value_type;
	u64 usage;

	usage = sizeof(*st_map) +
			vt->size - sizeof(struct bpf_struct_ops_value);
	usage += vt->size;
	usage += st_map->funcs_cnt * sizeof(struct bpf_link *);
	usage += st_map->funcs_cnt * sizeof(struct bpf_ksym *);
	usage += PAGE_SIZE;
	return usage;
}

BTF_ID_LIST_SINGLE(bpf_struct_ops_map_btf_ids, struct, bpf_struct_ops_map)
const struct bpf_map_ops bpf_struct_ops_map_ops = {
	.map_alloc_check = bpf_struct_ops_map_alloc_check,
	.map_alloc = bpf_struct_ops_map_alloc,
	.map_free = bpf_struct_ops_map_free,
	.map_get_next_key = bpf_struct_ops_map_get_next_key,
	.map_lookup_elem = bpf_struct_ops_map_lookup_elem,
	.map_delete_elem = bpf_struct_ops_map_delete_elem,
	.map_update_elem = bpf_struct_ops_map_update_elem,
	.map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
	.map_mem_usage = bpf_struct_ops_map_mem_usage,
	.map_btf_id = &bpf_struct_ops_map_btf_ids[0],
};

/* "const void *" because some subsystem is
 * passing a const (e.g. const struct tcp_congestion_ops *)
 */
bool bpf_struct_ops_get(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	map = __bpf_map_inc_not_zero(&st_map->map, false);
	return !IS_ERR(map);
}

void bpf_struct_ops_put(const void *kdata)
{
	struct bpf_struct_ops_value *kvalue;
	struct bpf_struct_ops_map *st_map;

	kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
	st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);

	bpf_map_put(&st_map->map);
}

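/* A map may back a struct_ops bpf_link only if it was created with
 * BPF_F_LINK and a successful map_update has already moved it to the
 * READY state.
 */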
static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
		map->map_flags & BPF_F_LINK &&
		/* Pair with smp_store_release() during map_update */
		smp_load_acquire(&st_map->kvalue.common.state) == BPF_STRUCT_OPS_STATE_READY;
}

static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_struct_ops_map *st_map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = (struct bpf_struct_ops_map *)
		rcu_dereference_protected(st_link->map, true);
	if (st_map) {
		st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);
		bpf_map_put(&st_map->map);
	}
	kfree(st_link);
}

static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
						struct seq_file *seq)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	if (map)
		seq_printf(seq, "map_id:\t%d\n", map->id);
	rcu_read_unlock();
}

static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
						  struct bpf_link_info *info)
{
	struct bpf_struct_ops_link *st_link;
	struct bpf_map *map;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	rcu_read_lock();
	map = rcu_dereference(st_link->map);
	if (map)
		info->struct_ops.map_id = map->id;
	rcu_read_unlock();
	return 0;
}

static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map,
					  struct bpf_map *expected_old_map)
{
	struct bpf_struct_ops_map *st_map, *old_st_map;
	struct bpf_map *old_map;
	struct bpf_struct_ops_link *st_link;
	int err;

	st_link = container_of(link, struct bpf_struct_ops_link, link);
	st_map = container_of(new_map, struct bpf_struct_ops_map, map);

	if (!bpf_struct_ops_valid_to_reg(new_map))
		return -EINVAL;

	if (!st_map->st_ops_desc->st_ops->update)
		return -EOPNOTSUPP;

	mutex_lock(&update_mutex);

	old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
	if (!old_map) {
		err = -ENOLINK;
		goto err_out;
	}
	if (expected_old_map && old_map != expected_old_map) {
		err = -EPERM;
		goto err_out;
	}

	old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
	/* The new and old struct_ops must be the same type. */
	if (st_map->st_ops_desc != old_st_map->st_ops_desc) {
		err = -EINVAL;
		goto err_out;
	}

	err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data, link);
	if (err)
		goto err_out;

	bpf_map_inc(new_map);
	rcu_assign_pointer(st_link->map, new_map);
	bpf_map_put(old_map);

err_out:
	mutex_unlock(&update_mutex);

	return err;
}

static int bpf_struct_ops_map_link_detach(struct bpf_link *link)
{
	struct bpf_struct_ops_link *st_link = container_of(link, struct bpf_struct_ops_link, link);
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;

	mutex_lock(&update_mutex);

	map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
	if (!map) {
		mutex_unlock(&update_mutex);
		return 0;
	}
	st_map = container_of(map, struct bpf_struct_ops_map, map);

	st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link);

	RCU_INIT_POINTER(st_link->map, NULL);
	/* Pair with bpf_map_get() in bpf_struct_ops_link_create() or
	 * bpf_map_inc() in bpf_struct_ops_map_link_update().
	 */
	bpf_map_put(&st_map->map);

	mutex_unlock(&update_mutex);

	wake_up_interruptible_poll(&st_link->wait_hup, EPOLLHUP);

	return 0;
}

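/* poll() on a struct_ops link reports EPOLLHUP once the link has been
 * detached from its map, paired with the wake_up_interruptible_poll()
 * in bpf_struct_ops_map_link_detach() above.
 */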
static __poll_t bpf_struct_ops_map_link_poll(struct file *file,
					     struct poll_table_struct *pts)
{
	struct bpf_struct_ops_link *st_link = file->private_data;

	poll_wait(file, &st_link->wait_hup, pts);

	return rcu_access_pointer(st_link->map) ? 0 : EPOLLHUP;
}

static const struct bpf_link_ops bpf_struct_ops_map_lops = {
	.dealloc = bpf_struct_ops_map_link_dealloc,
	.detach = bpf_struct_ops_map_link_detach,
	.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
	.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
	.update_map = bpf_struct_ops_map_link_update,
	.poll = bpf_struct_ops_map_link_poll,
};

int bpf_struct_ops_link_create(union bpf_attr *attr)
{
	struct bpf_struct_ops_link *link = NULL;
	struct bpf_link_primer link_primer;
	struct bpf_struct_ops_map *st_map;
	struct bpf_map *map;
	int err;

	map = bpf_map_get(attr->link_create.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	st_map = (struct bpf_struct_ops_map *)map;

	if (!bpf_struct_ops_valid_to_reg(map)) {
		err = -EINVAL;
		goto err_out;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto err_out;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL);

	err = bpf_link_prime(&link->link, &link_primer);
	if (err)
		goto err_out;

	init_waitqueue_head(&link->wait_hup);

	/* Hold the update_mutex such that the subsystem cannot
	 * do link->ops->detach() before the link is fully initialized.
	 */
	mutex_lock(&update_mutex);
	err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link);
	if (err) {
		mutex_unlock(&update_mutex);
		bpf_link_cleanup(&link_primer);
		link = NULL;
		goto err_out;
	}
	RCU_INIT_POINTER(link->map, map);
	mutex_unlock(&update_mutex);

	return bpf_link_settle(&link_primer);

err_out:
	bpf_map_put(map);
	kfree(link);
	return err;
}

void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map)
{
	struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;

	info->btf_vmlinux_id = btf_obj_id(st_map->btf);
}