// SPDX-License-Identifier: GPL-2.0
/*
 * bpf-loader.c
 *
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */

#include <linux/bpf.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <stdlib.h>
#include "debug.h"
#include "evlist.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-event.h"
#include "probe-finder.h" // for MAX_PROBES
#include "parse-events.h"
#include "strfilter.h"
#include "util.h"
#include "llvm-utils.h"
#include "c++/clang-c.h"

#include <internal/xyarray.h>
static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
			     const char *fmt, va_list args)
{
	return veprintf(1, verbose, pr_fmt(fmt), args);
}

struct bpf_prog_priv {
	bool is_tp;
	char *sys_name;
	char *evt_name;
	struct perf_probe_event pev;
	bool need_prologue;
	struct bpf_insn *insns_buf;
	int nr_types;
	int *type_mapping;
};

static bool libbpf_initialized;

struct bpf_object *
bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_perf_print);
		libbpf_initialized = true;
	}

	obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load buffer\n");
		return ERR_PTR(-EINVAL);
	}

	return obj;
}

struct bpf_object *bpf__prepare_load(const char *filename, bool source)
{
	struct bpf_object *obj;

	if (!libbpf_initialized) {
		libbpf_set_print(libbpf_perf_print);
		libbpf_initialized = true;
	}

	if (source) {
		int err;
		void *obj_buf;
		size_t obj_buf_sz;

		perf_clang__init();
		err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
		perf_clang__cleanup();
		if (err) {
			pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
			err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
			if (err)
				return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
		} else
			pr_debug("bpf: successful builtin compilation\n");
		obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);

		if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
			llvm__dump_obj(filename, obj_buf, obj_buf_sz);

		free(obj_buf);
	} else
		obj = bpf_object__open(filename);

	if (IS_ERR_OR_NULL(obj)) {
		pr_debug("bpf: failed to load %s\n", filename);
		return obj;
	}

	return obj;
}

void bpf__clear(void)
{
	struct bpf_object *obj, *tmp;

	bpf_object__for_each_safe(obj, tmp) {
		bpf__unprobe(obj);
		bpf_object__close(obj);
	}
}

static void
clear_prog_priv(struct bpf_program *prog __maybe_unused,
		void *_priv)
{
	struct bpf_prog_priv *priv = _priv;

	cleanup_perf_probe_events(&priv->pev, 1);
	zfree(&priv->insns_buf);
	zfree(&priv->type_mapping);
	zfree(&priv->sys_name);
	zfree(&priv->evt_name);
	free(priv);
}

static int
prog_config__exec(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = true;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__module(const char *value, struct perf_probe_event *pev)
{
	pev->uprobes = false;
	pev->target = strdup(value);
	if (!pev->target)
		return -ENOMEM;
	return 0;
}

static int
prog_config__bool(const char *value, bool *pbool, bool invert)
{
	int err;
	bool bool_value;

	if (!pbool)
		return -EINVAL;

	err = strtobool(value, &bool_value);
	if (err)
		return err;

	*pbool = invert ? !bool_value : bool_value;
	return 0;
}

static int
prog_config__inlines(const char *value,
		     struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.no_inlines, true);
}

static int
prog_config__force(const char *value,
		   struct perf_probe_event *pev __maybe_unused)
{
	return prog_config__bool(value, &probe_conf.force_add, false);
}

static struct {
	const char *key;
	const char *usage;
	const char *desc;
	int (*func)(const char *, struct perf_probe_event *);
} bpf_prog_config_terms[] = {
	{
		.key	= "exec",
		.usage	= "exec=<full path of file>",
		.desc	= "Set uprobe target",
		.func	= prog_config__exec,
	},
	{
		.key	= "module",
		.usage	= "module=<module name> ",
		.desc	= "Set kprobe module",
		.func	= prog_config__module,
	},
	{
		.key	= "inlines",
		.usage	= "inlines=[yes|no] ",
		.desc	= "Probe at inline symbol",
		.func	= prog_config__inlines,
	},
	{
		.key	= "force",
		.usage	= "force=[yes|no] ",
		.desc	= "Forcibly add events with existing name",
		.func	= prog_config__force,
	},
};

static int
do_prog_config(const char *key, const char *value,
	       struct perf_probe_event *pev)
{
	unsigned int i;

	pr_debug("config bpf program: %s=%s\n", key, value);
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
			return bpf_prog_config_terms[i].func(value, pev);

	pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
		 key, value);

	pr_debug("\nHint: Valid options are:\n");
	for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
		pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
			 bpf_prog_config_terms[i].desc);
	pr_debug("\n");

	return -BPF_LOADER_ERRNO__PROGCONF_TERM;
}

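/*
 * Split the leading "key=value;" pairs off a program's section name and
 * apply each one through do_prog_config(). Returns a pointer into
 * config_str just past the last consumed pair (the main probe
 * definition), or an ERR_PTR() on failure. For example (illustrative
 * only), a section named "exec=/bin/bash;readline" sets up a uprobe
 * target and leaves "readline" as the main string.
 */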
static const char *
parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
{
	char *text = strdup(config_str);
	char *sep, *line;
	const char *main_str = NULL;
	int err = 0;

	if (!text) {
		pr_debug("Not enough memory: dup config_str failed\n");
		return ERR_PTR(-ENOMEM);
	}

	line = text;
	while ((sep = strchr(line, ';'))) {
		char *equ;

		*sep = '\0';
		equ = strchr(line, '=');
		if (!equ) {
			pr_warning("WARNING: invalid config in BPF object: %s\n",
				   line);
			pr_warning("\tShould be 'key=value'.\n");
			goto nextline;
		}
		*equ = '\0';

		err = do_prog_config(line, equ + 1, pev);
		if (err)
			break;
nextline:
		line = sep + 1;
	}

	if (!err)
		main_str = config_str + (line - text);
	free(text);

	return err ? ERR_PTR(err) : main_str;
}

static int
parse_prog_config(const char *config_str, const char **p_main_str,
		  bool *is_tp, struct perf_probe_event *pev)
{
	int err;
	const char *main_str = parse_prog_config_kvpair(config_str, pev);

	if (IS_ERR(main_str))
		return PTR_ERR(main_str);

	*p_main_str = main_str;
	if (!strchr(main_str, '=')) {
		/* Is this a tracepoint event? */
		const char *s = strchr(main_str, ':');

		if (!s) {
			pr_debug("bpf: '%s' is not a valid tracepoint\n",
				 config_str);
			return -BPF_LOADER_ERRNO__CONFIG;
		}

		*is_tp = true;
		return 0;
	}

	*is_tp = false;
	err = parse_perf_probe_command(main_str, pev);
	if (err < 0) {
		pr_debug("bpf: '%s' is not a valid config string\n",
			 config_str);
		/* parse failed, no need to clear pev. */
		return -BPF_LOADER_ERRNO__CONFIG;
	}
	return 0;
}

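/*
 * Derive the probe configuration for one BPF program from its ELF
 * section name. A section containing ':' but no '=' (for instance
 * "sched:sched_switch", illustrative only) is treated as a tracepoint;
 * anything else is parsed as a perf probe command. The result is
 * attached to the program as private data.
 */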
static int
config_bpf_program(struct bpf_program *prog)
{
	struct perf_probe_event *pev = NULL;
	struct bpf_prog_priv *priv = NULL;
	const char *config_str, *main_str;
	bool is_tp = false;
	int err;

	/* Initialize per-program probing settings */
	probe_conf.no_inlines = false;
	probe_conf.force_add = false;

	priv = calloc(1, sizeof(*priv));
	if (!priv) {
		pr_debug("bpf: failed to alloc priv\n");
		return -ENOMEM;
	}
	pev = &priv->pev;

	config_str = bpf_program__section_name(prog);
	pr_debug("bpf: config program '%s'\n", config_str);
	err = parse_prog_config(config_str, &main_str, &is_tp, pev);
	if (err)
		goto errout;

	if (is_tp) {
		char *s = strchr(main_str, ':');

		priv->is_tp = true;
		priv->sys_name = strndup(main_str, s - main_str);
		priv->evt_name = strdup(s + 1);
		goto set_priv;
	}

	if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
		pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
			 config_str, PERF_BPF_PROBE_GROUP);
		err = -BPF_LOADER_ERRNO__GROUP;
		goto errout;
	} else if (!pev->group)
		pev->group = strdup(PERF_BPF_PROBE_GROUP);

	if (!pev->group) {
		pr_debug("bpf: strdup failed\n");
		err = -ENOMEM;
		goto errout;
	}

	if (!pev->event) {
		pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
			 config_str);
		err = -BPF_LOADER_ERRNO__EVENTNAME;
		goto errout;
	}
	pr_debug("bpf: config '%s' is ok\n", config_str);

set_priv:
	err = bpf_program__set_priv(prog, priv, clear_prog_priv);
	if (err) {
		pr_debug("Failed to set priv for program '%s'\n", config_str);
		goto errout;
	}

	return 0;

errout:
	if (pev)
		clear_perf_probe_event(pev);
	free(priv);
	return err;
}

static int bpf__prepare_probe(void)
{
	static int err = 0;
	static bool initialized = false;

	/*
	 * Make err static, so that if init failed the first time,
	 * bpf__prepare_probe() fails on every later call without invoking
	 * init_probe_symbol_maps() multiple times.
	 */
	if (initialized)
		return err;

	initialized = true;
	err = init_probe_symbol_maps(false);
	if (err < 0)
		pr_debug("Failed to init_probe_symbol_maps\n");
	probe_conf.max_probes = MAX_PROBES;
	return err;
}

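/*
 * libbpf pre-processor callback: for prologue type 'n', generate the
 * argument-fetching prologue in front of the program's original
 * instructions and hand the combined buffer back through 'res'.
 */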
static int
preproc_gen_prologue(struct bpf_program *prog, int n,
		     struct bpf_insn *orig_insns, int orig_insns_cnt,
		     struct bpf_prog_prep_result *res)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct probe_trace_event *tev;
	struct perf_probe_event *pev;
	struct bpf_insn *buf;
	size_t prologue_cnt = 0;
	int i, err;

	if (IS_ERR(priv) || !priv || priv->is_tp)
		goto errout;

	pev = &priv->pev;

	if (n < 0 || n >= priv->nr_types)
		goto errout;

	/* Find a tev that belongs to that type */
	for (i = 0; i < pev->ntevs; i++) {
		if (priv->type_mapping[i] == n)
			break;
	}

	if (i >= pev->ntevs) {
		pr_debug("Internal error: prologue type %d not found\n", n);
		return -BPF_LOADER_ERRNO__PROLOGUE;
	}

	tev = &pev->tevs[i];

	buf = priv->insns_buf;
	err = bpf__gen_prologue(tev->args, tev->nargs,
				buf, &prologue_cnt,
				BPF_MAXINSNS - orig_insns_cnt);
	if (err) {
		const char *title;

		title = bpf_program__section_name(prog);
		pr_debug("Failed to generate prologue for program %s\n",
			 title);
		return err;
	}

	memcpy(&buf[prologue_cnt], orig_insns,
	       sizeof(struct bpf_insn) * orig_insns_cnt);

	res->new_insn_ptr = buf;
	res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
	res->pfd = NULL;
	return 0;

errout:
	pr_debug("Internal error in preproc_gen_prologue\n");
	return -BPF_LOADER_ERRNO__PROLOGUE;
}

/*
 * compare_tev_args is reflexive, transitive and antisymmetric.
 * I can prove it but this margin is too narrow to contain the proof.
 */
static int compare_tev_args(const void *ptev1, const void *ptev2)
{
	int i, ret;
	const struct probe_trace_event *tev1 =
		*(const struct probe_trace_event **)ptev1;
	const struct probe_trace_event *tev2 =
		*(const struct probe_trace_event **)ptev2;

	ret = tev2->nargs - tev1->nargs;
	if (ret)
		return ret;

	for (i = 0; i < tev1->nargs; i++) {
		struct probe_trace_arg *arg1, *arg2;
		struct probe_trace_arg_ref *ref1, *ref2;

		arg1 = &tev1->args[i];
		arg2 = &tev2->args[i];

		ret = strcmp(arg1->value, arg2->value);
		if (ret)
			return ret;

		ref1 = arg1->ref;
		ref2 = arg2->ref;

		while (ref1 && ref2) {
			ret = ref2->offset - ref1->offset;
			if (ret)
				return ret;

			ref1 = ref1->next;
			ref2 = ref2->next;
		}

		if (ref1 || ref2)
			return ref2 ? 1 : -1;
	}

	return 0;
}

/*
 * Assign a type number to each tev in a pev: tevs with identical
 * argument lists share a type (and hence a prologue). 'mapping' is an
 * array with one slot per tev in that pev; 'nr_types' is set to the
 * number of distinct types found.
 */
static int map_prologue(struct perf_probe_event *pev, int *mapping,
			int *nr_types)
{
	int i, type = 0;
	struct probe_trace_event **ptevs;

	size_t array_sz = sizeof(*ptevs) * pev->ntevs;

	ptevs = malloc(array_sz);
	if (!ptevs) {
		pr_debug("Not enough memory: alloc ptevs failed\n");
		return -ENOMEM;
	}

	pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
	for (i = 0; i < pev->ntevs; i++)
		ptevs[i] = &pev->tevs[i];

	qsort(ptevs, pev->ntevs, sizeof(*ptevs),
	      compare_tev_args);

	for (i = 0; i < pev->ntevs; i++) {
		int n;

		n = ptevs[i] - pev->tevs;
		if (i == 0) {
			mapping[n] = type;
			pr_debug("mapping[%d]=%d\n", n, type);
			continue;
		}

		if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
			mapping[n] = type;
		else
			mapping[n] = ++type;

		pr_debug("mapping[%d]=%d\n", n, mapping[n]);
	}
	free(ptevs);
	*nr_types = type + 1;

	return 0;
}

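/*
 * Decide whether this program needs a prologue at all, i.e. whether any
 * resolved tev carries arguments. If so, allocate the instruction
 * buffer and the tev-to-type mapping, then register
 * preproc_gen_prologue() so the prologue is built at load time.
 */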
static int hook_load_preprocessor(struct bpf_program *prog)
{
	struct bpf_prog_priv *priv = bpf_program__priv(prog);
	struct perf_probe_event *pev;
	bool need_prologue = false;
	int err, i;

	if (IS_ERR(priv) || !priv) {
		pr_debug("Internal error when hooking preprocessor\n");
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (priv->is_tp) {
		priv->need_prologue = false;
		return 0;
	}

	pev = &priv->pev;
	for (i = 0; i < pev->ntevs; i++) {
		struct probe_trace_event *tev = &pev->tevs[i];

		if (tev->nargs > 0) {
			need_prologue = true;
			break;
		}
	}

	/*
	 * Since none of the tevs has an argument, we don't need to
	 * generate a prologue.
	 */
	if (!need_prologue) {
		priv->need_prologue = false;
		return 0;
	}

	priv->need_prologue = true;
	priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
	if (!priv->insns_buf) {
		pr_debug("Not enough memory: alloc insns_buf failed\n");
		return -ENOMEM;
	}

	priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
	if (!priv->type_mapping) {
		pr_debug("Not enough memory: alloc type_mapping failed\n");
		return -ENOMEM;
	}
	memset(priv->type_mapping, -1,
	       sizeof(int) * pev->ntevs);

	err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
	if (err)
		return err;

	err = bpf_program__set_prep(prog, priv->nr_types,
				    preproc_gen_prologue);
	return err;
}

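/*
 * For each program in the object: parse its section name, mark it as a
 * tracepoint or a kprobe/uprobe, convert and apply the perf probe
 * events, and hook the prologue pre-processor where needed. The probes
 * are removed again by bpf__unprobe().
 */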
int bpf__probe(struct bpf_object *obj)
{
	int err = 0;
	struct bpf_program *prog;
	struct bpf_prog_priv *priv;
	struct perf_probe_event *pev;

	err = bpf__prepare_probe();
	if (err) {
		pr_debug("bpf__prepare_probe failed\n");
		return err;
	}

	bpf_object__for_each_program(prog, obj) {
		err = config_bpf_program(prog);
		if (err)
			goto out;

		priv = bpf_program__priv(prog);
		if (IS_ERR(priv) || !priv) {
			err = PTR_ERR(priv);
			goto out;
		}

		if (priv->is_tp) {
			bpf_program__set_tracepoint(prog);
			continue;
		}

		bpf_program__set_kprobe(prog);
		pev = &priv->pev;

		err = convert_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to convert perf probe events\n");
			goto out;
		}

		err = apply_perf_probe_events(pev, 1);
		if (err < 0) {
			pr_debug("bpf_probe: failed to apply perf probe events\n");
			goto out;
		}

		/*
		 * After probing, consider the prologue, which adds an
		 * argument fetcher to BPF programs.
		 *
		 * hook_load_preprocessor() hooks a pre-processor to the
		 * bpf_program, letting it generate the prologue dynamically
		 * during loading.
		 */
		err = hook_load_preprocessor(prog);
		if (err)
			goto out;
	}
out:
	return err < 0 ? err : 0;
}

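/*
 * Remove every probe that bpf__probe() created for this object by
 * deleting the "group:event" entries via del_perf_probe_events().
 * Continues past individual failures and returns the last error seen.
 */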
#define EVENTS_WRITE_BUFSIZE	4096
int bpf__unprobe(struct bpf_object *obj)
{
	int err, ret = 0;
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		int i;

		if (IS_ERR(priv) || !priv || priv->is_tp)
			continue;

		for (i = 0; i < priv->pev.ntevs; i++) {
			struct probe_trace_event *tev = &priv->pev.tevs[i];
			char name_buf[EVENTS_WRITE_BUFSIZE];
			struct strfilter *delfilter;

			snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
				 "%s:%s", tev->group, tev->event);
			name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';

			delfilter = strfilter__new(name_buf, NULL);
			if (!delfilter) {
				pr_debug("Failed to create filter for unprobing\n");
				ret = -ENOMEM;
				continue;
			}

			err = del_perf_probe_events(delfilter);
			strfilter__delete(delfilter);
			if (err) {
				pr_debug("Failed to delete %s\n", name_buf);
				ret = err;
				continue;
			}
		}
	}
	return ret;
}

int bpf__load(struct bpf_object *obj)
{
	int err;

	err = bpf_object__load(obj);
	if (err) {
		char bf[128];
		libbpf_strerror(err, bf, sizeof(bf));
		pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
		return err;
	}
	return 0;
}

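/*
 * Call 'func' once per attachable entity in the object: once per
 * tracepoint program, and once per tev of a probe program (using the
 * per-type fd when a prologue was generated). Iteration stops at the
 * first callback error.
 */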
int bpf__foreach_event(struct bpf_object *obj,
		       bpf_prog_iter_callback_t func,
		       void *arg)
{
	struct bpf_program *prog;
	int err;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_prog_priv *priv = bpf_program__priv(prog);
		struct probe_trace_event *tev;
		struct perf_probe_event *pev;
		int i, fd;

		if (IS_ERR(priv) || !priv) {
			pr_debug("bpf: failed to get private field\n");
			return -BPF_LOADER_ERRNO__INTERNAL;
		}

		if (priv->is_tp) {
			fd = bpf_program__fd(prog);
			err = (*func)(priv->sys_name, priv->evt_name, fd, obj, arg);
			if (err) {
				pr_debug("bpf: tracepoint callback failed, stop iterating\n");
				return err;
			}
			continue;
		}

		pev = &priv->pev;
		for (i = 0; i < pev->ntevs; i++) {
			tev = &pev->tevs[i];

			if (priv->need_prologue) {
				int type = priv->type_mapping[i];

				fd = bpf_program__nth_fd(prog, type);
			} else {
				fd = bpf_program__fd(prog);
			}

			if (fd < 0) {
				pr_debug("bpf: failed to get file descriptor\n");
				return fd;
			}

			err = (*func)(tev->group, tev->event, fd, obj, arg);
			if (err) {
				pr_debug("bpf: callback failed, stop iterating\n");
				return err;
			}
		}
	}
	return 0;
}

enum bpf_map_op_type {
	BPF_MAP_OP_SET_VALUE,
	BPF_MAP_OP_SET_EVSEL,
};

enum bpf_map_key_type {
	BPF_MAP_KEY_ALL,
	BPF_MAP_KEY_RANGES,
};

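/*
 * Map configuration is recorded as a list of deferred operations. Each
 * bpf_map_op pairs a key selector (every slot, or explicit index
 * ranges) with either a literal value or an evsel whose fd should be
 * installed. The ops hang off the map as private data and are replayed
 * by bpf__apply_obj_config() once the maps exist in the kernel.
 */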
struct bpf_map_op {
	struct list_head list;
	enum bpf_map_op_type op_type;
	enum bpf_map_key_type key_type;
	union {
		struct parse_events_array array;
	} k;
	union {
		u64 value;
		struct evsel *evsel;
	} v;
};

struct bpf_map_priv {
	struct list_head ops_list;
};

static void
bpf_map_op__delete(struct bpf_map_op *op)
{
	if (!list_empty(&op->list))
		list_del_init(&op->list);
	if (op->key_type == BPF_MAP_KEY_RANGES)
		parse_events__clear_array(&op->k.array);
	free(op);
}

static void
bpf_map_priv__purge(struct bpf_map_priv *priv)
{
	struct bpf_map_op *pos, *n;

	list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
		list_del_init(&pos->list);
		bpf_map_op__delete(pos);
	}
}

static void
bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
		    void *_priv)
{
	struct bpf_map_priv *priv = _priv;

	bpf_map_priv__purge(priv);
	free(priv);
}

static int
bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
{
	op->key_type = BPF_MAP_KEY_ALL;
	if (!term)
		return 0;

	if (term->array.nr_ranges) {
		size_t memsz = term->array.nr_ranges *
				sizeof(op->k.array.ranges[0]);

		op->k.array.ranges = memdup(term->array.ranges, memsz);
		if (!op->k.array.ranges) {
			pr_debug("Not enough memory to alloc indices for map\n");
			return -ENOMEM;
		}
		op->key_type = BPF_MAP_KEY_RANGES;
		op->k.array.nr_ranges = term->array.nr_ranges;
	}
	return 0;
}

static struct bpf_map_op *
bpf_map_op__new(struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = zalloc(sizeof(*op));
	if (!op) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&op->list);

	err = bpf_map_op_setkey(op, term);
	if (err) {
		free(op);
		return ERR_PTR(err);
	}
	return op;
}

static struct bpf_map_op *
bpf_map_op__clone(struct bpf_map_op *op)
{
	struct bpf_map_op *newop;

	newop = memdup(op, sizeof(*op));
	if (!newop) {
		pr_debug("Failed to alloc bpf_map_op\n");
		return NULL;
	}

	INIT_LIST_HEAD(&newop->list);
	if (op->key_type == BPF_MAP_KEY_RANGES) {
		size_t memsz = op->k.array.nr_ranges *
			       sizeof(op->k.array.ranges[0]);

		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
		if (!newop->k.array.ranges) {
			pr_debug("Failed to alloc indices for map\n");
			free(newop);
			return NULL;
		}
	}

	return newop;
}

static struct bpf_map_priv *
bpf_map_priv__clone(struct bpf_map_priv *priv)
{
	struct bpf_map_priv *newpriv;
	struct bpf_map_op *pos, *newop;

	newpriv = zalloc(sizeof(*newpriv));
	if (!newpriv) {
		pr_debug("Not enough memory to alloc map private\n");
		return NULL;
	}
	INIT_LIST_HEAD(&newpriv->ops_list);

	list_for_each_entry(pos, &priv->ops_list, list) {
		newop = bpf_map_op__clone(pos);
		if (!newop) {
			bpf_map_priv__purge(newpriv);
			return NULL;
		}
		list_add_tail(&newop->list, &newpriv->ops_list);
	}

	return newpriv;
}

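/*
 * Append an op to the map's private ops list, allocating and attaching
 * the bpf_map_priv on first use. Queued ops are freed together with the
 * map's private data by bpf_map_priv__clear().
 */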
static int
bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
{
	const char *map_name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("Failed to get private from map %s\n", map_name);
		return PTR_ERR(priv);
	}

	if (!priv) {
		priv = zalloc(sizeof(*priv));
		if (!priv) {
			pr_debug("Not enough memory to alloc map private\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&priv->ops_list);

		if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
			free(priv);
			return -BPF_LOADER_ERRNO__INTERNAL;
		}
	}

	list_add_tail(&op->list, &priv->ops_list);
	return 0;
}

static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *op;
	int err;

	op = bpf_map_op__new(term);
	if (IS_ERR(op))
		return op;

	err = bpf_map__add_op(map, op);
	if (err) {
		bpf_map_op__delete(op);
		return ERR_PTR(err);
	}
	return op;
}

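/*
 * Queue a SET_VALUE op after sanity checking the target: the map must
 * be a BPF_MAP_TYPE_ARRAY whose key is at least as wide as an unsigned
 * int and whose value size is 1, 2, 4 or 8 bytes.
 */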
static int
__bpf_map__config_value(struct bpf_map *map,
			struct parse_events_term *term)
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	const struct bpf_map_def *def = bpf_map__def(map);

	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (def->type != BPF_MAP_TYPE_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}
	if (def->key_size < sizeof(unsigned int)) {
		pr_debug("Map %s has incorrect key size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
	}
	switch (def->value_size) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		pr_debug("Map %s has incorrect value size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_VALUE;
	op->v.value = term->val.num;
	return 0;
}

static int
bpf_map__config_value(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct evlist *evlist __maybe_unused)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
		pr_debug("ERROR: wrong value type for 'value'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_value(map, term);
}

static int
__bpf_map__config_event(struct bpf_map *map,
			struct parse_events_term *term,
			struct evlist *evlist)
{
	struct evsel *evsel;
	const struct bpf_map_def *def;
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);

	evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
	if (!evsel) {
		pr_debug("Event (for '%s') '%s' doesn't exist\n",
			 map_name, term->val.str);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return PTR_ERR(def);
	}

	/*
	 * No need to check key_size and value_size:
	 * the kernel has already checked them.
	 */
	if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_EVSEL;
	op->v.evsel = evsel;
	return 0;
}

static int
bpf_map__config_event(struct bpf_map *map,
		      struct parse_events_term *term,
		      struct evlist *evlist)
{
	if (!term->err_val) {
		pr_debug("Config value not set\n");
		return -BPF_LOADER_ERRNO__OBJCONF_CONF;
	}

	if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
		pr_debug("ERROR: wrong value type for 'event'\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
	}

	return __bpf_map__config_event(map, term, evlist);
}

struct bpf_obj_config__map_func {
	const char *config_opt;
	int (*config_func)(struct bpf_map *, struct parse_events_term *,
			   struct evlist *);
};

struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
	{"value", bpf_map__config_value},
	{"event", bpf_map__config_event},
};

static int
config_map_indices_range_check(struct parse_events_term *term,
			       struct bpf_map *map,
			       const char *map_name)
{
	struct parse_events_array *array = &term->array;
	const struct bpf_map_def *def;
	unsigned int i;

	if (!array->nr_ranges)
		return 0;
	if (!array->ranges) {
		pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
			 map_name, (int)array->nr_ranges);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	for (i = 0; i < array->nr_ranges; i++) {
		unsigned int start = array->ranges[i].start;
		size_t length = array->ranges[i].length;
		unsigned int idx = start + length - 1;

		if (idx >= def->max_entries) {
			pr_debug("ERROR: index %d too large\n", idx);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
		}
	}
	return 0;
}

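/*
 * Handle one "map:<mapname>.<config opt>" term, as produced by event
 * syntax along the lines of 'perf record -e ./prog.c/map:channel.event=mypmu/'
 * (illustrative example). key_scan_pos is maintained so error messages
 * can point at the offending part of the term.
 */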
static int
bpf__obj_config_map(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct evlist *evlist,
		    int *key_scan_pos)
{
	/* key is "map:<mapname>.<config opt>" */
	char *map_name = strdup(term->config + sizeof("map:") - 1);
	struct bpf_map *map;
	int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
	char *map_opt;
	size_t i;

	if (!map_name)
		return -ENOMEM;

	map_opt = strchr(map_name, '.');
	if (!map_opt) {
		pr_debug("ERROR: Invalid map config: %s\n", map_name);
		goto out;
	}

	*map_opt++ = '\0';
	if (*map_opt == '\0') {
		pr_debug("ERROR: Invalid map option: %s\n", term->config);
		goto out;
	}

	map = bpf_object__find_map_by_name(obj, map_name);
	if (!map) {
		pr_debug("ERROR: Map %s doesn't exist\n", map_name);
		err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
		goto out;
	}

	*key_scan_pos += strlen(map_opt);
	err = config_map_indices_range_check(term, map, map_name);
	if (err)
		goto out;
	*key_scan_pos -= strlen(map_opt);

	for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
		struct bpf_obj_config__map_func *func =
				&bpf_obj_config__map_funcs[i];

		if (strcmp(map_opt, func->config_opt) == 0) {
			err = func->config_func(map, term, evlist);
			goto out;
		}
	}

	pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
	err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
	if (!err)
		*key_scan_pos += strlen(map_opt);

	free(map_name);
	return err;
}

int bpf__config_obj(struct bpf_object *obj,
		    struct parse_events_term *term,
		    struct evlist *evlist,
		    int *error_pos)
{
	int key_scan_pos = 0;
	int err;

	if (!obj || !term || !term->config)
		return -EINVAL;

	if (strstarts(term->config, "map:")) {
		key_scan_pos = sizeof("map:") - 1;
		err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
		goto out;
	}
	err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
out:
	if (error_pos)
		*error_pos = key_scan_pos;
	return err;
}

typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map_def *pdef,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);

static int
foreach_key_array_all(map_config_func_t func,
		      void *arg, const char *name,
		      int map_fd, const struct bpf_map_def *pdef,
		      struct bpf_map_op *op)
{
	unsigned int i;
	int err;

	for (i = 0; i < pdef->max_entries; i++) {
		err = func(name, map_fd, pdef, op, &i, arg);
		if (err) {
			pr_debug("ERROR: failed to insert value to %s[%u]\n",
				 name, i);
			return err;
		}
	}
	return 0;
}

static int
foreach_key_array_ranges(map_config_func_t func, void *arg,
			 const char *name, int map_fd,
			 const struct bpf_map_def *pdef,
			 struct bpf_map_op *op)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < op->k.array.nr_ranges; i++) {
		unsigned int start = op->k.array.ranges[i].start;
		size_t length = op->k.array.ranges[i].length;

		for (j = 0; j < length; j++) {
			unsigned int idx = start + j;

			err = func(name, map_fd, pdef, op, &idx, arg);
			if (err) {
				pr_debug("ERROR: failed to insert value to %s[%u]\n",
					 name, idx);
				return err;
			}
		}
	}
	return 0;
}

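/*
 * Expand every queued op on a map into per-key invocations of 'func':
 * BPF_MAP_KEY_ALL walks all max_entries slots, BPF_MAP_KEY_RANGES only
 * the user-supplied index ranges. Only array-backed map types are
 * accepted here.
 */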
static int
bpf_map_config_foreach_key(struct bpf_map *map,
			   map_config_func_t func,
			   void *arg)
{
	int err, map_fd;
	struct bpf_map_op *op;
	const struct bpf_map_def *def;
	const char *name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("ERROR: failed to get private from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	if (!priv || list_empty(&priv->ops_list)) {
		pr_debug("INFO: nothing to config for map %s\n", name);
		return 0;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: failed to get definition from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	map_fd = bpf_map__fd(map);
	if (map_fd < 0) {
		pr_debug("ERROR: failed to get fd from map %s\n", name);
		return map_fd;
	}

	list_for_each_entry(op, &priv->ops_list, list) {
		switch (def->type) {
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
			switch (op->key_type) {
			case BPF_MAP_KEY_ALL:
				err = foreach_key_array_all(func, arg, name,
							    map_fd, def, op);
				break;
			case BPF_MAP_KEY_RANGES:
				err = foreach_key_array_ranges(func, arg, name,
							       map_fd, def,
							       op);
				break;
			default:
				pr_debug("ERROR: keytype for map '%s' invalid\n",
					 name);
				return -BPF_LOADER_ERRNO__INTERNAL;
			}
			if (err)
				return err;
			break;
		default:
			pr_debug("ERROR: type of '%s' incorrect\n", name);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
		}
	}

	return 0;
}

static int
apply_config_value_for_key(int map_fd, void *pkey,
			   size_t val_size, u64 val)
{
	int err = 0;

	switch (val_size) {
	case 1: {
		u8 _val = (u8)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 2: {
		u16 _val = (u16)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 4: {
		u32 _val = (u32)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 8: {
		err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
		break;
	}
	default:
		pr_debug("ERROR: invalid value size\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}
	if (err && errno)
		err = -errno;
	return err;
}

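/*
 * Install an evsel's perf event fd into one slot of a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY. The evsel must already be open,
 * single-dimensional, not inherited, and of a type BPF output can
 * redirect to (bpf-output, raw or hardware events).
 */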
static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
			   struct evsel *evsel)
{
	struct xyarray *xy = evsel->core.fd;
	struct perf_event_attr *attr;
	unsigned int key, events;
	bool check_pass = false;
	int *evt_fd;
	int err;

	if (!xy) {
		pr_debug("ERROR: evsel not ready for map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (xy->row_size / xy->entry_size != 1) {
		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
			 name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
	}

	attr = &evsel->core.attr;
	if (attr->inherit) {
		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
	}

	if (evsel__is_bpf_output(evsel))
		check_pass = true;
	if (attr->type == PERF_TYPE_RAW)
		check_pass = true;
	if (attr->type == PERF_TYPE_HARDWARE)
		check_pass = true;
	if (!check_pass) {
		pr_debug("ERROR: Event type is wrong for map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
	}

	events = xy->entries / (xy->row_size / xy->entry_size);
	key = *((unsigned int *)pkey);
	if (key >= events) {
		pr_debug("ERROR: there is no event %d for map %s\n",
			 key, name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
	}
	evt_fd = xyarray__entry(xy, key, 0);
	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
	if (err && errno)
		err = -errno;
	return err;
}

static int
apply_obj_config_map_for_key(const char *name, int map_fd,
			     const struct bpf_map_def *pdef,
			     struct bpf_map_op *op,
			     void *pkey, void *arg __maybe_unused)
{
	int err;

	switch (op->op_type) {
	case BPF_MAP_OP_SET_VALUE:
		err = apply_config_value_for_key(map_fd, pkey,
						 pdef->value_size,
						 op->v.value);
		break;
	case BPF_MAP_OP_SET_EVSEL:
		err = apply_config_evsel_for_key(name, map_fd, pkey,
						 op->v.evsel);
		break;
	default:
		pr_debug("ERROR: unknown value type for '%s'\n", name);
		err = -BPF_LOADER_ERRNO__INTERNAL;
	}
	return err;
}

static int
apply_obj_config_map(struct bpf_map *map)
{
	return bpf_map_config_foreach_key(map,
					  apply_obj_config_map_for_key,
					  NULL);
}

static int
apply_obj_config_object(struct bpf_object *obj)
{
	struct bpf_map *map;
	int err;

	bpf_object__for_each_map(map, obj) {
		err = apply_obj_config_map(map);
		if (err)
			return err;
	}
	return 0;
}

int bpf__apply_obj_config(void)
{
	struct bpf_object *obj, *tmp;
	int err;

	bpf_object__for_each_safe(obj, tmp) {
		err = apply_obj_config_object(obj);
		if (err)
			return err;
	}

	return 0;
}

#define bpf__for_each_map(pos, obj, objtmp)	\
	bpf_object__for_each_safe(obj, objtmp)	\
		bpf_object__for_each_map(pos, obj)

#define bpf__for_each_map_named(pos, obj, objtmp, name)	\
	bpf__for_each_map(pos, obj, objtmp)		\
		if (bpf_map__name(pos) &&		\
			(strcmp(name,			\
				bpf_map__name(pos)) == 0))

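/*
 * Make sure every map named 'name' across all loaded objects is
 * configured for output: reuse the first configured map's ops as a
 * template, or create a fresh "bpf-output/no-inherit=1,name=<name>/"
 * event and queue SET_EVSEL ops for the maps that have none. Returns
 * the newly created evsel (if one was needed), NULL otherwise, or an
 * ERR_PTR() on failure.
 */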
struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_object *obj, *tmp;
	struct evsel *evsel = NULL;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	bpf__for_each_map_named(map, obj, tmp, name) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);

		/*
		 * No need to check map type: type should have been
		 * verified by kernel.
		 */
		if (!need_init && !priv)
			need_init = !priv;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return NULL;

	if (!tmpl_priv) {
		char *event_definition = NULL;

		if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
			return ERR_PTR(-ENOMEM);

		err = parse_events(evlist, event_definition, NULL);
		free(event_definition);

		if (err) {
			pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
			return ERR_PTR(-err);
		}

		evsel = evlist__last(evlist);
	}

	bpf__for_each_map_named(map, obj, tmp, name) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
		if (priv)
			continue;

		if (tmpl_priv) {
			priv = bpf_map_priv__clone(tmpl_priv);
			if (!priv)
				return ERR_PTR(-ENOMEM);

			err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
			if (err) {
				bpf_map_priv__clear(map, priv);
				return ERR_PTR(err);
			}
		} else if (evsel) {
			struct bpf_map_op *op;

			op = bpf_map__add_newop(map, NULL);
			if (IS_ERR(op))
				return ERR_CAST(op);
			op->op_type = BPF_MAP_OP_SET_EVSEL;
			op->v.evsel = evsel;
		}
	}

	return evsel;
}

int bpf__setup_stdout(struct evlist *evlist)
{
	struct evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");
	return PTR_ERR_OR_ZERO(evsel);
}

#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)

static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)]	= "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bound for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)]	= "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)]	= "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)]	= "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]	= "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]	= "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]	= "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]	= "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)]	= "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]	= "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]	= "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]	= "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]	= "Doesn't support inherit event",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]	= "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]	= "Index too large",
};

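/*
 * Translate an error number into a human-readable string, dispatching
 * between the libbpf error range, the loader's own table above and
 * plain errno values. Returns 0 when the code was recognized, -1
 * otherwise.
 */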
static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	const char *msg;

	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err >= __LIBBPF_ERRNO__START)
		return libbpf_strerror(err, buf, size);

	if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
		msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	if (err >= __BPF_LOADER_ERRNO__END)
		snprintf(buf, size, "Unknown bpf loader error %d", err);
	else
		snprintf(buf, size, "%s",
			 str_error_r(err, sbuf, sizeof(sbuf)));

	buf[size - 1] = '\0';
	return -1;
}

#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';

int bpf__strerror_prepare_load(const char *filename, bool source,
			       int err, char *buf, size_t size)
{
	size_t n;
	int ret;

	n = snprintf(buf, size, "Failed to load %s%s: ",
		     filename, source ? " from source" : "");
	if (n >= size) {
		buf[size - 1] = '\0';
		return 0;
	}
	buf += n;
	size -= n;

	ret = bpf_loader_strerror(err, buf, size);
	buf[size - 1] = '\0';
	return ret;
}

int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
		scnprintf(buf, size, "%s (add -v to see detail)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_load(struct bpf_object *obj,
		       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case LIBBPF_ERRNO__KVER: {
		unsigned int obj_kver = bpf_object__kversion(obj);
		unsigned int real_kver;

		if (fetch_kernel_version(&real_kver, NULL, 0)) {
			scnprintf(buf, size, "Unable to fetch kernel version");
			break;
		}

		if (obj_kver != real_kver) {
			scnprintf(buf, size,
				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
				  KVER_PARAM(obj_kver),
				  KVER_PARAM(real_kver));
			break;
		}

		scnprintf(buf, size, "Failed to load program for unknown reason");
		break;
	}
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
			     struct parse_events_term *term __maybe_unused,
			     struct evlist *evlist __maybe_unused,
			     int *error_pos __maybe_unused, int err,
			     char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
			    "Can't use this config term with this map type");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
			    "Cannot set event to BPF map in multi-thread tracing");
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
			    "%s (Hint: use -i to turn off inherit)", emsg);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
			    "Can only put raw, hardware and BPF output event into a BPF map");
	bpf__strerror_end(buf, size);
	return 0;
}

int bpf__strerror_setup_output_event(struct evlist *evlist __maybe_unused,
				     int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}