// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <stdlib.h>
#include <string.h>
#include "strbuf.h"

struct perf_env perf_env;

#ifdef HAVE_LIBBPF_SUPPORT
#include "bpf-event.h"
#include "bpf-utils.h"
#include <bpf/libbpf.h>

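/*
 * Insert a BPF prog info node into the env->bpf_progs.infos rb-tree,
 * keyed by program id, while holding the bpf_progs lock for writing.
 */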
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	down_write(&env->bpf_progs.lock);
	__perf_env__insert_bpf_prog_info(env, info_node);
	up_write(&env->bpf_progs.lock);
}

void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			return;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
}

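/* Look up a BPF prog info node by program id, under the read lock. */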
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

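/*
 * Insert a BTF node into the env->bpf_progs.btfs rb-tree, keyed by BTF id,
 * under the write lock. Returns false if a node with the same id exists.
 */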
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	bool ret;

	down_write(&env->bpf_progs.lock);
	ret = __perf_env__insert_btf(env, btf_node);
	up_write(&env->bpf_progs.lock);
	return ret;
}

bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	struct rb_node *parent = NULL;
	__u32 btf_id = btf_node->id;
	struct btf_node *node;
	struct rb_node **p;

	p = &env->bpf_progs.btfs.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct btf_node, rb_node);
		if (btf_id < node->id) {
			p = &(*p)->rb_left;
		} else if (btf_id > node->id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated btf %u\n", btf_id);
			return false;
		}
	}

	rb_link_node(&btf_node->rb_node, parent, p);
	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
	env->bpf_progs.btfs_cnt++;
	return true;
}

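/* Look up a BTF node by id, under the read lock. */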
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *res;

	down_read(&env->bpf_progs.lock);
	res = __perf_env__find_btf(env, btf_id);
	up_read(&env->bpf_progs.lock);
	return res;
}

struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *node = NULL;
	struct rb_node *n;

	n = env->bpf_progs.btfs.rb_node;

	while (n) {
		node = rb_entry(n, struct btf_node, rb_node);
		if (btf_id < node->id)
			n = n->rb_left;
		else if (btf_id > node->id)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node->info_linear);
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.btfs_cnt = 0;

	up_write(&env->bpf_progs.lock);
}
#else // HAVE_LIBBPF_SUPPORT
static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
{
}
#endif // HAVE_LIBBPF_SUPPORT

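/* Free everything the perf_env owns; the struct itself is not freed. */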
void perf_env__exit(struct perf_env *env)
{
	int i, j;

	perf_env__purge_bpf(env);
	perf_env__purge_cgroups(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_dies);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
	for (i = 0; i < env->nr_cpu_pmu_caps; i++)
		zfree(&env->cpu_pmu_caps[i]);
	zfree(&env->cpu_pmu_caps);
	zfree(&env->numa_map);

	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);

	for (i = 0; i < env->nr_hybrid_nodes; i++) {
		zfree(&env->hybrid_nodes[i].pmu_name);
		zfree(&env->hybrid_nodes[i].cpus);
	}
	zfree(&env->hybrid_nodes);

	for (i = 0; i < env->nr_pmus_with_caps; i++) {
		for (j = 0; j < env->pmu_caps[i].nr_caps; j++)
			zfree(&env->pmu_caps[i].caps[j]);
		zfree(&env->pmu_caps[i].caps);
		zfree(&env->pmu_caps[i].pmu_name);
	}
	zfree(&env->pmu_caps);
}

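/* Initialize an empty perf_env: empty BPF rb-trees, kernel word size unknown. */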
void perf_env__init(struct perf_env *env)
{
#ifdef HAVE_LIBBPF_SUPPORT
	env->bpf_progs.infos = RB_ROOT;
	env->bpf_progs.btfs = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
#endif
	env->kernel_is_64_bit = -1;
}

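/* Derive the kernel word size from the raw architecture string. */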
static void perf_env__init_kernel_mode(struct perf_env *env)
{
	const char *arch = perf_env__raw_arch(env);

	if (!strncmp(arch, "x86_64", 6) || !strncmp(arch, "aarch64", 7) ||
	    !strncmp(arch, "arm64", 5) || !strncmp(arch, "mips64", 6) ||
	    !strncmp(arch, "parisc64", 8) || !strncmp(arch, "riscv64", 7) ||
	    !strncmp(arch, "s390x", 5) || !strncmp(arch, "sparc64", 7))
		env->kernel_is_64_bit = 1;
	else
		env->kernel_is_64_bit = 0;
}

int perf_env__kernel_is_64_bit(struct perf_env *env)
{
	if (env->kernel_is_64_bit == -1)
		perf_env__init_kernel_mode(env);

	return env->kernel_is_64_bit;
}

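/* Record a duplicated copy of the perf command line in env->cmdline_argv. */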
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy argv contents because it gets moved around during option
	 * parsing:
	 */
	for (i = 0; i < argc ; i++) {
		env->cmdline_argv[i] = strdup(argv[i]);
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}

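/* Populate env->cpu with core/socket/die ids for every present CPU. */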
int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int idx, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu().cpu;

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (idx = 0; idx < nr_cpus; ++idx) {
		struct perf_cpu cpu = { .cpu = idx };

		env->cpu[idx].core_id	= cpu__get_core_id(cpu);
		env->cpu[idx].socket_id	= cpu__get_socket_id(cpu);
		env->cpu[idx].die_id	= cpu__get_die_id(cpu);
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}

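/*
 * Build the "type:name" PMU mapping strings, one NUL-terminated entry per
 * named PMU, concatenated into env->pmu_mappings.
 */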
int perf_env__read_pmu_mappings(struct perf_env *env)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	struct strbuf sb;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}
	if (!pmu_num) {
		pr_debug("pmu mappings not available\n");
		return -ENOENT;
	}
	env->nr_pmu_mappings = pmu_num;

	if (strbuf_init(&sb, 128 * pmu_num) < 0)
		return -ENOMEM;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		if (strbuf_addf(&sb, "%u:%s", pmu->type, pmu->name) < 0)
			goto error;
		/* include a NULL character at the end */
		if (strbuf_add(&sb, "", 1) < 0)
			goto error;
	}

	env->pmu_mappings = strbuf_detach(&sb, NULL);

	return 0;

error:
	strbuf_release(&sb);
	return -1;
}

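/* Read the local cpuid string and store a duplicated copy in env->cpuid. */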
int perf_env__read_cpuid(struct perf_env *env)
{
	char cpuid[128];
	int err = get_cpuid(cpuid, sizeof(cpuid));

	if (err)
		return err;

	free(env->cpuid);
	env->cpuid = strdup(cpuid);
	if (env->cpuid == NULL)
		return ENOMEM;
	return 0;
}

static int perf_env__read_arch(struct perf_env *env)
{
	struct utsname uts;

	if (env->arch)
		return 0;

	if (!uname(&uts))
		env->arch = strdup(uts.machine);

	return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu().cpu;

	return env->nr_cpus_avail ? 0 : -ENOENT;
}

const char *perf_env__raw_arch(struct perf_env *env)
{
	return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

int perf_env__nr_cpus_avail(struct perf_env *env)
{
	return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	zfree(&cache->type);
	zfree(&cache->map);
	zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strncmp(arch, "aarch64", 7) || !strncmp(arch, "arm64", 5))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";

	return arch;
}

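/*
 * Return the normalized architecture name, falling back to the local
 * uname machine string when the environment has no recorded arch.
 */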
const char *perf_env__arch(struct perf_env *env)
{
	char *arch_name;

	if (!env || !env->arch) { /* Assume local operation */
		static struct utsname uts = { .machine[0] = '\0', };
		if (uts.machine[0] == '\0' && uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}

const char *perf_env__cpuid(struct perf_env *env)
{
	int status;

	if (!env || !env->cpuid) { /* Assume local operation */
		status = perf_env__read_cpuid(env);
		if (status)
			return NULL;
	}

	return env->cpuid;
}

int perf_env__nr_pmu_mappings(struct perf_env *env)
{
	int status;

	if (!env || !env->nr_pmu_mappings) { /* Assume local operation */
		status = perf_env__read_pmu_mappings(env);
		if (status)
			return 0;
	}

	return env->nr_pmu_mappings;
}

const char *perf_env__pmu_mappings(struct perf_env *env)
{
	int status;

	if (!env || !env->pmu_mappings) { /* Assume local operation */
		status = perf_env__read_pmu_mappings(env);
		if (status)
			return NULL;
	}

	return env->pmu_mappings;
}

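/*
 * Map a CPU to its NUMA node, building the cpu -> node lookup table on
 * first use. CPUs not covered by any node map to -1.
 */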
int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, perf_cpu_map__max(nn->map).cpu);
		}

		nr++;

		/*
		 * We initialize the numa_map array to prepare
		 * it for missing cpus, which return node -1
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			struct perf_cpu tmp;
			int j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(tmp, j, nn->map)
				env->numa_map[tmp.cpu] = i;
		}
	}

	return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1;
}

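/*
 * Find the value of a "cap=value" capability string for the given PMU.
 * Returns a pointer to the value part, or NULL if the cap is not found.
 */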
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
			     const char *cap)
{
	char *cap_eq;
	int cap_size;
	char **ptr;
	int i, j;

	if (!pmu_name || !cap)
		return NULL;

	cap_size = strlen(cap);
	cap_eq = zalloc(cap_size + 2);
	if (!cap_eq)
		return NULL;

	memcpy(cap_eq, cap, cap_size);
	cap_eq[cap_size] = '=';

	if (!strcmp(pmu_name, "cpu")) {
		for (i = 0; i < env->nr_cpu_pmu_caps; i++) {
			if (!strncmp(env->cpu_pmu_caps[i], cap_eq, cap_size + 1)) {
				free(cap_eq);
				return &env->cpu_pmu_caps[i][cap_size + 1];
			}
		}
		goto out;
	}

	for (i = 0; i < env->nr_pmus_with_caps; i++) {
		if (strcmp(env->pmu_caps[i].pmu_name, pmu_name))
			continue;

		ptr = env->pmu_caps[i].caps;

		for (j = 0; j < env->pmu_caps[i].nr_caps; j++) {
			if (!strncmp(ptr[j], cap_eq, cap_size + 1)) {
				free(cap_eq);
				return &ptr[j][cap_size + 1];
			}
		}
	}

out:
	free(cap_eq);
	return NULL;
}