// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <stdlib.h>
#include <string.h>
#include "strbuf.h"

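/*
 * Default environment instance, describing the host perf itself is
 * running on.
 */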
struct perf_env perf_env;

#ifdef HAVE_LIBBPF_SUPPORT
#include "bpf-event.h"
#include <bpf/libbpf.h>

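/*
 * Insert a BPF program's info into the env's rbtree, keyed by program id.
 * The locked wrapper takes bpf_progs.lock for writing; the __ variant
 * assumes the caller already holds that lock.
 */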
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node)
{
	down_write(&env->bpf_progs.lock);
	__perf_env__insert_bpf_prog_info(env, info_node);
	up_write(&env->bpf_progs.lock);
}

void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
{
	__u32 prog_id = info_node->info_linear->info.id;
	struct bpf_prog_info_node *node;
	struct rb_node *parent = NULL;
	struct rb_node **p;

	p = &env->bpf_progs.infos.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id) {
			p = &(*p)->rb_left;
		} else if (prog_id > node->info_linear->info.id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated bpf prog info %u\n", prog_id);
			return;
		}
	}

	rb_link_node(&info_node->rb_node, parent, p);
	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
	env->bpf_progs.infos_cnt++;
}

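/*
 * Look up a BPF program's info by program id. Takes the reader side of
 * bpf_progs.lock; returns NULL if no matching program is known.
 */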
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id)
{
	struct bpf_prog_info_node *node = NULL;
	struct rb_node *n;

	down_read(&env->bpf_progs.lock);
	n = env->bpf_progs.infos.rb_node;

	while (n) {
		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
		if (prog_id < node->info_linear->info.id)
			n = n->rb_left;
		else if (prog_id > node->info_linear->info.id)
			n = n->rb_right;
		else
			goto out;
	}
	node = NULL;

out:
	up_read(&env->bpf_progs.lock);
	return node;
}

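/*
 * Insert a BTF object into the env's rbtree, keyed by BTF id. Returns false
 * if an entry with the same id is already present. As above, the __ variant
 * expects the caller to hold bpf_progs.lock.
 */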
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	bool ret;

	down_write(&env->bpf_progs.lock);
	ret = __perf_env__insert_btf(env, btf_node);
	up_write(&env->bpf_progs.lock);
	return ret;
}

bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
	struct rb_node *parent = NULL;
	__u32 btf_id = btf_node->id;
	struct btf_node *node;
	struct rb_node **p;

	p = &env->bpf_progs.btfs.rb_node;

	while (*p != NULL) {
		parent = *p;
		node = rb_entry(parent, struct btf_node, rb_node);
		if (btf_id < node->id) {
			p = &(*p)->rb_left;
		} else if (btf_id > node->id) {
			p = &(*p)->rb_right;
		} else {
			pr_debug("duplicated btf %u\n", btf_id);
			return false;
		}
	}

	rb_link_node(&btf_node->rb_node, parent, p);
	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
	env->bpf_progs.btfs_cnt++;
	return true;
}

struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *res;

	down_read(&env->bpf_progs.lock);
	res = __perf_env__find_btf(env, btf_id);
	up_read(&env->bpf_progs.lock);
	return res;
}

struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
	struct btf_node *node = NULL;
	struct rb_node *n;

	n = env->bpf_progs.btfs.rb_node;

	while (n) {
		node = rb_entry(n, struct btf_node, rb_node);
		if (btf_id < node->id)
			n = n->rb_left;
		else if (btf_id > node->id)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
	struct rb_root *root;
	struct rb_node *next;

	down_write(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node->info_linear);
		free(node);
	}

	env->bpf_progs.infos_cnt = 0;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		rb_erase(&node->rb_node, root);
		free(node);
	}

	env->bpf_progs.btfs_cnt = 0;

	up_write(&env->bpf_progs.lock);
}
#else // HAVE_LIBBPF_SUPPORT
static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
{
}
#endif // HAVE_LIBBPF_SUPPORT

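/*
 * Free every dynamically allocated member of @env; the perf_env struct
 * itself is not freed.
 */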
void perf_env__exit(struct perf_env *env)
{
	int i;

	perf_env__purge_bpf(env);
	perf_env__purge_cgroups(env);
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);
	zfree(&env->cmdline);
	zfree(&env->cmdline_argv);
	zfree(&env->sibling_dies);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->pmu_mappings);
	zfree(&env->cpu);
	zfree(&env->cpu_pmu_caps);
	zfree(&env->numa_map);

	for (i = 0; i < env->nr_numa_nodes; i++)
		perf_cpu_map__put(env->numa_nodes[i].map);
	zfree(&env->numa_nodes);

	for (i = 0; i < env->caches_cnt; i++)
		cpu_cache_level__free(&env->caches[i]);
	zfree(&env->caches);

	for (i = 0; i < env->nr_memory_nodes; i++)
		zfree(&env->memory_nodes[i].set);
	zfree(&env->memory_nodes);

	for (i = 0; i < env->nr_hybrid_nodes; i++) {
		zfree(&env->hybrid_nodes[i].pmu_name);
		zfree(&env->hybrid_nodes[i].cpus);
	}
	zfree(&env->hybrid_nodes);

	for (i = 0; i < env->nr_hybrid_cpc_nodes; i++) {
		zfree(&env->hybrid_cpc_nodes[i].cpu_pmu_caps);
		zfree(&env->hybrid_cpc_nodes[i].pmu_name);
	}
	zfree(&env->hybrid_cpc_nodes);
}

void perf_env__init(struct perf_env *env)
{
#ifdef HAVE_LIBBPF_SUPPORT
	env->bpf_progs.infos = RB_ROOT;
	env->bpf_progs.btfs = RB_ROOT;
	init_rwsem(&env->bpf_progs.lock);
#endif
	env->kernel_is_64_bit = -1;
}

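/*
 * Deduce whether the recorded kernel is 64-bit from the raw (un-normalized)
 * architecture string; the result is cached in env->kernel_is_64_bit.
 */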
static void perf_env__init_kernel_mode(struct perf_env *env)
{
	const char *arch = perf_env__raw_arch(env);

	if (!strncmp(arch, "x86_64", 6) || !strncmp(arch, "aarch64", 7) ||
	    !strncmp(arch, "arm64", 5) || !strncmp(arch, "mips64", 6) ||
	    !strncmp(arch, "parisc64", 8) || !strncmp(arch, "riscv64", 7) ||
	    !strncmp(arch, "s390x", 5) || !strncmp(arch, "sparc64", 7))
		env->kernel_is_64_bit = 1;
	else
		env->kernel_is_64_bit = 0;
}

int perf_env__kernel_is_64_bit(struct perf_env *env)
{
	if (env->kernel_is_64_bit == -1)
		perf_env__init_kernel_mode(env);

	return env->kernel_is_64_bit;
}

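/*
 * Record the tool's command line in @env. Only the pointer array is
 * duplicated here, since option parsing reorders argv; the strings
 * themselves are not copied.
 */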
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
	int i;

	/* do not include NULL termination */
	env->cmdline_argv = calloc(argc, sizeof(char *));
	if (env->cmdline_argv == NULL)
		goto out_enomem;

	/*
	 * Must copy argv contents because it gets moved around during option
	 * parsing:
	 */
	for (i = 0; i < argc ; i++) {
		env->cmdline_argv[i] = argv[i];
		if (env->cmdline_argv[i] == NULL)
			goto out_free;
	}

	env->nr_cmdline = argc;

	return 0;
out_free:
	zfree(&env->cmdline_argv);
out_enomem:
	return -ENOMEM;
}

int perf_env__read_cpu_topology_map(struct perf_env *env)
{
	int cpu, nr_cpus;

	if (env->cpu != NULL)
		return 0;

	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	nr_cpus = env->nr_cpus_avail;
	if (nr_cpus == -1)
		return -EINVAL;

	env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
	if (env->cpu == NULL)
		return -ENOMEM;

	for (cpu = 0; cpu < nr_cpus; ++cpu) {
		env->cpu[cpu].core_id	= cpu_map__get_core_id(cpu);
		env->cpu[cpu].socket_id	= cpu_map__get_socket_id(cpu);
		env->cpu[cpu].die_id	= cpu_map__get_die_id(cpu);
	}

	env->nr_cpus_avail = nr_cpus;
	return 0;
}

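/*
 * Build env->pmu_mappings as a list of "<type>:<name>" strings, one per
 * named PMU, each terminated by a NUL byte and concatenated back to back;
 * env->nr_pmu_mappings holds the number of entries.
 */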
int perf_env__read_pmu_mappings(struct perf_env *env)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	struct strbuf sb;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}
	if (!pmu_num) {
		pr_debug("pmu mappings not available\n");
		return -ENOENT;
	}
	env->nr_pmu_mappings = pmu_num;

	if (strbuf_init(&sb, 128 * pmu_num) < 0)
		return -ENOMEM;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		if (strbuf_addf(&sb, "%u:%s", pmu->type, pmu->name) < 0)
			goto error;
		/* include a NULL character at the end */
		if (strbuf_add(&sb, "", 1) < 0)
			goto error;
	}

	env->pmu_mappings = strbuf_detach(&sb, NULL);

	return 0;

error:
	strbuf_release(&sb);
	return -1;
}

int perf_env__read_cpuid(struct perf_env *env)
{
	char cpuid[128];
	int err = get_cpuid(cpuid, sizeof(cpuid));

	if (err)
		return err;

	free(env->cpuid);
	env->cpuid = strdup(cpuid);
	if (env->cpuid == NULL)
		return ENOMEM;
	return 0;
}

static int perf_env__read_arch(struct perf_env *env)
{
	struct utsname uts;

	if (env->arch)
		return 0;

	if (!uname(&uts))
		env->arch = strdup(uts.machine);

	return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
	if (env->nr_cpus_avail == 0)
		env->nr_cpus_avail = cpu__max_present_cpu();

	return env->nr_cpus_avail ? 0 : -ENOENT;
}

const char *perf_env__raw_arch(struct perf_env *env)
{
	return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

int perf_env__nr_cpus_avail(struct perf_env *env)
{
	return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
	zfree(&cache->type);
	zfree(&cache->map);
	zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
	if (!strcmp(arch, "x86_64"))
		return "x86";
	if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
		return "x86";
	if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
		return "sparc";
	if (!strncmp(arch, "aarch64", 7) || !strncmp(arch, "arm64", 5))
		return "arm64";
	if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
		return "arm";
	if (!strncmp(arch, "s390", 4))
		return "s390";
	if (!strncmp(arch, "parisc", 6))
		return "parisc";
	if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
		return "powerpc";
	if (!strncmp(arch, "mips", 4))
		return "mips";
	if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
		return "sh";

	return arch;
}

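/*
 * Normalized architecture of the recorded (or, failing that, the local)
 * environment, e.g. both "x86_64" and "i686" report "x86", and "aarch64"
 * reports "arm64".
 */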
const char *perf_env__arch(struct perf_env *env)
{
	char *arch_name;

	if (!env || !env->arch) { /* Assume local operation */
		static struct utsname uts = { .machine[0] = '\0', };
		if (uts.machine[0] == '\0' && uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	} else
		arch_name = env->arch;

	return normalize_arch(arch_name);
}

const char *perf_env__cpuid(struct perf_env *env)
{
	int status;

	if (!env || !env->cpuid) { /* Assume local operation */
		status = perf_env__read_cpuid(env);
		if (status)
			return NULL;
	}

	return env->cpuid;
}

int perf_env__nr_pmu_mappings(struct perf_env *env)
{
	int status;

	if (!env || !env->nr_pmu_mappings) { /* Assume local operation */
		status = perf_env__read_pmu_mappings(env);
		if (status)
			return 0;
	}

	return env->nr_pmu_mappings;
}

const char *perf_env__pmu_mappings(struct perf_env *env)
{
	int status;

	if (!env || !env->pmu_mappings) { /* Assume local operation */
		status = perf_env__read_pmu_mappings(env);
		if (status)
			return NULL;
	}

	return env->pmu_mappings;
}

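/*
 * Map a CPU number to its NUMA node. On first use this lazily builds
 * env->numa_map from env->numa_nodes; CPUs that belong to no node (and
 * out-of-range CPU numbers) map to -1.
 *
 * A minimal usage sketch, assuming the env has been populated from a
 * perf.data header or from the local topology:
 *
 *	int cpu, node;
 *
 *	for (cpu = 0; cpu < perf_env__nr_cpus_avail(env); cpu++) {
 *		node = perf_env__numa_node(env, cpu);
 *		pr_debug("cpu %d -> node %d\n", cpu, node);
 *	}
 */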
int perf_env__numa_node(struct perf_env *env, int cpu)
{
	if (!env->nr_numa_map) {
		struct numa_node *nn;
		int i, nr = 0;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			nn = &env->numa_nodes[i];
			nr = max(nr, perf_cpu_map__max(nn->map));
		}

		nr++;

		/*
		 * We initialize the numa_map array to prepare
		 * it for missing cpus, which return node -1
		 */
		env->numa_map = malloc(nr * sizeof(int));
		if (!env->numa_map)
			return -1;

		for (i = 0; i < nr; i++)
			env->numa_map[i] = -1;

		env->nr_numa_map = nr;

		for (i = 0; i < env->nr_numa_nodes; i++) {
			int tmp, j;

			nn = &env->numa_nodes[i];
			perf_cpu_map__for_each_cpu(j, tmp, nn->map)
				env->numa_map[j] = i;
		}
	}

	return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
}