
Lines matching full:env in tools/perf/util/env.c (only matching source lines are shown; the left column gives each line's number in the file)

  4  #include "env.h"

 23  bool perf_env__insert_bpf_prog_info(struct perf_env *env,
 28          down_write(&env->bpf_progs.lock);
 29          ret = __perf_env__insert_bpf_prog_info(env, info_node);
 30          up_write(&env->bpf_progs.lock);

 35  bool __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
 42          p = &env->bpf_progs.infos.rb_node;
 58          rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
 59          env->bpf_progs.infos_cnt++;

 63  struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
 69          down_read(&env->bpf_progs.lock);
 70          n = env->bpf_progs.infos.rb_node;
 84          up_read(&env->bpf_progs.lock);
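
The locking pattern above is uniform: each public entry point takes env->bpf_progs.lock for reading or writing and forwards to a double-underscore variant that assumes the lock is already held. A caller sketch, assuming the node was allocated and filled elsewhere; the false return from the insert presumably signals a duplicate program id, in which case the caller still owns the node:

/* Hypothetical caller; record_bpf_prog() is illustrative, not a perf API. */
static void record_bpf_prog(struct perf_env *env,
                            struct bpf_prog_info_node *info_node)
{
        if (!perf_env__insert_bpf_prog_info(env, info_node)) {
                /* duplicate id: node was not linked into the tree */
                free(info_node);
        }
}
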
 88  bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
 92          down_write(&env->bpf_progs.lock);
 93          ret = __perf_env__insert_btf(env, btf_node);
 94          up_write(&env->bpf_progs.lock);

 98  bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
105          p = &env->bpf_progs.btfs.rb_node;
121          rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
122          env->bpf_progs.btfs_cnt++;

126  struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
130          down_read(&env->bpf_progs.lock);
131          res = __perf_env__find_btf(env, btf_id);
132          up_read(&env->bpf_progs.lock);

136  struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
141          n = env->bpf_progs.btfs.rb_node;
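
Both find helpers are classic red-black-tree descents keyed by id. A sketch of how the elided body of __perf_env__find_btf() likely reads, assuming btf_node keys the tree on an id field and using rb_entry() from the kernel's tools/include rbtree API:

struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
        struct btf_node *node;
        struct rb_node *n = env->bpf_progs.btfs.rb_node;

        while (n) {
                node = rb_entry(n, struct btf_node, rb_node);
                if (btf_id < node->id)
                        n = n->rb_left;         /* key smaller: go left */
                else if (btf_id > node->id)
                        n = n->rb_right;        /* key larger: go right */
                else
                        return node;            /* exact id match */
        }
        return NULL;                            /* id not present */
}
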
156  static void perf_env__purge_bpf(struct perf_env *env)
161          down_write(&env->bpf_progs.lock);
163          root = &env->bpf_progs.infos;
176          env->bpf_progs.infos_cnt = 0;
178          root = &env->bpf_progs.btfs;
190          env->bpf_progs.btfs_cnt = 0;
192          up_write(&env->bpf_progs.lock);

195  static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
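
Two definitions of perf_env__purge_bpf() appear: one that empties both rb-trees under the write lock, and a stub whose parameter is __maybe_unused. That shape indicates a build-time switch; perf normally guards its BPF bookkeeping with HAVE_LIBBPF_SUPPORT. A sketch of the drain loop for one tree, using rb_first()/rb_next()/rb_erase():

#ifdef HAVE_LIBBPF_SUPPORT
/* Sketch: the real function drains both bpf_progs.infos and bpf_progs.btfs. */
static void purge_btf_tree(struct perf_env *env)
{
        struct rb_root *root = &env->bpf_progs.btfs;
        struct rb_node *next = rb_first(root);

        while (next) {
                struct btf_node *node = rb_entry(next, struct btf_node, rb_node);

                next = rb_next(&node->rb_node);
                rb_erase(&node->rb_node, root);
                free(node);
        }
        env->bpf_progs.btfs_cnt = 0;
}
#endif
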
200  void perf_env__exit(struct perf_env *env)
204          perf_env__purge_bpf(env);
205          perf_env__purge_cgroups(env);
206          zfree(&env->hostname);
207          zfree(&env->os_release);
208          zfree(&env->version);
209          zfree(&env->arch);
210          zfree(&env->cpu_desc);
211          zfree(&env->cpuid);
212          zfree(&env->cmdline);
213          zfree(&env->cmdline_argv);
214          zfree(&env->sibling_dies);
215          zfree(&env->sibling_cores);
216          zfree(&env->sibling_threads);
217          zfree(&env->pmu_mappings);
218          zfree(&env->cpu);
219          for (i = 0; i < env->nr_cpu_pmu_caps; i++)
220                  zfree(&env->cpu_pmu_caps[i]);
221          zfree(&env->cpu_pmu_caps);
222          zfree(&env->numa_map);

224          for (i = 0; i < env->nr_numa_nodes; i++)
225                  perf_cpu_map__put(env->numa_nodes[i].map);
226          zfree(&env->numa_nodes);

228          for (i = 0; i < env->caches_cnt; i++)
229                  cpu_cache_level__free(&env->caches[i]);
230          zfree(&env->caches);

232          for (i = 0; i < env->nr_memory_nodes; i++)
233                  zfree(&env->memory_nodes[i].set);
234          zfree(&env->memory_nodes);

236          for (i = 0; i < env->nr_hybrid_nodes; i++) {
237                  zfree(&env->hybrid_nodes[i].pmu_name);
238                  zfree(&env->hybrid_nodes[i].cpus);
240          zfree(&env->hybrid_nodes);

242          for (i = 0; i < env->nr_pmus_with_caps; i++) {
243                  for (j = 0; j < env->pmu_caps[i].nr_caps; j++)
244                          zfree(&env->pmu_caps[i].caps[j]);
245                  zfree(&env->pmu_caps[i].caps);
246                  zfree(&env->pmu_caps[i].pmu_name);
248          zfree(&env->pmu_caps);
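
perf_env__exit() consistently uses zfree() rather than free(): the helper frees the pointee and nulls the pointer, so a repeated cleanup pass or a stale dereference fails cleanly instead of corrupting the heap. A self-contained sketch of the idiom (in perf the real implementation is the zfree() macro over __zfree() in tools/lib/zalloc.c):

#include <stdlib.h>

/* zfree()-style helper: release *pptr and poison it to NULL so cleanup
 * is idempotent. Invoked with the address of the pointer, e.g.
 * zfree_ptr((void **)&env->hostname). */
static inline void zfree_ptr(void **pptr)
{
        free(*pptr);
        *pptr = NULL;
}
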
251  void perf_env__init(struct perf_env *env)
254          env->bpf_progs.infos = RB_ROOT;
255          env->bpf_progs.btfs = RB_ROOT;
256          init_rwsem(&env->bpf_progs.lock);
258          env->kernel_is_64_bit = -1;

261  static void perf_env__init_kernel_mode(struct perf_env *env)
263          const char *arch = perf_env__raw_arch(env);
269                  env->kernel_is_64_bit = 1;
271                  env->kernel_is_64_bit = 0;

274  int perf_env__kernel_is_64_bit(struct perf_env *env)
276          if (env->kernel_is_64_bit == -1)
277                  perf_env__init_kernel_mode(env);
279          return env->kernel_is_64_bit;
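
kernel_is_64_bit is seeded with -1 in perf_env__init() and resolved lazily on the first perf_env__kernel_is_64_bit() call, so the uname work only happens if someone asks. The elided body of perf_env__init_kernel_mode() presumably compares the raw architecture string against known 64-bit machine names; a sketch (the exact name list is an assumption):

static void perf_env__init_kernel_mode(struct perf_env *env)
{
        const char *arch = perf_env__raw_arch(env);

        /* Sketch: common 64-bit 'uname -m' strings; anything else
         * (i686, armv7l, ...) is treated as a 32-bit kernel. */
        if (!strncmp(arch, "x86_64", 6) || !strncmp(arch, "aarch64", 7) ||
            !strncmp(arch, "riscv64", 7) || !strncmp(arch, "s390x", 5))
                env->kernel_is_64_bit = 1;
        else
                env->kernel_is_64_bit = 0;
}
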
282  int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
287          env->cmdline_argv = calloc(argc, sizeof(char *));
288          if (env->cmdline_argv == NULL)
296                  env->cmdline_argv[i] = argv[i];
297                  if (env->cmdline_argv[i] == NULL)
301          env->nr_cmdline = argc;
305          zfree(&env->cmdline_argv);

310  int perf_env__read_cpu_topology_map(struct perf_env *env)
314          if (env->cpu != NULL)
317          if (env->nr_cpus_avail == 0)
318                  env->nr_cpus_avail = cpu__max_present_cpu().cpu;
320          nr_cpus = env->nr_cpus_avail;
324          env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
325          if (env->cpu == NULL)
331                  env->cpu[idx].core_id = cpu__get_core_id(cpu);
332                  env->cpu[idx].socket_id = cpu__get_socket_id(cpu);
333                  env->cpu[idx].die_id = cpu__get_die_id(cpu);
336          env->nr_cpus_avail = nr_cpus;
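
After perf_env__read_cpu_topology_map() succeeds, env->cpu is an array indexed by logical CPU whose core_id, socket_id and die_id come from sysfs. A usage sketch (print_topology() is illustrative, not a perf API):

#include <stdio.h>

static void print_topology(struct perf_env *env)
{
        if (perf_env__read_cpu_topology_map(env) < 0)
                return;

        for (int i = 0; i < env->nr_cpus_avail; i++)
                printf("cpu%d: core %d, die %d, socket %d\n", i,
                       env->cpu[i].core_id, env->cpu[i].die_id,
                       env->cpu[i].socket_id);
}
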
340  int perf_env__read_pmu_mappings(struct perf_env *env)
353          env->nr_pmu_mappings = pmu_num;
366          env->pmu_mappings = strbuf_detach(&sb, NULL);

375  int perf_env__read_cpuid(struct perf_env *env)
383          free(env->cpuid);
384          env->cpuid = strdup(cpuid);
385          if (env->cpuid == NULL)

390  static int perf_env__read_arch(struct perf_env *env)
394          if (env->arch)
398          env->arch = strdup(uts.machine);
400          return env->arch ? 0 : -ENOMEM;

403  static int perf_env__read_nr_cpus_avail(struct perf_env *env)
405          if (env->nr_cpus_avail == 0)
406                  env->nr_cpus_avail = cpu__max_present_cpu().cpu;
408          return env->nr_cpus_avail ? 0 : -ENOENT;

411  const char *perf_env__raw_arch(struct perf_env *env)
413          return env && !perf_env__read_arch(env) ? env->arch : "unknown";

416  int perf_env__nr_cpus_avail(struct perf_env *env)
418          return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
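
perf_env__raw_arch() and perf_env__nr_cpus_avail() wrap the static readers so callers never handle errors: failure degrades to "unknown" and 0 rather than a negative code or NULL. A caller fragment as a sketch:

        const char *arch = perf_env__raw_arch(env);     /* "unknown" on failure */
        int ncpus = perf_env__nr_cpus_avail(env);       /* 0 when unreadable */

        if (ncpus)
                printf("%s with %d CPUs\n", arch, ncpus);
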
460  const char *perf_env__arch(struct perf_env *env)
464          if (!env || !env->arch) { /* Assume local operation */
470          arch_name = env->arch;

475  const char *perf_env__cpuid(struct perf_env *env)
479          if (!env || !env->cpuid) { /* Assume local operation */
480                  status = perf_env__read_cpuid(env);
485          return env->cpuid;

488  int perf_env__nr_pmu_mappings(struct perf_env *env)
492          if (!env || !env->nr_pmu_mappings) { /* Assume local operation */
493                  status = perf_env__read_pmu_mappings(env);
498          return env->nr_pmu_mappings;

501  const char *perf_env__pmu_mappings(struct perf_env *env)
505          if (!env || !env->pmu_mappings) { /* Assume local operation */
506                  status = perf_env__read_pmu_mappings(env);
511          return env->pmu_mappings;
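
perf_env__cpuid(), perf_env__nr_pmu_mappings() and perf_env__pmu_mappings() follow one template: an empty field means the env is assumed to describe the local machine, so the matching read helper runs on demand. A sketch of how the matched lines of perf_env__cpuid() plausibly connect (the early return on read failure is an assumption):

const char *perf_env__cpuid(struct perf_env *env)
{
        int status;

        if (!env || !env->cpuid) { /* Assume local operation */
                status = perf_env__read_cpuid(env);
                if (status)
                        return NULL;    /* assumed failure path */
        }
        return env->cpuid;
}
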
514  int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
516          if (!env->nr_numa_map) {
520                  for (i = 0; i < env->nr_numa_nodes; i++) {
521                          nn = &env->numa_nodes[i];
531                  env->numa_map = malloc(nr * sizeof(int));
532                  if (!env->numa_map)
536                          env->numa_map[i] = -1;
538                  env->nr_numa_map = nr;
540                  for (i = 0; i < env->nr_numa_nodes; i++) {
544                          nn = &env->numa_nodes[i];
546                                  env->numa_map[tmp.cpu] = i;
550          return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1;
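
perf_env__numa_node() builds a flat CPU-to-node table on first use and answers later queries with a bounds-checked array read, so the per-node CPU maps are only walked once. A sketch of the lazy build, assuming libperf's perf_cpu_map__max() and perf_cpu_map__for_each_cpu() (error handling trimmed):

        /* Size the table from the largest CPU id in any node's map. */
        int nr = 0, i;

        for (i = 0; i < env->nr_numa_nodes; i++) {
                struct numa_node *nn = &env->numa_nodes[i];

                nr = max(nr, perf_cpu_map__max(nn->map).cpu + 1);
        }

        env->numa_map = malloc(nr * sizeof(int));
        for (i = 0; i < nr; i++)
                env->numa_map[i] = -1;          /* CPU in no node */
        env->nr_numa_map = nr;

        /* Stamp each node's CPUs with the owning node's index. */
        for (i = 0; i < env->nr_numa_nodes; i++) {
                struct numa_node *nn = &env->numa_nodes[i];
                struct perf_cpu tmp;
                int j;

                perf_cpu_map__for_each_cpu(tmp, j, nn->map)
                        env->numa_map[tmp.cpu] = i;
        }
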
553  char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
573                  for (i = 0; i < env->nr_cpu_pmu_caps; i++) {
574                          if (!strncmp(env->cpu_pmu_caps[i], cap_eq, cap_size + 1)) {
576                                  return &env->cpu_pmu_caps[i][cap_size + 1];
582          for (i = 0; i < env->nr_pmus_with_caps; i++) {
583                  if (strcmp(env->pmu_caps[i].pmu_name, pmu_name))
586                  ptr = env->pmu_caps[i].caps;
588                  for (j = 0; j < env->pmu_caps[i].nr_caps; j++) {
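
perf_env__find_pmu_cap() stores capabilities as "name=value" strings: it builds a "name=" key (cap_eq), prefix-matches cap_size + 1 bytes with strncmp(), and returns a pointer just past the '='. Core-PMU caps in cpu_pmu_caps are checked first; other PMUs are located by name in pmu_caps. A usage sketch with a hypothetical capability name:

        /* "slots" is illustrative, not guaranteed to exist for any PMU;
         * NULL means the capability was not recorded in this env. */
        const char *slots = perf_env__find_pmu_cap(env, "cpu", "slots");

        if (slots)
                printf("topdown slots: %s\n", slots);
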