Lines matching refs:core (each entry: source line number, matching line, enclosing function):
102 #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
182 evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type); in evsel__calc_id_pos()
183 evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type); in evsel__calc_id_pos()
189 if (!(evsel->core.attr.sample_type & bit)) { in __evsel__set_sample_bit()
190 evsel->core.attr.sample_type |= bit; in __evsel__set_sample_bit()
199 if (evsel->core.attr.sample_type & bit) { in __evsel__reset_sample_bit()
200 evsel->core.attr.sample_type &= ~bit; in __evsel__reset_sample_bit()
215 evsel->core.attr.read_format |= PERF_FORMAT_ID; in evsel__set_sample_id()
239 perf_evsel__init(&evsel->core, attr); in evsel__init()
271 evsel->core.attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | in evsel__new_idx()
273 evsel->core.attr.sample_period = 1; in evsel__new_idx()
367 BUG_ON(orig->core.fd); in evsel__clone()
376 evsel = evsel__new(&orig->core.attr); in evsel__clone()
380 evsel->core.cpus = perf_cpu_map__get(orig->core.cpus); in evsel__clone()
381 evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus); in evsel__clone()
382 evsel->core.threads = perf_thread_map__get(orig->core.threads); in evsel__clone()
383 evsel->core.nr_members = orig->core.nr_members; in evsel__clone()
384 evsel->core.system_wide = orig->core.system_wide; in evsel__clone()
503 struct perf_event_attr *attr = &evsel->core.attr; in perf_evsel__add_modifiers()
538 int r = scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config)); in evsel__hw_name()
564 int r = scnprintf(bf, size, "%s", __evsel__sw_name(evsel->core.attr.config)); in evsel__sw_name()
588 struct perf_event_attr *attr = &evsel->core.attr; in evsel__bp_name()
684 int ret = __evsel__hw_cache_name(evsel->core.attr.config, bf, size); in evsel__hw_cache_name()
690 int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config); in evsel__raw_name()
710 switch (evsel->core.attr.type) { in evsel__name()
740 evsel->core.attr.type); in evsel__name()
791 struct perf_event_attr *attr = &evsel->core.attr; in __evsel__config_callchain()
857 struct perf_event_attr *attr = &evsel->core.attr; in perf_evsel__reset_callgraph()
877 struct perf_event_attr *attr = &evsel->core.attr; in evsel__apply_config_terms()
998 evsel->core.attr.mmap_data = track; in evsel__apply_config_terms()
1060 struct perf_event_attr *attr = &evsel->core.attr; in evsel__config()
1084 if (leader->core.nr_members > 1) { in evsel__config()
1111 evsel->core.attr.read_format |= in evsel__config()
1129 evsel->core.attr.exclude_callchain_user = 1; in evsel__config()
1209 evsel->core.attr.read_format |= in evsel__config()
1255 if (evsel->core.own_cpus || evsel->unit) in evsel__config()
1256 evsel->core.attr.read_format |= PERF_FORMAT_ID; in evsel__config()
1328 return perf_evsel__enable_cpu(&evsel->core, cpu); in evsel__enable_cpu()
1333 int err = perf_evsel__enable(&evsel->core); in evsel__enable()
1343 return perf_evsel__disable_cpu(&evsel->core, cpu); in evsel__disable_cpu()
1348 int err = perf_evsel__disable(&evsel->core); in evsel__disable()
1375 assert(list_empty(&evsel->core.node)); in evsel__exit()
1378 perf_evsel__free_fd(&evsel->core); in evsel__exit()
1379 perf_evsel__free_id(&evsel->core); in evsel__exit()
1382 perf_cpu_map__put(evsel->core.cpus); in evsel__exit()
1383 perf_cpu_map__put(evsel->core.own_cpus); in evsel__exit()
1384 perf_thread_map__put(evsel->core.threads); in evsel__exit()
1443 return perf_evsel__read(&evsel->core, cpu, thread, count); in evsel__read_one()
1465 u64 read_format = leader->core.attr.read_format; in perf_evsel__process_group_data()
1471 if (nr != (u64) leader->core.nr_members) in perf_evsel__process_group_data()
1502 u64 read_format = leader->core.attr.read_format; in evsel__read_group()
1503 int size = perf_evsel__read_size(&leader->core); in evsel__read_group()
1531 u64 read_format = evsel->core.attr.read_format; in evsel__read_counter()
1571 BUG_ON(!leader->core.fd); in get_group_fd()
1623 if (evsel->core.system_wide) in ignore_missing_thread()
1669 int precise_ip = evsel->core.attr.precise_ip; in perf_event_open()
1676 fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd, flags); in perf_event_open()
1688 if (!evsel->core.attr.precise_ip) { in perf_event_open()
1689 evsel->core.attr.precise_ip = precise_ip; in perf_event_open()
1694 evsel->core.attr.precise_ip--; in perf_event_open()
1695 pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip); in perf_event_open()
1696 display_attr(&evsel->core.attr); in perf_event_open()
1711 if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) || in evsel__open_cpu()
1712 (perf_missing_features.aux_output && evsel->core.attr.aux_output)) in evsel__open_cpu()
1739 if (evsel->core.system_wide) in evsel__open_cpu()
1744 if (evsel->core.fd == NULL && in evsel__open_cpu()
1745 perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0) in evsel__open_cpu()
1755 evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */ in evsel__open_cpu()
1757 evsel->core.attr.use_clockid = 0; in evsel__open_cpu()
1758 evsel->core.attr.clockid = 0; in evsel__open_cpu()
1763 evsel->core.attr.mmap2 = 0; in evsel__open_cpu()
1765 evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0; in evsel__open_cpu()
1767 evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS | in evsel__open_cpu()
1769 if (perf_missing_features.group_read && evsel->core.attr.inherit) in evsel__open_cpu()
1770 evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID); in evsel__open_cpu()
1772 evsel->core.attr.ksymbol = 0; in evsel__open_cpu()
1774 evsel->core.attr.bpf_event = 0; in evsel__open_cpu()
1776 evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX; in evsel__open_cpu()
1779 evsel->core.attr.sample_id_all = 0; in evsel__open_cpu()
1781 display_attr(&evsel->core.attr); in evsel__open_cpu()
1788 if (!evsel->cgrp && !evsel->core.system_wide) in evsel__open_cpu()
1801 test_attr__open(&evsel->core.attr, pid, cpus->map[cpu], in evsel__open_cpu()
1892 if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) { in evsel__open_cpu()
1897 (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) { in evsel__open_cpu()
1901 } else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) { in evsel__open_cpu()
1905 } else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) { in evsel__open_cpu()
1909 } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) { in evsel__open_cpu()
1913 } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) { in evsel__open_cpu()
1917 } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) { in evsel__open_cpu()
1921 } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) { in evsel__open_cpu()
1929 } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) { in evsel__open_cpu()
1934 (evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host)) { in evsel__open_cpu()
1943 (evsel->core.attr.branch_sample_type & in evsel__open_cpu()
1950 evsel->core.attr.inherit && in evsel__open_cpu()
1951 (evsel->core.attr.read_format & PERF_FORMAT_GROUP) && in evsel__open_cpu()
1982 perf_evsel__close(&evsel->core); in evsel__close()
1983 perf_evsel__free_id(&evsel->core); in evsel__close()
2004 u64 type = evsel->core.attr.sample_type; in perf_evsel__parse_id_sample()
2093 u64 type = evsel->core.attr.sample_type; in evsel__parse_sample()
2109 data->period = evsel->core.attr.sample_period; in evsel__parse_sample()
2116 if (!evsel->core.attr.sample_id_all) in evsel__parse_sample()
2189 u64 read_format = evsel->core.attr.read_format; in evsel__parse_sample()
2299 u64 mask = evsel->core.attr.sample_regs_user; in evsel__parse_sample()
2355 u64 mask = evsel->core.attr.sample_regs_intr; in evsel__parse_sample()
2396 u64 type = evsel->core.attr.sample_type; in evsel__parse_sample_timestamp()
2407 if (!evsel->core.attr.sample_id_all) in evsel__parse_sample_timestamp()
2513 evsel->core.attr.type == PERF_TYPE_HARDWARE && in evsel__fallback()
2514 evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) { in evsel__fallback()
2526 evsel->core.attr.type = PERF_TYPE_SOFTWARE; in evsel__fallback()
2527 evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK; in evsel__fallback()
2531 } else if (err == EACCES && !evsel->core.attr.exclude_kernel && in evsel__fallback()
2538 if (evsel->core.attr.exclude_user) in evsel__fallback()
2555 evsel->core.attr.exclude_kernel = 1; in evsel__fallback()
2556 evsel->core.attr.exclude_hv = 1; in evsel__fallback()
2662 if (evsel->core.attr.aux_output) in evsel__open_strerror()
2666 if (evsel->core.attr.sample_period != 0) in evsel__open_strerror()
2670 if (evsel->core.attr.precise_ip) in evsel__open_strerror()
2674 if (evsel->core.attr.type == PERF_TYPE_HARDWARE) in evsel__open_strerror()
2686 if (evsel->core.attr.write_backward && perf_missing_features.write_backward) in evsel__open_strerror()
2716 for (cpu = 0; cpu < xyarray__max_x(evsel->core.fd); cpu++) { in store_evsel_ids()
2717 for (thread = 0; thread < xyarray__max_y(evsel->core.fd); in store_evsel_ids()
2721 if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, in store_evsel_ids()
2732 struct perf_cpu_map *cpus = evsel->core.cpus; in evsel__store_ids()
2733 struct perf_thread_map *threads = evsel->core.threads; in evsel__store_ids()
2735 if (perf_evsel__alloc_id(&evsel->core, cpus->nr, threads->nr)) in evsel__store_ids()
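All of the matches above reach shared state through the core member that struct evsel embeds. Below is a minimal sketch of that embedding pattern, assuming the tools/perf layout in which the tool-side struct evsel wraps libperf's struct perf_evsel; the *_stub types are simplified, hypothetical stand-ins for illustration, not the real definitions.

#include <stdio.h>

/* Stand-in for the UAPI struct perf_event_attr (only the fields used below). */
struct perf_event_attr_stub {
	unsigned long long sample_type;
	unsigned long long read_format;
};

/* Stand-in for libperf's struct perf_evsel: the generic, shared part. */
struct perf_evsel_stub {
	struct perf_event_attr_stub attr;
	int nr_members;
	int system_wide;
};

/* Stand-in for tools/perf's struct evsel: tool-specific state plus core. */
struct evsel_stub {
	struct perf_evsel_stub core;	/* reached as evsel->core.<field> in the matches */
	int idx;
};

int main(void)
{
	struct evsel_stub evsel = {
		.core.attr.sample_type = 0x107ULL,	/* arbitrary example value */
		.core.nr_members = 1,
	};

	/* Mirrors accesses such as evsel->core.attr.sample_type in the listing. */
	printf("sample_type = %#llx, nr_members = %d\n",
	       evsel.core.attr.sample_type, evsel.core.nr_members);
	return 0;
}

Keeping the shared fields behind one embedded member is also why calls in the listing such as perf_evsel__enable(&evsel->core), perf_evsel__read(&evsel->core, ...) and perf_evsel__close(&evsel->core) pass the address of the embedded core rather than the outer struct.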