| /tools/perf/pmu-events/arch/x86/ |
| D | mapfile.csv |
      2  GenuineIntel-6-(97|9A|B7|BA|BF),v1.27,alderlake,core
      3  GenuineIntel-6-BE,v1.27,alderlaken,core
      4  GenuineIntel-6-(1C|26|27|35|36),v5,bonnell,core
      5  GenuineIntel-6-(3D|47),v29,broadwell,core
      6  GenuineIntel-6-56,v11,broadwellde,core
      7  GenuineIntel-6-4F,v22,broadwellx,core
      8  GenuineIntel-6-55-[56789ABCDEF],v1.22,cascadelakex,core
      9  GenuineIntel-6-9[6C],v1.05,elkhartlake,core
     10  GenuineIntel-6-CF,v1.09,emeraldrapids,core
     11  GenuineIntel-6-5[CF],v13,goldmont,core
     [all …]
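
Each row of these mapfiles binds a CPU model to its JSON event lists: the first column is an extended regex matched against the CPUID string perf builds (vendor-family-model on x86, e.g. GenuineIntel-6-97), then the event-list version, the per-model directory next to this file, and the PMU type (core here). A minimal sketch of that matching step, using POSIX regex.h purely for illustration (perf itself compiles the CSV into C lookup tables with pmu-events/jevents.py):

    #include <regex.h>
    #include <stdio.h>

    /* Illustration only: match a CPUID string against the first
     * column of a mapfile.csv row. */
    static int cpuid_matches(const char *pattern, const char *cpuid)
    {
            regex_t re;
            int hit;

            if (regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB))
                    return 0;   /* unparsable pattern: treat as no match */
            hit = regexec(&re, cpuid, 0, NULL, 0) == 0;
            regfree(&re);
            return hit;
    }

    int main(void)
    {
            /* An Alder Lake part (model 0x97) hits the first row above. */
            printf("%d\n", cpuid_matches("GenuineIntel-6-(97|9A|B7|BA|BF)",
                                         "GenuineIntel-6-97"));
            return 0;
    }

The arm64, s390, powerpc, and riscv mapfiles later in this listing follow the same column layout, keyed on MIDR, machine type, PVR, and MVENDORID/MARCHID/MIMPID values respectively.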
|
| /tools/perf/tests/ |
| D | parse-events.c |
      39  __u32 type = evsel->core.attr.type;  in test_config()
      40  __u64 config = evsel->core.attr.config;  in test_config()
      89  TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);  in test__checkevent_tracepoint()
      91  TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type);  in test__checkevent_tracepoint()
      93  PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);  in test__checkevent_tracepoint()
      94  TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->core.attr.sample_period);  in test__checkevent_tracepoint()
     102  TEST_ASSERT_VAL("wrong number of entries", evlist->core.nr_entries > 1);  in test__checkevent_tracepoint_multi()
     107  PERF_TYPE_TRACEPOINT == evsel->core.attr.type);  in test__checkevent_tracepoint_multi()
     109  PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);  in test__checkevent_tracepoint_multi()
     111  1 == evsel->core.attr.sample_period);  in test__checkevent_tracepoint_multi()
     [all …]
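
The matches alone show the shape of these tests: parse an event specification into an evlist, then assert on the perf_event_attr fields the parse produced, reaching them through the embedded libperf core. A hedged reconstruction of the helper behind lines 39-40 (signature and body are assumptions; only the two reads are verbatim):

    /* Sketch of a test_config()-style check: did parsing set the
     * expected type/config on this evsel? */
    static int test_config(const struct evsel *evsel, __u32 expected_type,
                           __u64 expected_config)
    {
            __u32 type = evsel->core.attr.type;      /* line 39 */
            __u64 config = evsel->core.attr.config;  /* line 40 */

            return type == expected_type && config == expected_config;
    }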
|
| D | task-exit.c |
      81  perf_evlist__set_maps(&evlist->core, cpus, threads);  in test__task_exit()
      90  evsel->core.attr.task = 1;  in test__task_exit()
      92  evsel->core.attr.sample_freq = 1000000;  in test__task_exit()
      94  evsel->core.attr.sample_freq = 1;  in test__task_exit()
      96  evsel->core.attr.inherit = 0;  in test__task_exit()
      97  evsel->core.attr.watermark = 0;  in test__task_exit()
      98  evsel->core.attr.wakeup_events = 1;  in test__task_exit()
      99  evsel->core.attr.exclude_kernel = 1;  in test__task_exit()
     119  if (perf_mmap__read_init(&md->core) < 0)  in test__task_exit()
     122  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in test__task_exit()
     [all …]
|
| D | keep-tracking.c |
      40  for (i = 0; i < evlist->core.nr_mmaps; i++) {  in find_comm()
      42  if (perf_mmap__read_init(&md->core) < 0)  in find_comm()
      44  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in find_comm()
      50  perf_mmap__consume(&md->core);  in find_comm()
      52  perf_mmap__read_done(&md->core);  in find_comm()
      90  perf_evlist__set_maps(&evlist->core, cpus, threads);  in test__keep_tracking()
      99  evsel->core.attr.comm = 1;  in test__keep_tracking()
     100  evsel->core.attr.disabled = 1;  in test__keep_tracking()
     101  evsel->core.attr.enable_on_exec = 0;  in test__keep_tracking()
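
The loop shared by task-exit.c and keep-tracking.c above, and by perf-time-to-tsc.c, openat-syscall-tp-fields.c, and sideband_evlist.c below, is libperf's canonical ring-buffer drain. Assembled from those matches (a consolidated sketch, not a verbatim copy of any one file):

    /* Drain every mmap'd ring buffer once: open a read window, walk
     * the events inside it, mark each one consumed, then close the
     * window so the kernel may overwrite the space. */
    for (i = 0; i < evlist->core.nr_mmaps; i++) {
            struct mmap *md = &evlist->mmap[i];
            union perf_event *event;

            if (perf_mmap__read_init(&md->core) < 0)
                    continue;

            while ((event = perf_mmap__read_event(&md->core)) != NULL) {
                    /* ...inspect event->header.type here... */
                    perf_mmap__consume(&md->core);
            }
            perf_mmap__read_done(&md->core);
    }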
|
| D | perf-time-to-tsc.c |
     102  perf_evlist__set_maps(&evlist->core, cpus, threads);  in test__perf_time_to_tsc()
     110  evsel->core.attr.comm = 1;  in test__perf_time_to_tsc()
     111  evsel->core.attr.disabled = 1;  in test__perf_time_to_tsc()
     112  evsel->core.attr.enable_on_exec = 0;  in test__perf_time_to_tsc()
     126  pc = evlist->mmap[0].core.base;  in test__perf_time_to_tsc()
     148  for (i = 0; i < evlist->core.nr_mmaps; i++) {  in test__perf_time_to_tsc()
     150  if (perf_mmap__read_init(&md->core) < 0)  in test__perf_time_to_tsc()
     153  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in test__perf_time_to_tsc()
     172  perf_mmap__consume(&md->core);  in test__perf_time_to_tsc()
     174  perf_mmap__read_done(&md->core);  in test__perf_time_to_tsc()
|
| D | openat-syscall-tp-fields.c |
      67  perf_thread_map__set_pid(evlist->core.threads, 0, getpid());  in test__syscall_openat_tp_fields()
      93  for (i = 0; i < evlist->core.nr_mmaps; i++) {  in test__syscall_openat_tp_fields()
      98  if (perf_mmap__read_init(&md->core) < 0)  in test__syscall_openat_tp_fields()
     101  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in test__syscall_openat_tp_fields()
     109  perf_mmap__consume(&md->core);  in test__syscall_openat_tp_fields()
     129  perf_mmap__read_done(&md->core);  in test__syscall_openat_tp_fields()
|
| /tools/perf/pmu-events/arch/arm64/ |
| D | mapfile.csv |
     11  # Type is core, uncore etc
     15  0x00000000410fd020,v1,arm/cortex-a34,core
     16  0x00000000410fd030,v1,arm/cortex-a53,core
     17  0x00000000420f1000,v1,arm/cortex-a53,core
     18  0x00000000410fd040,v1,arm/cortex-a35,core
     19  0x00000000410fd050,v1,arm/cortex-a55,core
     20  0x00000000410fd060,v1,arm/cortex-a65-e1,core
     21  0x00000000410fd4a0,v1,arm/cortex-a65-e1,core
     22  0x00000000410fd070,v1,arm/cortex-a57-a72,core
     23  0x00000000410fd080,v1,arm/cortex-a57-a72,core
     [all …]
|
| /tools/testing/selftests/kvm/lib/riscv/ |
| D | processor.c |
     222  struct kvm_riscv_core core;  in vcpu_arch_dump() local
     224  vcpu_get_reg(vcpu, RISCV_CORE_REG(mode), &core.mode);  in vcpu_arch_dump()
     225  vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc), &core.regs.pc);  in vcpu_arch_dump()
     226  vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.ra), &core.regs.ra);  in vcpu_arch_dump()
     227  vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.sp), &core.regs.sp);  in vcpu_arch_dump()
     228  vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.gp), &core.regs.gp);  in vcpu_arch_dump()
     229  vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.tp), &core.regs.tp);  in vcpu_arch_dump()
     230  vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t0), &core.regs.t0);  in vcpu_arch_dump()
     231  vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t1), &core.regs.t1);  in vcpu_arch_dump()
     232  vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t2), &core.regs.t2);  in vcpu_arch_dump()
     [all …]
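
RISCV_CORE_REG() builds the KVM one-reg identifier for a field of struct kvm_riscv_core, so vcpu_arch_dump() is simply one vcpu_get_reg() call per register (in these selftests vcpu_get_reg() wraps the KVM_GET_ONE_REG ioctl). Condensed from the matches:

    struct kvm_riscv_core core;

    vcpu_get_reg(vcpu, RISCV_CORE_REG(mode), &core.mode);
    vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc), &core.regs.pc);
    /* ...repeated for ra, sp, gp, tp, and t0-t2 above, and presumably
     * the remaining GPRs in the truncated tail, before printing. */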
|
| /tools/perf/arch/x86/tests/ |
| D | hybrid.c |
      12  return (evsel->core.attr.config & PERF_HW_EVENT_MASK) == expected_config;  in test_config()
      22  return (evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT) == expected_config;  in test_hybrid_type()
      29  TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);  in test__hybrid_hw_event_with_pmu()
      30  TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);  in test__hybrid_hw_event_with_pmu()
      41  TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);  in test__hybrid_hw_group_event()
      42  TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);  in test__hybrid_hw_group_event()
      48  TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);  in test__hybrid_hw_group_event()
      60  TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);  in test__hybrid_sw_hw_group_event()
      61  TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);  in test__hybrid_sw_hw_group_event()
      65  TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);  in test__hybrid_sw_hw_group_event()
      [all …]
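
On hybrid x86, PERF_TYPE_HARDWARE events carry the owning PMU in the high bits of attr.config, which is exactly what test_config() and test_hybrid_type() above unpack. The split, as a short sketch built from the masks in the matches:

    /* attr.config layout on hybrid systems, per the matches above:
     * low bits hold the generic hardware event id, high bits the PMU type. */
    __u64 config   = evsel->core.attr.config;
    __u64 hw_event = config & PERF_HW_EVENT_MASK;
    __u64 pmu_type = config >> PERF_PMU_TYPE_SHIFT;

The encode side appears in /tools/perf/arch/x86/util/evlist.c further down: evsel->core.attr.config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT.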
|
| /tools/perf/pmu-events/arch/s390/ |
| D | mapfile.csv |
      2  ^IBM.209[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z10,core
      3  ^IBM.281[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z196,core
      4  ^IBM.282[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_zec12,core
      5  ^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core
      6  ^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
      7  ^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_z15,core
      8  ^IBM.393[12].*$,3,cf_z16,core
|
| /tools/perf/util/ |
| D | sideband_evlist.c |
      25  evsel = evsel__new_idx(attr, evlist->core.nr_entries);  in evlist__add_sb_event()
      58  for (i = 0; i < evlist->core.nr_mmaps; i++) {  in perf_evlist__poll_thread()
      62  if (perf_mmap__read_init(&map->core))  in perf_evlist__poll_thread()
      64  while ((event = perf_mmap__read_event(&map->core)) != NULL) {  in perf_evlist__poll_thread()
      72  perf_mmap__consume(&map->core);  in perf_evlist__poll_thread()
      75  perf_mmap__read_done(&map->core);  in perf_evlist__poll_thread()
      89  evsel->core.attr.sample_id_all = 1;  in evlist__set_cb()
      90  evsel->core.attr.watermark = 1;  in evlist__set_cb()
      91  evsel->core.attr.wakeup_watermark = 1;  in evlist__set_cb()
     107  if (evlist->core.nr_entries > 1) {  in evlist__start_sb_thread()
     [all …]
|
| D | mmap.c |
      41  return perf_mmap__mmap_len(&map->core);  in mmap__mmap_len()
     175  ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);  in perf_mmap__aio_mmap()
     269  build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);  in perf_mmap__setup_affinity_mask()
     271  __set_bit(map->core.cpu.cpu, map->affinity_mask.bits);  in perf_mmap__setup_affinity_mask()
     278  if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {  in mmap__mmap()
     294  map->core.flush = mp->flush;  in mmap__mmap()
     313  &mp->auxtrace_mp, map->core.base, fd))  in mmap__mmap()
     322  u64 head = perf_mmap__read_head(&md->core);  in perf_mmap__push()
     323  unsigned char *data = md->core.base + page_size;  in perf_mmap__push()
     328  rc = perf_mmap__read_init(&md->core);  in perf_mmap__push()
     [all …]
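
perf_mmap__push() at lines 322-323 encodes the ring buffer's layout: the kernel's write head is read from the control page, and sample data starts one page past the mapping's base because page 0 holds struct perf_event_mmap_page (perf-time-to-tsc.c above reads that same control page, pc = evlist->mmap[0].core.base, for its TSC conversion fields). As a sketch:

    /* Layout assumed by lines 322-323: page 0 of the mapping is the
     * perf_event_mmap_page control page; ring data begins after it. */
    struct perf_event_mmap_page *pc = md->core.base;
    u64 head = perf_mmap__read_head(&md->core);       /* kernel write pointer */
    unsigned char *data = md->core.base + page_size;  /* start of ring data  */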
|
| D | evsel.h |
      60  struct perf_evsel core;  member
     234  return perf_evsel__cpus(&evsel->core);  in evsel__cpus()
     400  if (evsel->core.attr.type != type)  in __evsel__match()
     405  return (evsel->core.attr.config & PERF_HW_EVENT_MASK) == config;  in __evsel__match()
     407  return evsel->core.attr.config == config;  in __evsel__match()
     414  return (e1->core.attr.type == e2->core.attr.type) &&  in evsel__match2()
     415  (e1->core.attr.config == e2->core.attr.config);  in evsel__match2()
     456  return list_entry(evsel->core.node.next, struct evsel, core.node);  in evsel__next()
     461  return list_entry(evsel->core.node.prev, struct evsel, core.node);  in evsel__prev()
     473  return evsel->core.leader == &evsel->core;  in evsel__is_group_leader()
     [all …]
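
The `member` hit on line 60 is the structural fact behind every match in this listing: perf's tool-side struct evsel embeds libperf's struct perf_evsel as a field named core, and the raw perf_event_attr lives inside that. A reduced sketch of the layering (fields trimmed and partly assumed; see util/evsel.h and tools/lib/perf for the real definitions):

    struct perf_evsel {                     /* libperf */
            struct list_head        node;
            struct perf_event_attr  attr;   /* what sys_perf_event_open() takes */
            /* ...cpu/thread maps, fd and sample_id xyarrays... */
    };

    struct evsel {                          /* perf tool */
            struct perf_evsel       core;   /* libperf state, embedded */
            /* ...tool-only state... */
    };

That is why tool code reads evsel->core.attr.type and hands &evsel->core to libperf helpers such as perf_evsel__cpus() (line 234), and why evsel__is_group_leader() can simply compare evsel->core.leader against &evsel->core (line 473).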
|
| D | bpf_counter_cgroup.c |
      42  #define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))
      62  skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;  in bperf_load_program()
      67  BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);  in bperf_load_program()
      70  map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;  in bperf_load_program()
      74  map_size = evlist->core.nr_entries / nr_cgroups;  in bperf_load_program()
      77  map_size = evlist->core.nr_entries;  in bperf_load_program()
      91  if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) {  in bperf_load_program()
      96  perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {  in bperf_load_program()
     118  err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1);  in bperf_load_program()
     123  perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {  in bperf_load_program()
     [all …]
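
bperf_load_program() sizes its BPF maps from the evlist shape: evlist->core.nr_entries counts every event in every cgroup, so the unique-event count is nr_entries / nr_cgroups (the BUG_ON at line 67 enforces divisibility), and the per-cpu readings map needs one slot per cpu per unique event. A worked example under assumed numbers:

    /* Assumed shape: 4 events counted in 3 cgroups on a 16-CPU machine. */
    int total_cpus = 16, nr_cgroups = 3;
    int nr_entries = 12;                               /* evlist->core.nr_entries */

    int num_events = nr_entries / nr_cgroups;          /*  4, line 62 */
    int sz_per_cpu = total_cpus * nr_entries / nr_cgroups;  /* 64, line 70 */
    int sz_per_evt = nr_entries / nr_cgroups;          /*  4, line 74 */
    int sz_entries = nr_entries;                       /* 12, line 77 */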
|
| D | evlist.c |
      71  #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
      72  #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
      77  perf_evlist__init(&evlist->core);  in evlist__init()
      78  perf_evlist__set_maps(&evlist->core, cpus, threads);  in evlist__init()
     113  if (evlist->core.nr_entries > 1) {  in evlist__new_default()
     165  list_del_init(&pos->core.node);  in evlist__purge()
     170  evlist->core.nr_entries = 0;  in evlist__purge()
     178  perf_evlist__exit(&evlist->core);  in evlist__exit()
     197  perf_evlist__add(&evlist->core, &entry->core);  in evlist__add()
     199  entry->tracking = !entry->core.idx;  in evlist__add()
     [all …]
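
The FD() and SID() macros at lines 71-72 (evsel.c repeats the same FD(), and bpf_counter_cgroup.c above carries a two-argument variant) are the access path to libperf's per-(cpu, thread) state: core.fd and core.sample_id are xyarrays indexed by cpu index and thread index. Expanded:

    /* FD(e, x, y), verbatim from the matches: xyarray__entry() yields
     * a void * to cell (x, y); the cell holds the fd that
     * sys_perf_event_open() returned for cpu index x, thread index y. */
    #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))

    int fd = FD(evsel, cpu_idx, thread_idx);   /* read it...              */
    FD(evsel, cpu_idx, thread_idx) = -1;       /* ...or assign through it */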
|
| D | evsel.c |
     144  #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
     224  evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type);  in evsel__calc_id_pos()
     225  evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type);  in evsel__calc_id_pos()
     231  if (!(evsel->core.attr.sample_type & bit)) {  in __evsel__set_sample_bit()
     232  evsel->core.attr.sample_type |= bit;  in __evsel__set_sample_bit()
     241  if (evsel->core.attr.sample_type & bit) {  in __evsel__reset_sample_bit()
     242  evsel->core.attr.sample_type &= ~bit;  in __evsel__reset_sample_bit()
     257  evsel->core.attr.read_format |= PERF_FORMAT_ID;  in evsel__set_sample_id()
     281  perf_evsel__init(&evsel->core, attr, idx);  in evsel__init()
     313  evsel->core.attr.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |  in evsel__new_idx()
     [all …]
|
| D | perf_api_probe.c |
      32  fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1, flags);  in perf_do_probe_api()
      46  fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1, flags);  in perf_do_probe_api()
      84  evsel->core.attr.sample_type |= PERF_SAMPLE_IDENTIFIER;  in perf_probe_sample_identifier()
      89  evsel->core.attr.comm_exec = 1;  in perf_probe_comm_exec()
      94  evsel->core.attr.context_switch = 1;  in perf_probe_context_switch()
      99  evsel->core.attr.text_poke = 1;  in perf_probe_text_poke()
     104  evsel->core.attr.build_id = 1;  in perf_probe_build_id()
     109  evsel->core.attr.cgroup = 1;  in perf_probe_cgroup()
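
perf_api_probe.c detects kernel features by construction: each perf_probe_*() helper sets one candidate attr bit on a throwaway evsel, and perf_do_probe_api() offers it to the kernel, treating a successful open as proof that the running kernel understands the field. Condensed from the matches (error handling and cleanup trimmed, so a sketch rather than the full function):

    /* One probe helper per feature bit, e.g. line 94: */
    static void perf_probe_context_switch(struct evsel *evsel)
    {
            evsel->core.attr.context_switch = 1;
    }

    /* ...then inside perf_do_probe_api(), lines 32/46: */
    fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1, flags);
    if (fd >= 0) {
            close(fd);
            /* the kernel accepted the attribute: feature present */
    }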
|
| D | record.c |
      55  struct perf_event_attr *attr = &evsel->core.attr;  in evsel__config_leader_sampling()
      91  attr->sample_type = read_sampler->core.attr.sample_type |  in evsel__config_leader_sampling()
      92  leader->core.attr.sample_type;  in evsel__config_leader_sampling()
     102  if (perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0).cpu < 0)  in evlist__config()
     110  evsel->core.attr.comm_exec = 1;  in evlist__config()
     125  } else if (evlist->core.nr_entries > 1) {  in evlist__config()
     129  if (evsel->core.attr.sample_type == first->core.attr.sample_type)  in evlist__config()
     240  if (!evlist || perf_cpu_map__is_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) {  in evlist__can_select_event()
     248  cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0);  in evlist__can_select_event()
     252  fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1,  in evlist__can_select_event()
|
| /tools/perf/arch/x86/util/ |
| D | evsel.c |
      34  if ((evsel->core.attr.type == PERF_TYPE_RAW) &&  in evsel__sys_has_perf_metrics()
      52  u64 event = evsel->core.attr.config & PERF_HW_EVENT_MASK;  in arch_evsel__hw_name()
      53  u64 pmu = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;  in arch_evsel__hw_name()
     110  if (!evsel->core.attr.precise_ip &&  in arch_evsel__open_strerror()
     115  if (evsel->core.attr.exclude_kernel || evsel->core.attr.exclude_user ||  in arch_evsel__open_strerror()
     116  evsel->core.attr.exclude_hv || evsel->core.attr.exclude_idle ||  in arch_evsel__open_strerror()
     117  evsel->core.attr.exclude_host || evsel->core.attr.exclude_guest) {  in arch_evsel__open_strerror()
|
| D | evlist.c |
      32  list_add_tail(&evsel->core.node, &head);  in ___evlist__add_default_attrs()
      43  evsel->core.attr.config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;  in ___evlist__add_default_attrs()
      45  evsel->core.cpus = cpus;  in ___evlist__add_default_attrs()
      46  evsel->core.own_cpus = perf_cpu_map__get(cpus);  in ___evlist__add_default_attrs()
      48  list_add_tail(&evsel->core.node, &head);  in ___evlist__add_default_attrs()
      99  return lhs->core.idx - rhs->core.idx;  in arch_evlist__cmp()
|
| /tools/perf/pmu-events/arch/powerpc/ |
| D | mapfile.csv |
     10  # Type is core, uncore etc
     14  0x004[bcd][[:xdigit:]]{4},1,power8,core
     15  0x0066[[:xdigit:]]{4},1,power8,core
     16  0x004e[[:xdigit:]]{4},1,power9,core
     17  0x0080[[:xdigit:]]{4},1,power10,core
     18  0x0082[[:xdigit:]]{4},1,power10,core
|
| /tools/testing/selftests/bpf/ |
| D | test_kmod.sh |
     28  sysctl -w net.core.bpf_jit_enable=$1 2>&1 > /dev/null
     29  sysctl -w net.core.bpf_jit_harden=$2 2>&1 > /dev/null
     56  JE=`sysctl -n net.core.bpf_jit_enable`
     57  JH=`sysctl -n net.core.bpf_jit_harden`
     62  sysctl -w net.core.bpf_jit_enable=$JE 2>&1 > /dev/null
     63  sysctl -w net.core.bpf_jit_harden=$JH 2>&1 > /dev/null
|
| /tools/perf/pmu-events/arch/riscv/ |
| D | mapfile.csv |
      5  # MVENDORID JEDEC code of the core provider
     13  # Type is core, uncore etc
     17  0x489-0x8000000000000007-0x[[:xdigit:]]+,v1,sifive/u74,core
     18  0x5b7-0x0-0x0,v1,thead/c900-legacy,core
     19  0x67e-0x80000000db0000[89]0-0x[[:xdigit:]]+,v1,starfive/dubhe-80,core
     20  0x31e-0x8000000000008a45-0x[[:xdigit:]]+,v1,andes/ax45,core
|
| /tools/power/x86/intel-speed-select/ |
| D | Build | 1 …-speed-select-y += isst-config.o isst-core.o isst-display.o isst-daemon.o hfi-events.o isst-core-…
|
| /tools/testing/selftests/net/ |
| D | rps_default_mask.sh |
     10  readonly INITIAL_RPS_DEFAULT_MASK=$(cat /proc/sys/net/core/rps_default_mask)
     21  echo $INITIAL_RPS_DEFAULT_MASK > /proc/sys/net/core/rps_default_mask
     50  echo 0 > /proc/sys/net/core/rps_default_mask
     55  echo 1 > /proc/sys/net/core/rps_default_mask
     59  echo 3 > /proc/sys/net/core/rps_default_mask
     73  ip netns exec $NETNS sysctl -qw net.core.rps_default_mask=1
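
rps_default_mask is the hex cpumask applied as the default RPS (Receive Packet Steering) setting on newly registered network devices; the test saves the initial value, writes masks 0, 1, and 3, and checks how they propagate into fresh devices and namespaces. What one of those echo lines does, written out as plain C for illustration:

    #include <stdio.h>

    /* Equivalent of `echo 3 > /proc/sys/net/core/rps_default_mask`:
     * bit N set means CPU N handles RPS for new net devices. */
    int main(void)
    {
            FILE *f = fopen("/proc/sys/net/core/rps_default_mask", "w");

            if (!f)
                    return 1;                /* needs root and the sysctl present */
            fprintf(f, "%x\n", 0x3);         /* CPUs 0 and 1 */
            return fclose(f) ? 1 : 0;
    }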
|