/tools/perf/util/ |
D | counts.c |
    10 struct perf_counts *counts = zalloc(sizeof(*counts)); in perf_counts__new() local
    12 if (counts) { in perf_counts__new()
    17 free(counts); in perf_counts__new()
    21 counts->values = values; in perf_counts__new()
    25 xyarray__delete(counts->values); in perf_counts__new()
    26 free(counts); in perf_counts__new()
    30 counts->loaded = values; in perf_counts__new()
    33 return counts; in perf_counts__new()
    36 void perf_counts__delete(struct perf_counts *counts) in perf_counts__delete() argument
    38 if (counts) { in perf_counts__delete()
    [all …]
|
D | counts.h |
    21 perf_counts(struct perf_counts *counts, int cpu, int thread) in perf_counts() argument
    23 return xyarray__entry(counts->values, cpu, thread); in perf_counts()
    27 perf_counts__is_loaded(struct perf_counts *counts, int cpu, int thread) in perf_counts__is_loaded() argument
    29 return *((bool *) xyarray__entry(counts->loaded, cpu, thread)); in perf_counts__is_loaded()
    33 perf_counts__set_loaded(struct perf_counts *counts, int cpu, int thread, bool loaded) in perf_counts__set_loaded() argument
    35 *((bool *) xyarray__entry(counts->loaded, cpu, thread)) = loaded; in perf_counts__set_loaded()
    39 void perf_counts__delete(struct perf_counts *counts);
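Taken together, the counts.c and counts.h matches describe the lifecycle of a perf_counts matrix: allocate a cpu x thread array, index individual cells, free it. A minimal sketch of that lifecycle, using only the signatures visible above; the val field of perf_counts_values is inferred from the other files in this listing, not from counts.h itself.

    /*
     * Illustrative sketch, not code from the perf tree: allocate a
     * cpu x thread matrix, touch one cell, mark it unloaded, free it.
     */
    #include <errno.h>
    #include "util/counts.h"

    static int touch_one_cell(int ncpus, int nthreads)
    {
            struct perf_counts *counts = perf_counts__new(ncpus, nthreads);
            struct perf_counts_values *v;

            if (!counts)
                    return -ENOMEM;

            v = perf_counts(counts, /*cpu=*/0, /*thread=*/0);
            v->val = 0;                               /* assumed field name */
            perf_counts__set_loaded(counts, 0, 0, false);

            perf_counts__delete(counts);
            return 0;
    }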
|
D | branch.c |
    24 st->counts[flags->type]++; in branch_type_count()
    67 total += st->counts[i]; in branch_type_stat_display()
    101 if (st->counts[i] > 0) in branch_type_stat_display()
    105 (double)st->counts[i] / (double)total); in branch_type_stat_display()
    120 total += st->counts[i]; in branch_type_str()
    135 if (st->counts[i] > 0) in branch_type_str()
|
D | stat.c |
    150 struct perf_counts *counts; in perf_evsel__alloc_prev_raw_counts() local
    152 counts = perf_counts__new(ncpus, nthreads); in perf_evsel__alloc_prev_raw_counts()
    153 if (counts) in perf_evsel__alloc_prev_raw_counts()
    154 evsel->prev_raw_counts = counts; in perf_evsel__alloc_prev_raw_counts()
    156 return counts ? 0 : -ENOMEM; in perf_evsel__alloc_prev_raw_counts()
    285 struct perf_counts_values *aggr = &evsel->counts->aggr; in process_counter_values()
    345 perf_counts(counter->counts, cpu, thread))) in process_counter_maps()
    356 struct perf_counts_values *aggr = &counter->counts->aggr; in perf_stat_process_counter()
    358 u64 *count = counter->counts->aggr.values; in perf_stat_process_counter()
    387 perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled); in perf_stat_process_counter()
    [all …]
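The perf_counts_values__scale() call at stat.c line 387 is where multiplexed counts get extrapolated. A rough sketch of the arithmetic only, under the usual ena/run convention (time the event was enabled vs. time it was actually on the PMU); this is not the real helper, whose bookkeeping may differ.

    /*
     * Illustrative only: extrapolate a raw count when the event was enabled
     * for 'ena' ns but scheduled on the hardware for just 'run' ns.
     * Returns -1 when the event never ran, 1 when the value was scaled,
     * 0 when it is exact.
     */
    static int scale_count(unsigned long long *val,
                           unsigned long long ena, unsigned long long run)
    {
            if (run == 0)
                    return -1;                 /* never counted */
            if (run < ena)
                    *val = (unsigned long long)((double)*val * ena / run);
            return run < ena ? 1 : 0;
    }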
|
D | stat-display.c |
    430 if (run == 0 || ena == 0 || counter->counts->scaled == -1) { in printout()
    504 val += perf_counts(counter->counts, cpu, 0)->val; in aggr_update_shadow()
    592 struct perf_counts_values *counts; in aggr_cb() local
    599 counts = perf_counts(counter->counts, cpu, 0); in aggr_cb()
    604 if (counts->ena == 0 || counts->run == 0 || in aggr_cb()
    605 counter->counts->scaled == -1) { in aggr_cb()
    610 ad->val += counts->val; in aggr_cb()
    611 ad->ena += counts->ena; in aggr_cb()
    612 ad->run += counts->run; in aggr_cb()
    709 val += perf_counts(counter->counts, cpu, thread)->val; in sort_aggr_thread()
    [all …]
|
D | branch.h | 47 u64 counts[PERF_BR_MAX]; member
|
D | python-ext-sources | 28 util/counts.c
|
D | evsel.h | 47 struct perf_counts *counts; member
|
/tools/perf/lib/tests/ |
D | test-evsel.c |
    36 struct perf_counts_values counts = { .val = 0 }; in test_stat_cpu() local
    38 perf_evsel__read(evsel, cpu, 0, &counts); in test_stat_cpu()
    39 __T("failed to read value for evsel", counts.val != 0); in test_stat_cpu()
    51 struct perf_counts_values counts = { .val = 0 }; in test_stat_thread() local
    71 perf_evsel__read(evsel, 0, 0, &counts); in test_stat_thread()
    72 __T("failed to read value for evsel", counts.val != 0); in test_stat_thread()
    83 struct perf_counts_values counts = { .val = 0 }; in test_stat_thread_enable() local
    104 perf_evsel__read(evsel, 0, 0, &counts); in test_stat_thread_enable()
    105 __T("failed to read value for evsel", counts.val == 0); in test_stat_thread_enable()
    110 perf_evsel__read(evsel, 0, 0, &counts); in test_stat_thread_enable()
    [all …]
|
D | test-evlist.c |
    57 struct perf_counts_values counts = { .val = 0 }; in test_stat_cpu() local
    59 perf_evsel__read(evsel, cpu, 0, &counts); in test_stat_cpu()
    60 __T("failed to read value for evsel", counts.val != 0); in test_stat_cpu()
    73 struct perf_counts_values counts = { .val = 0 }; in test_stat_thread() local
    111 perf_evsel__read(evsel, 0, 0, &counts); in test_stat_thread()
    112 __T("failed to read value for evsel", counts.val != 0); in test_stat_thread()
    124 struct perf_counts_values counts = { .val = 0 }; in test_stat_thread_enable() local
    164 perf_evsel__read(evsel, 0, 0, &counts); in test_stat_thread_enable()
    165 __T("failed to read value for evsel", counts.val == 0); in test_stat_thread_enable()
    171 perf_evsel__read(evsel, 0, 0, &counts); in test_stat_thread_enable()
    [all …]
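Both libperf test files follow the same read-and-assert pattern. A sketch of just that pattern, assuming the API exactly as it appears in the matches above (perf_evsel__read(evsel, cpu, thread, &counts)); creating the evsel, attaching cpu/thread maps and opening the event are omitted here.

    #include <perf/evsel.h>

    /* Sketch: read one cpu/thread cell and report whether it is non-zero. */
    static int counts_nonzero(struct perf_evsel *evsel, int cpu, int thread)
    {
            struct perf_counts_values counts = { .val = 0 };

            perf_evsel__read(evsel, cpu, thread, &counts);
            return counts.val != 0;
    }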
|
/tools/testing/selftests/bpf/progs/ |
D | test_btf_newkv.c |
    38 struct ipv_counts *counts; in test_long_fname_2() local
    44 counts = bpf_map_lookup_elem(&btf_map, &key); in test_long_fname_2()
    45 if (!counts) in test_long_fname_2()
    48 counts->v6++; in test_long_fname_2()
    51 counts = bpf_map_lookup_elem(&btf_map_legacy, &key); in test_long_fname_2()
    52 if (!counts) in test_long_fname_2()
|
D | test_btf_nokv.c |
    28 struct ipv_counts *counts; in test_long_fname_2() local
    34 counts = bpf_map_lookup_elem(&btf_map, &key); in test_long_fname_2()
    35 if (!counts) in test_long_fname_2()
    38 counts->v6++; in test_long_fname_2()
|
D | test_btf_haskv.c |
    30 struct ipv_counts *counts; in test_long_fname_2() local
    36 counts = bpf_map_lookup_elem(&btf_map, &key); in test_long_fname_2()
    37 if (!counts) in test_long_fname_2()
    40 counts->v6++; in test_long_fname_2()
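All three test_btf_*.c variants exercise the same lookup-and-increment pattern; they differ only in how the map and its key/value types are declared. A sketch of the pattern using BTF-defined map syntax — the section name and the ipv_counts layout beyond the v6 member visible above are illustrative, not copied from the selftests.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct ipv_counts {
            unsigned int v4;        /* assumed; only v6 is visible above */
            unsigned int v6;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY);
            __uint(max_entries, 4);
            __type(key, int);
            __type(value, struct ipv_counts);
    } btf_map SEC(".maps");

    SEC("socket")                   /* arbitrary program type for the sketch */
    int count_v6(void *ctx)
    {
            struct ipv_counts *counts;
            int key = 0;

            counts = bpf_map_lookup_elem(&btf_map, &key);
            if (!counts)            /* the verifier insists on this check */
                    return 0;

            counts->v6++;
            return 0;
    }

    char _license[] SEC("license") = "GPL";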
|
/tools/perf/tests/ |
D | openat-syscall.c |
    54 if (perf_counts(evsel->counts, 0, 0)->val != nr_openat_calls) { in test__openat_syscall_event()
    56 nr_openat_calls, perf_counts(evsel->counts, 0, 0)->val); in test__openat_syscall_event()
|
D | openat-syscall-all-cpus.c |
    113 if (perf_counts(evsel->counts, cpu, 0)->val != expected) { in test__openat_syscall_event_on_all_cpus()
    115 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val); in test__openat_syscall_event_on_all_cpus()
|
/tools/memory-model/scripts/ |
D | cmplitmushist.sh |
    36 echo Matching Observation result and counts: $2
    71 echo Matching Observation result and counts: $obsline 1>&2
|
/tools/perf/scripts/python/bin/ |
D | syscall-counts-report | 10 perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts.py $comm
|
D | syscall-counts-by-pid-report | 10 perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts-by-pid.py $comm
|
/tools/perf/Documentation/ |
D | perf-stat.txt |
    50 'percore' is a event qualifier that sums up the event counts for both
    111 Do not aggregate counts across all monitored CPUs.
    123 print counts using a CSV-style output to make it easy to import directly into
    201 Aggregate counts per processor socket for system-wide mode measurements. This
    208 Aggregate counts per processor die for system-wide mode measurements. This
    215 Aggregate counts per physical processor for system-wide mode measurements. This
    221 Aggregate counts per monitored threads, when monitoring threads (-t option)
    251 Aggregate counts per processor socket for system-wide mode measurements.
    254 Aggregate counts per processor die for system-wide mode measurements.
    257 Aggregate counts per physical processor for system-wide mode measurements.
    [all …]
|
D | perf-script-python.txt |
    32 'syscall-counts' script you see when you list the available perf script
    37 The syscall-counts script is a simple script, but demonstrates all the
    190 # mv perf-script.py syscall-counts.py
    191 # perf script -s syscall-counts.py
    260 and having the counts we've tallied as values.
    316 # perf script -s syscall-counts.py
    356 # cat kernel-source/tools/perf/scripts/python/bin/syscall-counts-record
    368 # cat kernel-source/tools/perf/scripts/python/bin/syscall-counts-report
    371 # description: system-wide syscall counts
    372 perf script -s ~/libexec/perf-core/scripts/python/syscall-counts.py
    [all …]
|
D | tips.txt | 21 Print event counts in CSV format with: perf stat -x,
|
/tools/usb/ |
D | ffs-test.c |
    233 const __le32 counts[]; in descs_to_legacy() member
    235 const __le32 *counts = in->counts; in descs_to_legacy() local
    255 ret = le32_to_cpu(*counts); \ in descs_to_legacy()
    257 ++counts; \ in descs_to_legacy()
    267 descs_start = (const void *)counts; in descs_to_legacy()
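The ffs-test.c matches show descs_to_legacy() walking a run of little-endian 32-bit counters that precedes the descriptor data. A standalone userspace sketch of that walk, with le32toh() from <endian.h> standing in for the kernel-style le32_to_cpu(); the names are illustrative.

    #include <endian.h>
    #include <stdint.h>

    /* Sum 'n' little-endian counters and return a pointer just past them. */
    static const void *skip_counts(const uint32_t *counts, unsigned int n,
                                   uint32_t *total)
    {
            *total = 0;
            while (n--) {
                    *total += le32toh(*counts);
                    ++counts;
            }
            return counts;  /* descriptors start right after the counts */
    }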
|
/tools/perf/ |
D | builtin-stat.c |
    256 perf_counts(counter->counts, cpu, thread); in read_single_counter()
    288 count = perf_counts(counter->counts, cpu, thread); in read_counter()
    294 if (!perf_counts__is_loaded(counter->counts, cpu, thread) && in read_counter()
    296 counter->counts->scaled = -1; in read_counter()
    297 perf_counts(counter->counts, cpu, thread)->ena = 0; in read_counter()
    298 perf_counts(counter->counts, cpu, thread)->run = 0; in read_counter()
    302 perf_counts__set_loaded(counter->counts, cpu, thread, false); in read_counter()
|
D | design.txt |
    251 cpu == -1: the counter counts on all CPUs
    255 A 'pid > 0' and 'cpu == -1' counter is a per task counter that counts
    260 A 'pid == -1' and 'cpu == x' counter is a per CPU counter that counts
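These design.txt lines describe the pid/cpu arguments of the perf_event_open() syscall. A small sketch of how those combinations are passed in practice (raw syscall, since glibc provides no wrapper); the event choice and the lack of error handling are only to keep the example short.

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>
    #include <string.h>

    static long open_counter(pid_t pid, int cpu)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size   = sizeof(attr);
            attr.type   = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_INSTRUCTIONS;

            /* pid > 0,  cpu == -1: follow one task across all CPUs     */
            /* pid == -1, cpu == x: count every task, but only on CPU x */
            return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
    }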
|
/tools/kvm/kvm_stat/ |
D | kvm_stat.txt | 15 kvm_stat prints counts of KVM kernel module trace events. These events signify
|