/tools/perf/util/ |
D | stat.c |
    198  static void zero_per_pkg(struct perf_evsel *counter)   in zero_per_pkg() argument
    200  if (counter->per_pkg_mask)   in zero_per_pkg()
    201  memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);   in zero_per_pkg()
    204  static int check_per_pkg(struct perf_evsel *counter,   in check_per_pkg() argument
    207  unsigned long *mask = counter->per_pkg_mask;   in check_per_pkg()
    208  struct cpu_map *cpus = perf_evsel__cpus(counter);   in check_per_pkg()
    213  if (!counter->per_pkg)   in check_per_pkg()
    224  counter->per_pkg_mask = mask;   in check_per_pkg()
    289  struct perf_evsel *counter)   in process_counter_maps() argument
    291  int nthreads = thread_map__nr(counter->threads);   in process_counter_maps()
    [all …]
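check_per_pkg() above implements "per package" events: counter->per_pkg_mask holds one bit per
processor package, so a count reported by several CPUs of the same package is only accumulated
once. A minimal sketch of that idea, with socket_of_cpu() as a hypothetical stand-in for the real
cpu_map/topology lookup:

    #include <stdio.h>

    /* Sketch of the per-package dedup idea: keep one bit per package and only
     * accept the first CPU seen in each package.  socket_of_cpu() is a
     * placeholder for the real topology lookup. */
    static int socket_of_cpu(int cpu)
    {
        return cpu / 4;          /* pretend 4 CPUs per package, for illustration */
    }

    static unsigned long pkg_mask;   /* plays the role of counter->per_pkg_mask */

    static int first_cpu_in_pkg(int cpu)
    {
        unsigned long bit = 1UL << socket_of_cpu(cpu);

        if (pkg_mask & bit)
            return 0;            /* package already counted: skip this value */
        pkg_mask |= bit;
        return 1;                /* first CPU of this package: keep the value */
    }

    int main(void)
    {
        int cpu;

        for (cpu = 0; cpu < 8; cpu++)
            printf("cpu %d: %s\n", cpu, first_cpu_in_pkg(cpu) ? "count" : "skip");
        return 0;
    }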
|
D | stat-shadow.c |
    102  void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,   in perf_stat__update_shadow_stats() argument
    105  int ctx = evsel_context(counter);   in perf_stat__update_shadow_stats()
    107  if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||   in perf_stat__update_shadow_stats()
    108  perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))   in perf_stat__update_shadow_stats()
    110  else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))   in perf_stat__update_shadow_stats()
    112  else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))   in perf_stat__update_shadow_stats()
    114  else if (perf_stat_evsel__is(counter, TRANSACTION_START))   in perf_stat__update_shadow_stats()
    116  else if (perf_stat_evsel__is(counter, ELISION_START))   in perf_stat__update_shadow_stats()
    118  else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))   in perf_stat__update_shadow_stats()
    120  else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))   in perf_stat__update_shadow_stats()
    [all …]
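perf_stat__update_shadow_stats() classifies each counter (task clock, CPU cycles, transactional
cycles, topdown slots, ...) and stashes its raw value so the printing code can later derive ratios
such as instructions per cycle. A toy sketch of that save-now/derive-later pattern, with
illustrative event names and numbers:

    #include <stdio.h>
    #include <string.h>

    /* Toy shadow stats: remember the raw value of a few well-known events,
     * derive a ratio at print time. */
    static double shadow_cycles, shadow_instructions;

    static void update_shadow(const char *event, double value)
    {
        if (!strcmp(event, "cycles"))
            shadow_cycles = value;
        else if (!strcmp(event, "instructions"))
            shadow_instructions = value;
    }

    int main(void)
    {
        update_shadow("cycles", 2.0e9);
        update_shadow("instructions", 3.1e9);
        if (shadow_cycles > 0)
            printf("%.2f insn per cycle\n", shadow_instructions / shadow_cycles);
        return 0;
    }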
|
D | cgroup.c |
    78   struct perf_evsel *counter;   in add_cgroup() local
    84   evlist__for_each_entry(evlist, counter) {   in add_cgroup()
    85   cgrp = counter->cgrp;   in add_cgroup()
    113  evlist__for_each_entry(evlist, counter) {   in add_cgroup()
    124  counter->cgrp = cgrp;   in add_cgroup()
|
D | stat.h |
    82   void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
    99   struct perf_evsel *counter);
|
/tools/perf/ |
D | builtin-stat.c |
    280  perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,   in perf_evsel__write_stat_event() argument
    283  struct perf_sample_id *sid = SID(counter, cpu, thread);   in perf_evsel__write_stat_event()
    293  static int read_counter(struct perf_evsel *counter)   in read_counter() argument
    299  ncpus = perf_evsel__nr_cpus(counter);   in read_counter()
    303  if (!counter->supported)   in read_counter()
    306  if (counter->system_wide)   in read_counter()
    313  count = perf_counts(counter->counts, cpu, thread);   in read_counter()
    314  if (perf_evsel__read(counter, cpu, thread, count)) {   in read_counter()
    315  counter->counts->scaled = -1;   in read_counter()
    316  perf_counts(counter->counts, cpu, thread)->ena = 0;   in read_counter()
    [all …]
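read_counter() stores, for every CPU/thread pair, the raw value together with the time the event
was enabled and the time it was actually running; a multiplexed counter is later scaled up by the
enabled/running ratio. A sketch of that arithmetic, assuming attr.read_format has
PERF_FORMAT_TOTAL_TIME_ENABLED and PERF_FORMAT_TOTAL_TIME_RUNNING set:

    #include <stdint.h>
    #include <stdio.h>

    /* Layout returned by read() when PERF_FORMAT_TOTAL_TIME_ENABLED and
     * PERF_FORMAT_TOTAL_TIME_RUNNING are set in attr.read_format. */
    struct read_format {
        uint64_t value;
        uint64_t time_enabled;
        uint64_t time_running;
    };

    /* Estimate the full-period count for a multiplexed counter. */
    static uint64_t scaled_count(const struct read_format *rf)
    {
        if (rf->time_running == 0)
            return 0;                          /* never scheduled: nothing to scale */
        if (rf->time_running == rf->time_enabled)
            return rf->value;                  /* ran the whole time: exact value */
        return (uint64_t)((double)rf->value *
                          rf->time_enabled / rf->time_running);
    }

    int main(void)
    {
        struct read_format rf = { .value = 1000, .time_enabled = 200, .time_running = 100 };

        printf("%llu\n", (unsigned long long)scaled_count(&rf));   /* prints 2000 */
        return 0;
    }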
|
D | design.txt |
    13   hardware capabilities. It provides per task and per CPU counters, counter
    19   There's one file descriptor per virtual counter used.
    29   VFS system calls: read() can be used to read the counter, fcntl()
    35   When creating a new counter fd, 'perf_event_attr' is:
    40   * specific (raw) counter configuration data, if unset, the next
    71   The 'config' field specifies what the counter should count. It
    78   If 'raw_type' is 1, then the counter will count a hardware event
    82   If 'raw_type' is 0, then the 'type' field says what kind of counter
    91   A counter of PERF_TYPE_HARDWARE will count the hardware event
    95   * Generalized performance counter event types, used by the hw_event.event_id
    [all …]
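The design.txt excerpt describes the basic model: one file descriptor per counter, configured
through a 'perf_event_attr' and read with plain read(). A minimal user-space sketch of that flow
(perf_event_open() has no glibc wrapper, so it is invoked via syscall(); error handling is
abbreviated):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        uint64_t count = 0;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;            /* generalized hardware event */
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        fd = perf_event_open(&attr, 0 /* this task */, -1 /* any CPU */, -1, 0);
        if (fd < 0)
            return 1;

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        for (volatile int i = 0; i < 1000000; i++)
            ;                                      /* the work being measured */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
            printf("instructions: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
    }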
|
D | builtin-top.c |
    186  int counter, u64 ip)   in perf_top__record_precise_ip() argument
    202  err = hist_entry__inc_addr_samples(he, counter, ip);   in perf_top__record_precise_ip()
    488  int counter = 0;   in perf_top__handle_keypress() local
    495  prompt_integer(&counter, "Enter details event counter");   in perf_top__handle_keypress()
    497  if (counter >= top->evlist->nr_entries) {   in perf_top__handle_keypress()
    504  if (top->sym_evsel->idx == counter)   in perf_top__handle_keypress()
    867  struct perf_evsel *counter;   in perf_top__start_counters() local
    873  evlist__for_each_entry(evlist, counter) {   in perf_top__start_counters()
    875  if (perf_evsel__open(counter, top->evlist->cpus,   in perf_top__start_counters()
    877  if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {   in perf_top__start_counters()
    [all …]
|
D | builtin-script.c |
    920  static void __process_stat(struct perf_evsel *counter, u64 tstamp)   in __process_stat() argument
    922  int nthreads = thread_map__nr(counter->threads);   in __process_stat()
    923  int ncpus = perf_evsel__nr_cpus(counter);   in __process_stat()
    927  if (counter->system_wide)   in __process_stat()
    940  counts = perf_counts(counter->counts, cpu, thread);   in __process_stat()
    943  counter->cpus->map[cpu],   in __process_stat()
    944  thread_map__pid(counter->threads, thread),   in __process_stat()
    949  perf_evsel__name(counter));   in __process_stat()
    954  static void process_stat(struct perf_evsel *counter, u64 tstamp)   in process_stat() argument
    957  scripting_ops->process_stat(&stat_config, counter, tstamp);   in process_stat()
    [all …]
|
/tools/include/asm-generic/ |
D | atomic-gcc.h |
    24   return ACCESS_ONCE((v)->counter);   in atomic_read()
    36   v->counter = i;   in atomic_set()
    47   __sync_add_and_fetch(&v->counter, 1);   in atomic_inc()
    60   return __sync_sub_and_fetch(&v->counter, 1) == 0;   in atomic_dec_and_test()
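The generic fallback builds the tools' atomic_t (a struct wrapping a single 'int counter', see
types.h below) on top of GCC's __sync builtins. A minimal sketch of the same approach, with a
simplified refcount as the usage example; anything compiled with GCC or clang provides these
intrinsics:

    #include <stdio.h>

    typedef struct { int counter; } atomic_t;   /* mirrors tools/include/linux/types.h */

    static inline int atomic_read(atomic_t *v)
    {
        return *(volatile int *)&v->counter;    /* read once, no tearing */
    }

    static inline void atomic_set(atomic_t *v, int i) { v->counter = i; }
    static inline void atomic_inc(atomic_t *v)        { __sync_add_and_fetch(&v->counter, 1); }

    static inline int atomic_dec_and_test(atomic_t *v)
    {
        return __sync_sub_and_fetch(&v->counter, 1) == 0;   /* true when it hits zero */
    }

    int main(void)
    {
        atomic_t ref;

        atomic_set(&ref, 1);
        atomic_inc(&ref);                       /* a second user takes a reference */
        if (!atomic_dec_and_test(&ref))
            printf("still in use: %d\n", atomic_read(&ref));
        if (atomic_dec_and_test(&ref))
            printf("last reference dropped\n");
        return 0;
    }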
|
/tools/arch/x86/include/asm/ |
D | atomic.h |
    25   return ACCESS_ONCE((v)->counter);   in atomic_read()
    37   v->counter = i;   in atomic_set()
    49   : "+m" (v->counter));   in atomic_inc()
    62   GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");   in atomic_dec_and_test()
|
/tools/perf/Documentation/ |
D | perf-stat.txt |
    6    perf-stat - Run a command and gather performance counter statistics
    18   This command runs a command and gathers performance counter statistics
    70   scale/normalize counter values
    106  be more verbose (show counter open errors, etc)
    245  Performance counter stats for 'make -j':
    270  - counter value
    271  - unit of the counter value or empty
    273  - run time of counter
    274  - percentage of measurement time the counter was running
|
D | perf-kvm.txt |
    23   a performance counter profile of guest os in realtime
    26   'perf kvm record <command>' to record the performance counter profile
    39   'perf kvm report' to display the performance counter profile information
    51   'perf kvm stat <command>' to run a command and gather performance counter
    98   Be more verbose (show counter open errors, etc).
|
D | examples.txt |
    35   Performance counter stats for './hackbench 10':
    56   Performance counter stats for './hackbench 10' (5 runs):
    164  Performance counter stats for 'sleep 10':
    180  Performance counter stats for 'sleep 1' (10 runs):
|
D | perf-data.txt | 35 Be more verbose (show counter open errors, etc).
|
D | perf-top.txt |
    15   This command generates and displays a performance counter profile in real time.
    54   Put the counters into a counter group.
    110  Be more verbose (show counter open errors, etc).
|
D | perf-list.txt |
    82   Note: Only the following bit fields can be set in x86 counter
    198  ref-cycles. Some special events have restrictions on which counter they
    204  other groups. On x86 systems, the NMI watchdog pins a counter by default.
|
D | perf-record.txt |
    16   This command runs a command and gathers a performance counter profile
    240  Be more verbose (show counter open errors, etc).
    363  Capture machine state (registers) at interrupt, i.e., on counter overflows for
|
D | perf.data-file-format.txt | 199 Description of counter groups ({...} in perf syntax)
|
D | perf-report.txt | 15 This command displays the performance counter profile information recorded
|
/tools/perf/arch/x86/tests/ |
D | rdpmc.c |
    16   static u64 rdpmc(unsigned int counter)   in rdpmc() argument
    20   asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));   in rdpmc()
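RDPMC returns the selected performance counter in EDX:EAX, so the two 32-bit outputs above are
folded into one 64-bit value. A compile-only sketch of the complete helper (x86 only; actually
executing RDPMC from user space additionally needs CR4.PCE or the counter index from the perf
self-monitoring mmap page, which is outside this fragment):

    #include <stdint.h>

    /* Read hardware performance counter 'counter' with the RDPMC instruction
     * and fold EDX:EAX into a single 64-bit value. */
    static inline uint64_t rdpmc(unsigned int counter)
    {
        unsigned int low, high;

        asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

        return low | ((uint64_t)high) << 32;
    }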
|
/tools/include/linux/ |
D | types.h | 64 int counter; member
|
/tools/iio/ |
D | iio_utils.h | 72 struct iio_channel_info **ci_array, int *counter);
|
D | iio_utils.c |
    315  struct iio_channel_info **ci_array, int *counter)   in build_channel_array() argument
    326  *counter = 0;   in build_channel_array()
    364  (*counter)++;   in build_channel_array()
    375  *ci_array = malloc(sizeof(**ci_array) * (*counter));   in build_channel_array()
    522  bsort_channel_array_by_index(*ci_array, *counter);   in build_channel_array()
    533  *counter = 0;   in build_channel_array()
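build_channel_array() returns the number of enabled scan channels through *counter and a
malloc'ed, index-sorted array of struct iio_channel_info through *ci_array. A hedged usage sketch,
assuming the first parameter is the sysfs device directory as declared in iio_utils.h; the path is
only an example, and the per-channel strings are not freed here:

    #include <stdio.h>
    #include <stdlib.h>
    #include "iio_utils.h"              /* link together with iio_utils.c */

    int main(void)
    {
        struct iio_channel_info *channels;
        int num_channels, i, ret;

        ret = build_channel_array("/sys/bus/iio/devices/iio:device0",
                                  &channels, &num_channels);
        if (ret < 0)
            return 1;

        for (i = 0; i < num_channels; i++)
            printf("%s: index %u\n", channels[i].name, channels[i].index);

        free(channels);                 /* sketch: strings inside the array are leaked */
        return 0;
    }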
|
/tools/perf/util/scripting-engines/ |
D | trace-event-python.c |
    899  process_stat(struct perf_evsel *counter, int cpu, int thread, u64 tstamp,   in process_stat() argument
    911  counter);   in process_stat()
    936  struct perf_evsel *counter, u64 tstamp)   in python_process_stat() argument
    938  struct thread_map *threads = counter->threads;   in python_process_stat()
    939  struct cpu_map *cpus = counter->cpus;   in python_process_stat()
    943  process_stat(counter, -1, -1, tstamp,   in python_process_stat()
    944  &counter->counts->aggr);   in python_process_stat()
    950  process_stat(counter, cpus->map[cpu],   in python_process_stat()
    952  perf_counts(counter->counts, cpu, thread));   in python_process_stat()
|
/tools/power/cpupower/po/ |
D | de.po | 57 msgid "North Bridge P1 boolean counter (returns 0 or 1)"
|