/tools/perf/tests/
parse-events.c
    23  struct perf_evsel *evsel = perf_evlist__first(evlist);    in test__checkevent_tracepoint() local
    27  TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);    in test__checkevent_tracepoint()
    29  PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);    in test__checkevent_tracepoint()
    30  TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);    in test__checkevent_tracepoint()
    36  struct perf_evsel *evsel;    in test__checkevent_tracepoint_multi() local
    41  list_for_each_entry(evsel, &evlist->entries, node) {    in test__checkevent_tracepoint_multi()
    43  PERF_TYPE_TRACEPOINT == evsel->attr.type);    in test__checkevent_tracepoint_multi()
    45  PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type);    in test__checkevent_tracepoint_multi()
    47  1 == evsel->attr.sample_period);    in test__checkevent_tracepoint_multi()
    54  struct perf_evsel *evsel = perf_evlist__first(evlist);    in test__checkevent_raw() local
    [all …]
evsel-tp-sched.c
     5  static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,    in perf_evsel__test_field() argument
     8  struct format_field *field = perf_evsel__field(evsel, name);    in perf_evsel__test_field()
    13  pr_debug("%s: \"%s\" field not found!\n", evsel->name, name);    in perf_evsel__test_field()
    20  evsel->name, name, is_signed, should_be_signed);    in perf_evsel__test_field()
    26  evsel->name, name, field->size, size);    in perf_evsel__test_field()
    35  struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0);    in test__perf_evsel__tp_sched_test() local
    38  if (evsel == NULL) {    in test__perf_evsel__tp_sched_test()
    43  if (perf_evsel__test_field(evsel, "prev_comm", 16, true))    in test__perf_evsel__tp_sched_test()
    46  if (perf_evsel__test_field(evsel, "prev_pid", 4, true))    in test__perf_evsel__tp_sched_test()
    49  if (perf_evsel__test_field(evsel, "prev_prio", 4, true))    in test__perf_evsel__tp_sched_test()
    [all …]
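The references above check the size and signedness of sched_switch fields through perf_evsel__field(), which looks the name up in the tracepoint format data that perf_evsel__newtp() parsed. A minimal sketch of the same size check, written against the perf-internal headers of this tree; the helper name check_field_size() and the exact pr_debug wording are illustrative, only the perf_evsel__newtp()/perf_evsel__field() calls and field->size come from the listing.

/* Sketch: verify a tracepoint field's size using the format data that
 * perf_evsel__newtp() parses from the sched_switch event description.
 * Assumes it is built inside tools/perf with util/evsel.h available.
 */
#include "util/evsel.h"
#include "util/debug.h"

static int check_field_size(struct perf_evsel *evsel, const char *name,
			    int expected_size)
{
	struct format_field *field = perf_evsel__field(evsel, name);

	if (field == NULL) {
		pr_debug("%s: \"%s\" field not found!\n", evsel->name, name);
		return -1;
	}
	if (field->size != expected_size) {
		pr_debug("%s: \"%s\" is %d bytes, expected %d\n",
			 evsel->name, name, field->size, expected_size);
		return -1;
	}
	return 0;
}

static int check_sched_switch_fields(void)
{
	struct perf_evsel *evsel;
	int err;

	/* idx 0: first (and only) event created here */
	evsel = perf_evsel__newtp("sched", "sched_switch", 0);
	if (evsel == NULL)
		return -1;

	/* prev_comm is TASK_COMM_LEN (16) bytes, prev_pid a 4-byte pid_t */
	err = check_field_size(evsel, "prev_comm", 16) ||
	      check_field_size(evsel, "prev_pid", 4);

	perf_evsel__delete(evsel);
	return err ? -1 : 0;
}

The point of such a check is that the tool reads these fields by offset at sample-decode time, so a kernel that changes the tracepoint layout should be caught by the test rather than by silent misparsing.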
open-syscall.c
     9  struct perf_evsel *evsel;    in test__open_syscall_event() local
    18  evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);    in test__open_syscall_event()
    19  if (evsel == NULL) {    in test__open_syscall_event()
    24  if (perf_evsel__open_per_thread(evsel, threads) < 0) {    in test__open_syscall_event()
    36  if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {    in test__open_syscall_event()
    41  if (evsel->counts->cpu[0].val != nr_open_calls) {    in test__open_syscall_event()
    43  nr_open_calls, evsel->counts->cpu[0].val);    in test__open_syscall_event()
    49  perf_evsel__close_fd(evsel, 1, threads->nr);    in test__open_syscall_event()
    51  perf_evsel__delete(evsel);    in test__open_syscall_event()
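These references are the skeleton of the single-thread counting test: build a tracepoint evsel, open it on the current thread, issue a known number of open(2) calls, read the counter back and compare. A condensed sketch of that flow; thread_map__new() and the /etc/passwd victim file are assumptions, and only the perf_evsel__* calls visible in the listing are taken as given.

/* Sketch: count this process's open(2) calls with the
 * syscalls:sys_enter_open tracepoint, using the perf-internal helpers
 * (util/evsel.h, util/thread_map.h) from this tree.
 */
#include <fcntl.h>
#include <limits.h>
#include <unistd.h>
#include "util/evsel.h"
#include "util/thread_map.h"

static int count_my_opens(unsigned int nr_open_calls)
{
	struct thread_map *threads;
	struct perf_evsel *evsel;
	unsigned int i;
	int err = -1;

	threads = thread_map__new(-1, getpid(), UINT_MAX);	/* just this thread */
	if (threads == NULL)
		return -1;

	evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);
	if (evsel == NULL)
		goto out_threads;

	if (perf_evsel__open_per_thread(evsel, threads) < 0)
		goto out_evsel;

	for (i = 0; i < nr_open_calls; ++i)
		close(open("/etc/passwd", O_RDONLY));

	/* per-thread counters are read back through cpu index 0 of the
	 * counts array, not a real CPU number */
	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0)
		goto out_close;

	err = (evsel->counts->cpu[0].val == nr_open_calls) ? 0 : -1;

out_close:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel:
	perf_evsel__delete(evsel);
out_threads:
	/* freeing of the thread map is elided in this sketch */
	return err;
}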
evsel-roundtrip-name.c
    10  struct perf_evsel *evsel;    in perf_evsel__roundtrip_cache_name_test() local
    33  evsel = perf_evlist__first(evlist);    in perf_evsel__roundtrip_cache_name_test()
    44  if (evsel->idx != idx)    in perf_evsel__roundtrip_cache_name_test()
    49  if (strcmp(perf_evsel__name(evsel), name)) {    in perf_evsel__roundtrip_cache_name_test()
    50  pr_debug("%s != %s\n", perf_evsel__name(evsel), name);    in perf_evsel__roundtrip_cache_name_test()
    54  evsel = perf_evsel__next(evsel);    in perf_evsel__roundtrip_cache_name_test()
    66  struct perf_evsel *evsel;    in __perf_evsel__name_array_test() local
    82  list_for_each_entry(evsel, &evlist->entries, node) {    in __perf_evsel__name_array_test()
    83  if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) {    in __perf_evsel__name_array_test()
    85  pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]);    in __perf_evsel__name_array_test()
open-syscall-all-cpus.c
    11  struct perf_evsel *evsel;    in test__open_syscall_event_on_all_cpus() local
    29  evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);    in test__open_syscall_event_on_all_cpus()
    30  if (evsel == NULL) {    in test__open_syscall_event_on_all_cpus()
    35  if (perf_evsel__open(evsel, cpus, threads) < 0) {    in test__open_syscall_event_on_all_cpus()
    74  if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {    in test__open_syscall_event_on_all_cpus()
    87  if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {    in test__open_syscall_event_on_all_cpus()
    94  if (evsel->counts->cpu[cpu].val != expected) {    in test__open_syscall_event_on_all_cpus()
    96  expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);    in test__open_syscall_event_on_all_cpus()
   101  perf_evsel__free_counts(evsel);    in test__open_syscall_event_on_all_cpus()
   103  perf_evsel__close_fd(evsel, 1, threads->nr);    in test__open_syscall_event_on_all_cpus()
    [all …]
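This is the per-CPU variant of the previous test: the evsel is opened across a CPU map, a counts array sized to that map is allocated explicitly, and each CPU is read back individually. A sketch of just that read-back loop; it assumes cpus came from cpu_map__new(NULL) and the evsel was opened with perf_evsel__open(evsel, cpus, threads) as shown, everything else mirrors the listing.

/* Sketch: read a per-CPU opened evsel back one CPU at a time. */
#include <inttypes.h>
#include "util/cpumap.h"
#include "util/debug.h"
#include "util/evsel.h"

static int read_all_cpus(struct perf_evsel *evsel, struct cpu_map *cpus)
{
	int cpu, err = 0;

	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0)
		return -1;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			err = -1;
			break;
		}
		/* cpus->map[cpu] is the logical CPU number behind index cpu */
		pr_debug("cpu%d: %" PRIu64 " events\n",
			 cpus->map[cpu], evsel->counts->cpu[cpu].val);
	}

	perf_evsel__free_counts(evsel);
	return err;
}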
task-exit.c
    29  struct perf_evsel *evsel;    in test__task_exit() local
    75  evsel = perf_evlist__first(evlist);    in test__task_exit()
    76  evsel->attr.task = 1;    in test__task_exit()
    77  evsel->attr.sample_freq = 0;    in test__task_exit()
    78  evsel->attr.inherit = 0;    in test__task_exit()
    79  evsel->attr.watermark = 0;    in test__task_exit()
    80  evsel->attr.wakeup_events = 1;    in test__task_exit()
    81  evsel->attr.exclude_kernel = 1;    in test__task_exit()
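The task-exit test grabs the single evsel on its list and rewrites its perf_event_attr in place before the events are opened, so the kernel emits task records and the tool is woken on every event. A short sketch of exactly that tweak; the only assumption is that the evlist already holds one event, the field assignments are as in the listing.

/* Sketch: tune the only event's attr in place before opening.
 * evlist is assumed to already contain a single evsel.
 */
static void setup_exit_tracking(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);

	evsel->attr.task = 1;		/* report fork/exit records */
	evsel->attr.sample_freq = 0;	/* plain counting, no freq sampling */
	evsel->attr.inherit = 0;
	evsel->attr.watermark = 0;
	evsel->attr.wakeup_events = 1;	/* wake up on every ring-buffer event */
	evsel->attr.exclude_kernel = 1;
}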
mmap-basic.c
    33  struct perf_evsel *evsels[nsyscalls], *evsel;    in test__basic_mmap() local
   118  evsel = perf_evlist__id2evsel(evlist, sample.id);    in test__basic_mmap()
   119  if (evsel == NULL) {    in test__basic_mmap()
   124  nr_events[evsel->idx]++;    in test__basic_mmap()
   128  list_for_each_entry(evsel, &evlist->entries, node) {    in test__basic_mmap()
   129  if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {    in test__basic_mmap()
   131  expected_nr_events[evsel->idx],    in test__basic_mmap()
   132  perf_evsel__name(evsel), nr_events[evsel->idx]);    in test__basic_mmap()
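With several events sharing one ring buffer, every sample carries an id, and perf_evlist__id2evsel() maps that id back to the evsel that produced it so the test can keep a per-event tally indexed by evsel->idx. A sketch of that demultiplexing step; perf_evlist__mmap_read() and perf_evlist__parse_sample() are assumed to be this era's ring-buffer helpers, the id lookup and tally come straight from the listing.

/* Sketch: attribute ring-buffer samples back to their evsel by id.
 * nr_events[] is assumed to be sized to the number of evsels; buffer
 * index 0 is assumed because the test mmaps a single buffer.
 */
#include "util/evlist.h"
#include "util/evsel.h"

static int tally_samples(struct perf_evlist *evlist, unsigned int *nr_events)
{
	union perf_event *event;
	struct perf_sample sample;
	struct perf_evsel *evsel;

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		if (event->header.type != PERF_RECORD_SAMPLE)
			continue;

		if (perf_evlist__parse_sample(evlist, event, &sample) < 0)
			return -1;

		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL)
			return -1;

		nr_events[evsel->idx]++;
	}
	return 0;
}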
open-syscall-tp-fields.c
    22  struct perf_evsel *evsel;    in test__syscall_open_tp_fields() local
    30  evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0);    in test__syscall_open_tp_fields()
    31  if (evsel == NULL) {    in test__syscall_open_tp_fields()
    36  perf_evlist__add(evlist, evsel);    in test__syscall_open_tp_fields()
    44  perf_evsel__config(evsel, &opts);    in test__syscall_open_tp_fields()
    83  err = perf_evsel__parse_sample(evsel, event, &sample);    in test__syscall_open_tp_fields()
    89  tp_flags = perf_evsel__intval(evsel, &sample, "flags");    in test__syscall_open_tp_fields()
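Once an event has been pulled out of the ring buffer, perf_evsel__parse_sample() fills a struct perf_sample and perf_evsel__intval() extracts a named tracepoint field from its raw data. A minimal sketch of that pair for the sys_enter_open event used above; only the two calls shown in the listing are taken as given, the wrapper name is illustrative.

/* Sketch: extract the "flags" field of a syscalls:sys_enter_open
 * sample that was just read from the ring buffer.
 */
static int open_flags_of(struct perf_evsel *evsel, union perf_event *event,
			 u64 *flags)
{
	struct perf_sample sample;

	if (perf_evsel__parse_sample(evsel, event, &sample) < 0)
		return -1;

	*flags = perf_evsel__intval(evsel, &sample, "flags");
	return 0;
}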
hists_link.c
   199  struct perf_evsel *evsel;    in add_hist_entries() local
   210  list_for_each_entry(evsel, &evlist->entries, node) {    in add_hist_entries()
   226  he = __hists__add_entry(&evsel->hists, &al, NULL, 1, 1);    in add_hist_entries()
   250  he = __hists__add_entry(&evsel->hists, &al, NULL, 1, 1);    in add_hist_entries()
   438  struct perf_evsel *evsel, *first;    in test__hists_link() local
   470  list_for_each_entry(evsel, &evlist->entries, node) {    in test__hists_link()
   471  hists__collapse_resort(&evsel->hists);    in test__hists_link()
   474  print_hists(&evsel->hists);    in test__hists_link()
   478  evsel = perf_evlist__last(evlist);    in test__hists_link()
   481  hists__match(&first->hists, &evsel->hists);    in test__hists_link()
    [all …]
sw-clock.c
    26  struct perf_evsel *evsel;    in __test__sw_clock_freq() local
    45  evsel = perf_evsel__new(&attr, 0);    in __test__sw_clock_freq()
    46  if (evsel == NULL) {    in __test__sw_clock_freq()
    50  perf_evlist__add(evlist, evsel);    in __test__sw_clock_freq()
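Unlike the tracepoint tests, sw-clock.c builds its evsel from a hand-filled perf_event_attr and only then hangs it on an evlist. A sketch of that construction; perf_evsel__new(&attr, 0) and perf_evlist__add() are as shown in the listing, while the particular attr fields filled in here (type, config, freq, sample_type) are assumptions about what a frequency-sampling software clock event needs.

/* Sketch: create a frequency-sampling CPU-clock software event and
 * add it to an existing evlist.
 */
#include <string.h>
#include <linux/perf_event.h>
#include "util/evlist.h"
#include "util/evsel.h"

static int add_sw_clock(struct perf_evlist *evlist, u64 sample_freq)
{
	struct perf_event_attr attr;
	struct perf_evsel *evsel;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.freq = 1;			/* sample_freq is a frequency, not a period */
	attr.sample_freq = sample_freq;
	attr.sample_type = PERF_SAMPLE_PERIOD;

	evsel = perf_evsel__new(&attr, 0);	/* idx 0: first event in the list */
	if (evsel == NULL)
		return -1;

	perf_evlist__add(evlist, evsel);
	return 0;
}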
/tools/perf/util/
evsel.h
    40  struct perf_evsel *evsel;    member
    95  void perf_evsel__init(struct perf_evsel *evsel,
    97  void perf_evsel__exit(struct perf_evsel *evsel);
    98  void perf_evsel__delete(struct perf_evsel *evsel);
   100  void perf_evsel__config(struct perf_evsel *evsel,
   117  const char *perf_evsel__name(struct perf_evsel *evsel);
   118  const char *perf_evsel__group_name(struct perf_evsel *evsel);
   119  int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);
   121  int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
   122  int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
    [all …]
evsel.c
    58  void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,    in __perf_evsel__set_sample_bit() argument
    61  if (!(evsel->attr.sample_type & bit)) {    in __perf_evsel__set_sample_bit()
    62  evsel->attr.sample_type |= bit;    in __perf_evsel__set_sample_bit()
    63  evsel->sample_size += sizeof(u64);    in __perf_evsel__set_sample_bit()
    67  void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,    in __perf_evsel__reset_sample_bit() argument
    70  if (evsel->attr.sample_type & bit) {    in __perf_evsel__reset_sample_bit()
    71  evsel->attr.sample_type &= ~bit;    in __perf_evsel__reset_sample_bit()
    72  evsel->sample_size -= sizeof(u64);    in __perf_evsel__reset_sample_bit()
    76  void perf_evsel__set_sample_id(struct perf_evsel *evsel)    in perf_evsel__set_sample_id() argument
    78  perf_evsel__set_sample_bit(evsel, ID);    in perf_evsel__set_sample_id()
    [all …]
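The two __perf_evsel__{set,reset}_sample_bit() helpers keep evsel->sample_size in step with attr.sample_type: each bit added or removed adjusts the expected per-record size by one u64. Callers normally go through the perf_evsel__set_sample_bit(evsel, NAME) macro, as the perf_evsel__set_sample_id() reference at line 78 does; a small usage sketch under that assumption, with the specific CPU and TIME bits chosen only for illustration.

/* Sketch: request CPU and TIME in each sample record; sample_size
 * grows by sizeof(u64) for every newly set bit, as maintained by the
 * helpers above.
 */
static void want_cpu_and_time(struct perf_evsel *evsel)
{
	perf_evsel__set_sample_bit(evsel, CPU);	 /* PERF_SAMPLE_CPU */
	perf_evsel__set_sample_bit(evsel, TIME); /* PERF_SAMPLE_TIME */

	/* also carry an id so samples can be routed back to this evsel
	 * when several events share a ring buffer */
	perf_evsel__set_sample_id(evsel);
}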
evlist.c
    54  struct perf_evsel *evsel;    in perf_evlist__config() local
    65  list_for_each_entry(evsel, &evlist->entries, node) {    in perf_evlist__config()
    66  perf_evsel__config(evsel, opts);    in perf_evlist__config()
    69  perf_evsel__set_sample_id(evsel);    in perf_evlist__config()
   116  struct perf_evsel *evsel, *leader;    in __perf_evlist__set_leader() local
   119  evsel = list_entry(list->prev, struct perf_evsel, node);    in __perf_evlist__set_leader()
   121  leader->nr_members = evsel->idx - leader->idx + 1;    in __perf_evlist__set_leader()
   123  list_for_each_entry(evsel, list, node) {    in __perf_evlist__set_leader()
   124  evsel->leader = leader;    in __perf_evlist__set_leader()
   142  struct perf_evsel *evsel;    in perf_evlist__add_default() local
    [all …]
header.c
   641  struct perf_evsel *evsel;    in write_event_desc() local
   657  sz = (u32)sizeof(evsel->attr);    in write_event_desc()
   662  list_for_each_entry(evsel, &evlist->entries, node) {    in write_event_desc()
   664  ret = do_write(fd, &evsel->attr, sz);    in write_event_desc()
   674  nri = evsel->ids;    in write_event_desc()
   682  ret = do_write_string(fd, perf_evsel__name(evsel));    in write_event_desc()
   688  ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));    in write_event_desc()
  1101  struct perf_evsel *evsel;    in write_group_desc() local
  1108  list_for_each_entry(evsel, &evlist->entries, node) {    in write_group_desc()
  1109  if (perf_evsel__is_group_leader(evsel) &&    in write_group_desc()
    [all …]
parse-events.c
   248  struct perf_evsel *evsel;    in __add_event() local
   260  evsel = perf_evsel__new(attr, (*idx)++);    in __add_event()
   261  if (!evsel) {    in __add_event()
   266  evsel->cpus = cpus;    in __add_event()
   268  evsel->name = strdup(name);    in __add_event()
   269  list_add_tail(&evsel->node, list);    in __add_event()
   362  struct perf_evsel *evsel;    in add_tracepoint() local
   372  evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++);    in add_tracepoint()
   373  if (!evsel) {    in add_tracepoint()
   378  list_add_tail(&evsel->node, list);    in add_tracepoint()
    [all …]
annotate.h
   143  struct perf_evsel *evsel, bool full_paths,
   150  struct perf_evsel *evsel, bool print_lines,
   155  struct perf_evsel *evsel,
   160  struct perf_evsel *evsel __maybe_unused,    in symbol__tui_annotate()
   170  struct perf_evsel *evsel,
   174  struct perf_evsel *evsel,    in hist_entry__gtk_annotate() argument
   177  return symbol__gtk_annotate(he->ms.sym, he->ms.map, evsel, hbt);    in hist_entry__gtk_annotate()
   183  struct perf_evsel *evsel __maybe_unused,    in hist_entry__gtk_annotate()
/tools/perf/
builtin-stat.c
   126  static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)    in perf_evsel__cpus() argument
   128  return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus;    in perf_evsel__cpus()
   131  static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)    in perf_evsel__nr_cpus() argument
   133  return perf_evsel__cpus(evsel)->nr;    in perf_evsel__nr_cpus()
   136  static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)    in perf_evsel__reset_stat_priv() argument
   138  memset(evsel->priv, 0, sizeof(struct perf_stat));    in perf_evsel__reset_stat_priv()
   141  static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)    in perf_evsel__alloc_stat_priv() argument
   143  evsel->priv = zalloc(sizeof(struct perf_stat));    in perf_evsel__alloc_stat_priv()
   144  return evsel->priv == NULL ? -ENOMEM : 0;    in perf_evsel__alloc_stat_priv()
   147  static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)    in perf_evsel__free_stat_priv() argument
    [all …]
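builtin-stat hangs its per-event bookkeeping off evsel->priv: one zeroed struct perf_stat per event, reset between runs and freed at the end. A sketch of that pattern applied across a whole evlist; the contents of struct perf_stat are not shown here, calloc() stands in for the tree's zalloc() wrapper, and the evlist__* helper names are illustrative.

/* Sketch: attach and release a private stats block on every evsel in
 * an evlist, mirroring the alloc/reset/free trio above.
 */
#include <errno.h>
#include <stdlib.h>
#include "util/evlist.h"
#include "util/evsel.h"

static int evlist__alloc_stat_priv(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		evsel->priv = calloc(1, sizeof(struct perf_stat));
		if (evsel->priv == NULL)
			return -ENOMEM;
	}
	return 0;
}

static void evlist__free_stat_priv(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		free(evsel->priv);
		evsel->priv = NULL;
	}
}

Keeping the accumulated statistics on the evsel itself, rather than in a side table, is what lets the counting loop and the printing code both find them with nothing more than the evsel pointer in hand.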
builtin-inject.c
   106  struct perf_evsel *evsel,
   112  struct perf_evsel *evsel,    in perf_event__repipe_sample() argument
   115  if (evsel->handler.func) {    in perf_event__repipe_sample()
   116  inject_handler f = evsel->handler.func;    in perf_event__repipe_sample()
   117  return f(tool, event, sample, evsel, machine);    in perf_event__repipe_sample()
   120  build_id__mark_dso_hit(tool, event, sample, evsel, machine);    in perf_event__repipe_sample()
   203  struct perf_evsel *evsel __maybe_unused,    in perf_event__inject_buildid()
   249  struct perf_evsel *evsel __maybe_unused,    in perf_inject__sched_process_exit()
   269  struct perf_evsel *evsel,    in perf_inject__sched_switch()
   275  perf_inject__sched_process_exit(tool, event, sample, evsel, machine);    in perf_inject__sched_switch()
    [all …]
builtin-kvm.c
    65  bool (*is_begin_event)(struct perf_evsel *evsel,
    68  bool (*is_end_event)(struct perf_evsel *evsel,
   106  static void exit_event_get_key(struct perf_evsel *evsel,    in exit_event_get_key() argument
   111  key->key = perf_evsel__intval(evsel, sample, "exit_reason");    in exit_event_get_key()
   114  static bool kvm_exit_event(struct perf_evsel *evsel)    in kvm_exit_event() argument
   116  return !strcmp(evsel->name, "kvm:kvm_exit");    in kvm_exit_event()
   119  static bool exit_event_begin(struct perf_evsel *evsel,    in exit_event_begin() argument
   122  if (kvm_exit_event(evsel)) {    in exit_event_begin()
   123  exit_event_get_key(evsel, sample, key);    in exit_event_begin()
   130  static bool kvm_entry_event(struct perf_evsel *evsel)    in kvm_entry_event() argument
    [all …]
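builtin-kvm pairs guest exit and re-entry tracepoints by checking evsel->name, and keys each exit interval on the tracepoint's exit_reason field. A sketch of that matching logic; the struct exit_key here is a simplified stand-in for builtin-kvm's event_key, the "kvm:kvm_entry" name is assumed by symmetry with kvm_exit_event(), and only the name comparison and perf_evsel__intval() call come from the listing.

/* Sketch: classify a sample as the start or end of a guest-exit
 * interval and, for exits, record why the guest exited.
 */
#include <stdbool.h>
#include <string.h>
#include "util/evsel.h"

struct exit_key {
	u64 reason;	/* simplified stand-in for builtin-kvm's event_key */
};

static bool is_kvm_exit(struct perf_evsel *evsel, struct perf_sample *sample,
			struct exit_key *key)
{
	if (strcmp(evsel->name, "kvm:kvm_exit"))
		return false;

	key->reason = perf_evsel__intval(evsel, sample, "exit_reason");
	return true;
}

static bool is_kvm_entry(struct perf_evsel *evsel)
{
	return !strcmp(evsel->name, "kvm:kvm_entry");
}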
builtin-report.c
    71  struct perf_evsel *evsel,    in perf_report__add_mem_hist_entry() argument
    85  err = machine__resolve_callchain(machine, evsel, al->thread,    in perf_report__add_mem_hist_entry()
   109  he = __hists__add_mem_entry(&evsel->hists, al, parent, mi, cost, cost);    in perf_report__add_mem_hist_entry()
   121  assert(evsel != NULL);    in perf_report__add_mem_hist_entry()
   126  err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);    in perf_report__add_mem_hist_entry()
   142  evsel->idx,    in perf_report__add_mem_hist_entry()
   148  evsel->hists.stats.total_period += cost;    in perf_report__add_mem_hist_entry()
   149  hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);    in perf_report__add_mem_hist_entry()
   164  struct perf_evsel *evsel,    in perf_report__add_branch_hist_entry() argument
   176  err = machine__resolve_callchain(machine, evsel, al->thread,    in perf_report__add_branch_hist_entry()
    [all …]
builtin-lock.c
   339  int (*acquire_event)(struct perf_evsel *evsel,
   342  int (*acquired_event)(struct perf_evsel *evsel,
   345  int (*contended_event)(struct perf_evsel *evsel,
   348  int (*release_event)(struct perf_evsel *evsel,
   388  static int report_lock_acquire_event(struct perf_evsel *evsel,    in report_lock_acquire_event() argument
   395  const char *name = perf_evsel__strval(evsel, sample, "name");    in report_lock_acquire_event()
   396  u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");    in report_lock_acquire_event()
   397  int flag = perf_evsel__intval(evsel, sample, "flag");    in report_lock_acquire_event()
   461  static int report_lock_acquired_event(struct perf_evsel *evsel,    in report_lock_acquired_event() argument
   469  const char *name = perf_evsel__strval(evsel, sample, "name");    in report_lock_acquired_event()
    [all …]
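The lock handlers read their tracepoint arguments the same way throughout: perf_evsel__strval() for the lock name, perf_evsel__intval() for the address and flags. A minimal sketch of pulling those three fields out of a lock_acquire sample, using exactly the calls visible above; what the real handler does with them afterwards is elided.

/* Sketch: decode the fields of a lock:lock_acquire sample. */
#include <inttypes.h>
#include "util/debug.h"
#include "util/evsel.h"

static void decode_lock_acquire(struct perf_evsel *evsel,
				struct perf_sample *sample)
{
	const char *name = perf_evsel__strval(evsel, sample, "name");
	u64 addr = perf_evsel__intval(evsel, sample, "lockdep_addr");
	int flag = perf_evsel__intval(evsel, sample, "flag");

	pr_debug("acquire %s (%#" PRIx64 "), flags %d\n", name, addr, flag);
}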
builtin-annotate.c
    46  static int perf_evsel__add_sample(struct perf_evsel *evsel,    in perf_evsel__add_sample() argument
    66  he = __hists__add_entry(&evsel->hists, al, NULL, 1, 1);    in perf_evsel__add_sample()
    76  ret = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);    in perf_evsel__add_sample()
    79  evsel->hists.stats.total_period += sample->period;    in perf_evsel__add_sample()
    80  hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);    in perf_evsel__add_sample()
    87  struct perf_evsel *evsel,    in process_sample_event() argument
   103  if (!al.filtered && perf_evsel__add_sample(evsel, sample, &al, ann)) {    in process_sample_event()
   113  struct perf_evsel *evsel,    in hist_entry__tty_annotate() argument
   116  return symbol__tty_annotate(he->ms.sym, he->ms.map, evsel,    in hist_entry__tty_annotate()
   121  struct perf_evsel *evsel,    in hists__find_annotations() argument
    [all …]
builtin-sched.c
   103  int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
   106  int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
   109  int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
   112  int (*fork_event)(struct perf_sched *sched, struct perf_evsel *evsel,
   116  struct perf_evsel *evsel,
   654  struct perf_evsel *evsel, struct perf_sample *sample,    in replay_wakeup_event() argument
   657  const char *comm = perf_evsel__strval(evsel, sample, "comm");    in replay_wakeup_event()
   658  const u32 pid = perf_evsel__intval(evsel, sample, "pid");    in replay_wakeup_event()
   662  printf("sched_wakeup event %p\n", evsel);    in replay_wakeup_event()
   675  struct perf_evsel *evsel,    in replay_switch_event()
    [all …]
builtin-trace.c
   267  typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
   271  struct perf_evsel *evsel,    in trace__syscall_info() argument
   274  int id = perf_evsel__intval(evsel, sample, "id");    in trace__syscall_info()
   298  static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,    in trace__sys_enter() argument
   305  struct syscall *sc = trace__syscall_info(trace, evsel, sample);    in trace__sys_enter()
   311  args = perf_evsel__rawptr(evsel, sample, "args");    in trace__sys_enter()
   342  static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,    in trace__sys_exit() argument
   349  struct syscall *sc = trace__syscall_info(trace, evsel, sample);    in trace__sys_exit()
   354  ret = perf_evsel__intval(evsel, sample, "ret");    in trace__sys_exit()
   395  static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,    in trace__sched_stat_runtime() argument
    [all …]
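builtin-trace dispatches the raw syscall tracepoints through per-evsel handlers: the enter side reads the syscall id and a raw pointer to the packed argument block, the exit side reads the return value. A compressed sketch of the two sides; the handler table, the struct trace state and the syscall-table lookup are elided, only the perf_evsel__intval() and perf_evsel__rawptr() calls mirror the listing, and the handler names here are illustrative.

/* Sketch: minimal enter/exit handlers for the raw syscall tracepoints. */
#include "util/debug.h"
#include "util/evsel.h"

static int on_sys_enter(struct perf_evsel *evsel, struct perf_sample *sample)
{
	int id = perf_evsel__intval(evsel, sample, "id");
	void *args = perf_evsel__rawptr(evsel, sample, "args");

	pr_debug("enter: syscall %d, args at %p\n", id, args);
	return 0;
}

static int on_sys_exit(struct perf_evsel *evsel, struct perf_sample *sample)
{
	int id = perf_evsel__intval(evsel, sample, "id");
	long ret = perf_evsel__intval(evsel, sample, "ret");

	pr_debug("exit: syscall %d = %ld\n", id, ret);
	return 0;
}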
/tools/perf/python/
twatch.py
    21  evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
    25  evsel.open(cpus = cpus, threads = threads);
    27  evlist.add(evsel)