• Home
  • Raw
  • Download

Lines Matching full:pmu

16 #include "pmu.h"
20 * core_pmus: A PMU belongs to core_pmus if its name is "cpu" or its sysfs
22 * must have pmu->is_core=1. If there are more than one PMU in
25 * homogeneous PMU, and thus they are treated as homogeneous
28 * matter whether PMU is present per SMT-thread or outside of the
32 * must have pmu->is_core=0 but pmu->is_uncore could be 0 or 1.
66 struct perf_pmu *pmu, *tmp; in perf_pmus__destroy() local
68 list_for_each_entry_safe(pmu, tmp, &core_pmus, list) { in perf_pmus__destroy()
69 list_del(&pmu->list); in perf_pmus__destroy()
71 perf_pmu__delete(pmu); in perf_pmus__destroy()
73 list_for_each_entry_safe(pmu, tmp, &other_pmus, list) { in perf_pmus__destroy()
74 list_del(&pmu->list); in perf_pmus__destroy()
76 perf_pmu__delete(pmu); in perf_pmus__destroy()
84 struct perf_pmu *pmu; in pmu_find() local
86 list_for_each_entry(pmu, &core_pmus, list) { in pmu_find()
87 if (!strcmp(pmu->name, name) || in pmu_find()
88 (pmu->alias_name && !strcmp(pmu->alias_name, name))) in pmu_find()
89 return pmu; in pmu_find()
91 list_for_each_entry(pmu, &other_pmus, list) { in pmu_find()
92 if (!strcmp(pmu->name, name) || in pmu_find()
93 (pmu->alias_name && !strcmp(pmu->alias_name, name))) in pmu_find()
94 return pmu; in pmu_find()
102 struct perf_pmu *pmu; in perf_pmus__find() local
107 * Once PMU is loaded it stays in the list, in perf_pmus__find()
109 * the pmu format definitions. in perf_pmus__find()
111 pmu = pmu_find(name); in perf_pmus__find()
112 if (pmu) in perf_pmus__find()
113 return pmu; in perf_pmus__find()
123 pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name); in perf_pmus__find()
126 return pmu; in perf_pmus__find()
131 struct perf_pmu *pmu; in perf_pmu__find2() local
135 * Once PMU is loaded it stays in the list, in perf_pmu__find2()
137 * the pmu format definitions. in perf_pmu__find2()
139 pmu = pmu_find(name); in perf_pmu__find2()
140 if (pmu) in perf_pmu__find2()
141 return pmu; in perf_pmu__find2()
172 /* Add all pmus in sysfs to pmu list: */
217 struct perf_pmu *pmu; in __perf_pmus__find_by_type() local
219 list_for_each_entry(pmu, &core_pmus, list) { in __perf_pmus__find_by_type()
220 if (pmu->type == type) in __perf_pmus__find_by_type()
221 return pmu; in __perf_pmus__find_by_type()
224 list_for_each_entry(pmu, &other_pmus, list) { in __perf_pmus__find_by_type()
225 if (pmu->type == type) in __perf_pmus__find_by_type()
226 return pmu; in __perf_pmus__find_by_type()
233 struct perf_pmu *pmu = __perf_pmus__find_by_type(type); in perf_pmus__find_by_type() local
235 if (pmu || read_sysfs_all_pmus) in perf_pmus__find_by_type()
236 return pmu; in perf_pmus__find_by_type()
239 pmu = __perf_pmus__find_by_type(type); in perf_pmus__find_by_type()
240 return pmu; in perf_pmus__find_by_type()
244 * pmu iterator: If pmu is NULL, we start at the begin, otherwise return the
245 * next pmu. Returns NULL on end.
247 struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu) in perf_pmus__scan() argument
249 bool use_core_pmus = !pmu || pmu->is_core; in perf_pmus__scan()
251 if (!pmu) { in perf_pmus__scan()
253 pmu = list_prepare_entry(pmu, &core_pmus, list); in perf_pmus__scan()
256 list_for_each_entry_continue(pmu, &core_pmus, list) in perf_pmus__scan()
257 return pmu; in perf_pmus__scan()
259 pmu = NULL; in perf_pmus__scan()
260 pmu = list_prepare_entry(pmu, &other_pmus, list); in perf_pmus__scan()
262 list_for_each_entry_continue(pmu, &other_pmus, list) in perf_pmus__scan()
263 return pmu; in perf_pmus__scan()
267 struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu) in perf_pmus__scan_core() argument
269 if (!pmu) { in perf_pmus__scan_core()
271 pmu = list_prepare_entry(pmu, &core_pmus, list); in perf_pmus__scan_core()
273 list_for_each_entry_continue(pmu, &core_pmus, list) in perf_pmus__scan_core()
274 return pmu; in perf_pmus__scan_core()
279 static struct perf_pmu *perf_pmus__scan_skip_duplicates(struct perf_pmu *pmu) in perf_pmus__scan_skip_duplicates() argument
281 bool use_core_pmus = !pmu || pmu->is_core; in perf_pmus__scan_skip_duplicates()
283 const char *last_pmu_name = (pmu && pmu->name) ? pmu->name : ""; in perf_pmus__scan_skip_duplicates()
285 if (!pmu) { in perf_pmus__scan_skip_duplicates()
287 pmu = list_prepare_entry(pmu, &core_pmus, list); in perf_pmus__scan_skip_duplicates()
289 last_pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", NULL); in perf_pmus__scan_skip_duplicates()
292 list_for_each_entry_continue(pmu, &core_pmus, list) { in perf_pmus__scan_skip_duplicates()
293 int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", /*num=*/NULL); in perf_pmus__scan_skip_duplicates()
296 !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len)) in perf_pmus__scan_skip_duplicates()
299 return pmu; in perf_pmus__scan_skip_duplicates()
301 pmu = NULL; in perf_pmus__scan_skip_duplicates()
302 pmu = list_prepare_entry(pmu, &other_pmus, list); in perf_pmus__scan_skip_duplicates()
304 list_for_each_entry_continue(pmu, &other_pmus, list) { in perf_pmus__scan_skip_duplicates()
305 int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "", /*num=*/NULL); in perf_pmus__scan_skip_duplicates()
308 !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len)) in perf_pmus__scan_skip_duplicates()
311 return pmu; in perf_pmus__scan_skip_duplicates()
318 struct perf_pmu *pmu = NULL; in perf_pmus__pmu_for_pmu_filter() local
320 while ((pmu = perf_pmus__scan(pmu)) != NULL) { in perf_pmus__pmu_for_pmu_filter()
321 if (!strcmp(pmu->name, str)) in perf_pmus__pmu_for_pmu_filter()
322 return pmu; in perf_pmus__pmu_for_pmu_filter()
324 if (!strncmp(pmu->name, "uncore_", 7)) { in perf_pmus__pmu_for_pmu_filter()
325 if (!strcmp(pmu->name + 7, str)) in perf_pmus__pmu_for_pmu_filter()
326 return pmu; in perf_pmus__pmu_for_pmu_filter()
329 if (!strncmp(pmu->name, "cpu_", 4)) { in perf_pmus__pmu_for_pmu_filter()
330 if (!strcmp(pmu->name + 4, str)) in perf_pmus__pmu_for_pmu_filter()
331 return pmu; in perf_pmus__pmu_for_pmu_filter()
345 /** PMU for event. */
346 const struct perf_pmu *pmu; member
375 a_iscpu = as->pmu ? as->pmu->is_core : true; in cmp_sevent()
376 b_iscpu = bs->pmu ? bs->pmu->is_core : true; in cmp_sevent()
380 /* Order by PMU name. */ in cmp_sevent()
381 if (as->pmu != bs->pmu) { in cmp_sevent()
414 pr_err("Unexpected event %s/%s/\n", info->pmu->name, info->name); in perf_pmus__print_pmu_events__callback()
418 s->pmu = info->pmu; in perf_pmus__print_pmu_events__callback()
436 struct perf_pmu *pmu; in perf_pmus__print_pmu_events() local
449 pmu = NULL; in perf_pmus__print_pmu_events()
451 while ((pmu = scan_fn(pmu)) != NULL) in perf_pmus__print_pmu_events()
452 len += perf_pmu__num_events(pmu); in perf_pmus__print_pmu_events()
456 pr_err("FATAL: not enough memory to print PMU events\n"); in perf_pmus__print_pmu_events()
459 pmu = NULL; in perf_pmus__print_pmu_events()
465 while ((pmu = scan_fn(pmu)) != NULL) { in perf_pmus__print_pmu_events()
466 perf_pmu__for_each_event(pmu, skip_duplicate_pmus, &state, in perf_pmus__print_pmu_events()
482 "Kernel PMU event", in perf_pmus__print_pmu_events()
503 struct perf_pmu *pmu = perf_pmus__find(pname); in perf_pmus__have_event() local
505 return pmu && perf_pmu__have_event(pmu, name); in perf_pmus__have_event()
513 struct perf_pmu *pmu = NULL; in perf_pmus__num_core_pmus() local
515 while ((pmu = perf_pmus__scan_core(pmu)) != NULL) in perf_pmus__num_core_pmus()
523 struct perf_pmu *pmu = NULL; in __perf_pmus__supports_extended_type() local
528 while ((pmu = perf_pmus__scan_core(pmu)) != NULL) { in __perf_pmus__supports_extended_type()
529 …if (!is_event_supported(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES | ((__u64)pmu->type << PERF_P… in __perf_pmus__supports_extended_type()
587 struct perf_pmu *pmu = evsel->pmu; in evsel__find_pmu() local
589 if (!pmu) { in evsel__find_pmu()
590 pmu = perf_pmus__find_by_type(evsel->core.attr.type); in evsel__find_pmu()
591 ((struct evsel *)evsel)->pmu = pmu; in evsel__find_pmu()
593 return pmu; in evsel__find_pmu()