Lines matching refs:pmu — cross-reference of the symbol pmu in kernel/events/core.c. Each hit shows the kernel source line number, the matching code, and the enclosing function; declaration hits are additionally tagged local or argument.
161 return this_cpu_ptr(ctx->pmu->pmu_cpu_context); in __get_cpu_context()
858 perf_pmu_disable(cpuctx->ctx.pmu); in perf_cgroup_switch()
882 perf_pmu_enable(cpuctx->ctx.pmu); in perf_cgroup_switch()
956 cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu); in perf_cgroup_ensure_storage()
1184 struct pmu *pmu = cpuctx->ctx.pmu; in __perf_mux_hrtimer_init() local
1188 if (pmu->task_ctx_nr == perf_sw_context) in __perf_mux_hrtimer_init()
1195 interval = pmu->hrtimer_interval_ms; in __perf_mux_hrtimer_init()
1197 interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER; in __perf_mux_hrtimer_init()
1209 struct pmu *pmu = cpuctx->ctx.pmu; in perf_mux_hrtimer_restart() local
1213 if (pmu->task_ctx_nr == perf_sw_context) in perf_mux_hrtimer_restart()
1232 void perf_pmu_disable(struct pmu *pmu) in perf_pmu_disable() argument
1234 int *count = this_cpu_ptr(pmu->pmu_disable_count); in perf_pmu_disable()
1236 pmu->pmu_disable(pmu); in perf_pmu_disable()
1239 void perf_pmu_enable(struct pmu *pmu) in perf_pmu_enable() argument
1241 int *count = this_cpu_ptr(pmu->pmu_disable_count); in perf_pmu_enable()
1243 pmu->pmu_enable(pmu); in perf_pmu_enable()
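
The two callback invocations above sit behind a per-CPU nesting counter (pmu_disable_count): only the outermost disable and the matching final enable reach the driver. A reconstruction of the full bodies, hedged as my reading of this era of the file:

    void perf_pmu_disable(struct pmu *pmu)
    {
        int *count = this_cpu_ptr(pmu->pmu_disable_count);

        if (!(*count)++)                /* 0 -> 1: first disabler */
            pmu->pmu_disable(pmu);
    }

    void perf_pmu_enable(struct pmu *pmu)
    {
        int *count = this_cpu_ptr(pmu->pmu_disable_count);

        if (!--(*count))                /* 1 -> 0: last enabler */
            pmu->pmu_enable(pmu);
    }
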
1279 static void *alloc_task_ctx_data(struct pmu *pmu) in alloc_task_ctx_data() argument
1281 if (pmu->task_ctx_cache) in alloc_task_ctx_data()
1282 return kmem_cache_zalloc(pmu->task_ctx_cache, GFP_KERNEL); in alloc_task_ctx_data()
1287 static void free_task_ctx_data(struct pmu *pmu, void *task_ctx_data) in free_task_ctx_data() argument
1289 if (pmu->task_ctx_cache && task_ctx_data) in free_task_ctx_data()
1290 kmem_cache_free(pmu->task_ctx_cache, task_ctx_data); in free_task_ctx_data()
1298 free_task_ctx_data(ctx->pmu, ctx->task_ctx_data); in free_ctx()
2120 if (!event->pmu->aux_output_match) in perf_aux_output_match()
2123 return event->pmu->aux_output_match(aux_event); in perf_aux_output_match()
2195 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) in perf_get_aux_event()
2321 struct pmu *pmu = event->pmu; in __pmu_filter_match() local
2322 return pmu->filter_match ? pmu->filter_match(event) : 1; in __pmu_filter_match()
2373 perf_pmu_disable(event->pmu); in event_sched_out()
2375 event->pmu->del(event, 0); in event_sched_out()
2410 perf_pmu_enable(event->pmu); in event_sched_out()
2423 perf_pmu_disable(ctx->pmu); in group_sched_out()
2433 perf_pmu_enable(ctx->pmu); in group_sched_out()
2640 perf_pmu_disable(event->pmu); in event_sched_in()
2644 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
2662 perf_pmu_enable(event->pmu); in event_sched_in()
2673 struct pmu *pmu = ctx->pmu; in group_sched_in() local
2678 pmu->start_txn(pmu, PERF_PMU_TXN_ADD); in group_sched_in()
2693 if (!pmu->commit_txn(pmu)) in group_sched_in()
2711 pmu->cancel_txn(pmu); in group_sched_in()
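
group_sched_in() (lines 2673-2711 above) is the canonical caller of the PMU transaction API: the leader and every sibling are added tentatively inside an ADD transaction, and the driver gets one shot to accept or reject the whole group. A sketch of the calling pattern with the bookkeeping trimmed (not verbatim source):

    pmu->start_txn(pmu, PERF_PMU_TXN_ADD);

    if (event_sched_in(group_event, cpuctx, ctx))
        goto error;

    for_each_sibling_event(event, group_event) {
        if (event_sched_in(event, cpuctx, ctx))
            goto group_error;
    }

    if (!pmu->commit_txn(pmu))      /* driver: does the whole group fit? */
        return 0;

    group_error:
        /* ... event_sched_out() the members that did make it on ... */
    error:
        pmu->cancel_txn(pmu);       /* undo everything since start_txn() */
        return -EAGAIN;
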
2818 perf_pmu_disable(cpuctx->ctx.pmu); in ctx_resched()
2835 perf_pmu_enable(cpuctx->ctx.pmu); in ctx_resched()
2838 void perf_pmu_resched(struct pmu *pmu) in perf_pmu_resched() argument
2840 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_pmu_resched()
3157 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_stop()
3169 event->pmu->start(event, 0); in __perf_event_stop()
3232 event->pmu->addr_filters_sync(event); in perf_event_addr_filters_sync()
3390 perf_pmu_disable(ctx->pmu); in ctx_sched_out()
3407 perf_pmu_enable(ctx->pmu); in ctx_sched_out()
3464 event->pmu->read(event); in __perf_event_sync_stat()
3520 struct pmu *pmu; in perf_event_context_sched_out() local
3525 pmu = ctx->pmu; in perf_event_context_sched_out()
3556 perf_pmu_disable(pmu); in perf_event_context_sched_out()
3573 if (cpuctx->sched_cb_usage && pmu->sched_task) in perf_event_context_sched_out()
3574 pmu->sched_task(ctx, false); in perf_event_context_sched_out()
3582 if (pmu->swap_task_ctx) in perf_event_context_sched_out()
3583 pmu->swap_task_ctx(ctx, next_ctx); in perf_event_context_sched_out()
3587 perf_pmu_enable(pmu); in perf_event_context_sched_out()
3611 perf_pmu_disable(pmu); in perf_event_context_sched_out()
3614 if (cpuctx->sched_cb_usage && pmu->sched_task) in perf_event_context_sched_out()
3615 pmu->sched_task(ctx, false); in perf_event_context_sched_out()
3618 perf_pmu_enable(pmu); in perf_event_context_sched_out()
3625 void perf_sched_cb_dec(struct pmu *pmu) in perf_sched_cb_dec() argument
3627 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_sched_cb_dec()
3636 void perf_sched_cb_inc(struct pmu *pmu) in perf_sched_cb_inc() argument
3638 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_sched_cb_inc()
3656 struct pmu *pmu; in __perf_pmu_sched_task() local
3658 pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */ in __perf_pmu_sched_task()
3660 if (WARN_ON_ONCE(!pmu->sched_task)) in __perf_pmu_sched_task()
3664 perf_pmu_disable(pmu); in __perf_pmu_sched_task()
3666 pmu->sched_task(cpuctx->task_ctx, sched_in); in __perf_pmu_sched_task()
3668 perf_pmu_enable(pmu); in __perf_pmu_sched_task()
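
perf_sched_cb_inc()/perf_sched_cb_dec() above are the driver-facing half of this machinery: a PMU whose events need per-task hardware state saved and restored bumps the counter while such an event is on the hardware, and __perf_pmu_sched_task() then calls back into it on every context switch. A hypothetical driver wiring; needs_ctx_save(), save_hw_state() and restore_hw_state() are stand-ins, not real helpers:

    static int my_pmu_add(struct perf_event *event, int flags)
    {
        /* ... program the counter ... */
        if (needs_ctx_save(event))
            perf_sched_cb_inc(event->pmu);
        return 0;
    }

    static void my_pmu_del(struct perf_event *event, int flags)
    {
        /* ... release the counter ... */
        if (needs_ctx_save(event))
            perf_sched_cb_dec(event->pmu);
    }

    /* Runs with the PMU disabled (see perf_pmu_disable() at 3664). */
    static void my_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
    {
        if (sched_in)
            restore_hw_state();
        else
            save_hw_state();
    }
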
3980 struct pmu *pmu; in perf_event_context_sched_in() local
3988 pmu = ctx->pmu = cpuctx->ctx.pmu; in perf_event_context_sched_in()
4004 perf_pmu_disable(pmu); in perf_event_context_sched_in()
4017 if (cpuctx->sched_cb_usage && pmu->sched_task) in perf_event_context_sched_in()
4018 pmu->sched_task(cpuctx->task_ctx, true); in perf_event_context_sched_in()
4020 perf_pmu_enable(pmu); in perf_event_context_sched_in()
4164 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
4169 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
4195 perf_pmu_disable(ctx->pmu); in perf_adjust_freq_unthr_context()
4204 perf_pmu_disable(event->pmu); in perf_adjust_freq_unthr_context()
4211 event->pmu->start(event, 0); in perf_adjust_freq_unthr_context()
4220 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_context()
4236 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_context()
4238 perf_pmu_enable(event->pmu); in perf_adjust_freq_unthr_context()
4241 perf_pmu_enable(ctx->pmu); in perf_adjust_freq_unthr_context()
4305 perf_pmu_disable(cpuctx->ctx.pmu); in perf_rotate_context()
4328 perf_pmu_enable(cpuctx->ctx.pmu); in perf_rotate_context()
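
The stop()/start() pairs driving perf_adjust_period() and perf_adjust_freq_unthr_context() above follow the PERF_EF_* contract: PERF_EF_UPDATE asks stop() to fold the live hardware count into the event, PERF_EF_RELOAD asks start() to reprogram the (possibly just-changed) sample period. A hypothetical driver honoring it; the my_* helpers are stand-ins:

    static void my_pmu_stop(struct perf_event *event, int flags)
    {
        my_hw_counter_disable(event->hw.idx);
        if (flags & PERF_EF_UPDATE)
            my_pmu_read(event);             /* accumulate into event->count */
        event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
    }

    static void my_pmu_start(struct perf_event *event, int flags)
    {
        if (flags & PERF_EF_RELOAD)         /* core rewrote the period */
            my_hw_counter_write(event->hw.idx,
                                local64_read(&event->hw.prev_count));
        event->hw.state = 0;
        my_hw_counter_enable(event->hw.idx);
    }
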
4492 struct pmu *pmu = event->pmu; in __perf_event_read() local
4518 pmu->read(event); in __perf_event_read()
4523 pmu->start_txn(pmu, PERF_PMU_TXN_READ); in __perf_event_read()
4525 pmu->read(event); in __perf_event_read()
4533 sub->pmu->read(sub); in __perf_event_read()
4537 data->ret = pmu->commit_txn(pmu); in __perf_event_read()
4615 event->pmu->read(event); in perf_event_read_local()
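
The PERF_PMU_TXN_READ transaction in __perf_event_read() (4492-4537 above) is what services a single userspace read() of a whole event group. A self-contained userspace illustration; glibc provides no wrapper, so the raw syscall is used, and error handling is trimmed:

    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
    {
        return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        struct { uint64_t nr; uint64_t vals[2]; } buf; /* PERF_FORMAT_GROUP */
        int leader;

        memset(&attr, 0, sizeof(attr));
        attr.size        = sizeof(attr);
        attr.type        = PERF_TYPE_HARDWARE;
        attr.config      = PERF_COUNT_HW_CPU_CYCLES;
        attr.read_format = PERF_FORMAT_GROUP;
        attr.disabled    = 1;
        leader = perf_event_open(&attr, 0, -1, -1, 0); /* this task, any CPU */

        attr.config   = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 0;                     /* sibling follows the leader */
        perf_event_open(&attr, 0, -1, leader, 0);

        ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);
        /* ... workload under measurement ... */
        ioctl(leader, PERF_EVENT_IOC_DISABLE, 0);

        read(leader, &buf, sizeof(buf));       /* one read, whole group */
        printf("cycles=%llu instructions=%llu\n",
               (unsigned long long)buf.vals[0],
               (unsigned long long)buf.vals[1]);
        return 0;
    }
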
4728 alloc_perf_context(struct pmu *pmu, struct task_struct *task) in alloc_perf_context() argument
4739 ctx->pmu = pmu; in alloc_perf_context()
4768 find_get_context(struct pmu *pmu, struct task_struct *task, in find_get_context() argument
4784 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in find_get_context()
4795 ctxn = pmu->task_ctx_nr; in find_get_context()
4800 task_ctx_data = alloc_task_ctx_data(pmu); in find_get_context()
4822 ctx = alloc_perf_context(pmu, task); in find_get_context()
4858 free_task_ctx_data(pmu, task_ctx_data); in find_get_context()
4862 free_task_ctx_data(pmu, task_ctx_data); in find_get_context()
5017 struct pmu *pmu = event->pmu; in exclusive_event_init() local
5019 if (!is_exclusive_pmu(pmu)) in exclusive_event_init()
5036 if (!atomic_inc_unless_negative(&pmu->exclusive_cnt)) in exclusive_event_init()
5039 if (!atomic_dec_unless_positive(&pmu->exclusive_cnt)) in exclusive_event_init()
5048 struct pmu *pmu = event->pmu; in exclusive_event_destroy() local
5050 if (!is_exclusive_pmu(pmu)) in exclusive_event_destroy()
5055 atomic_dec(&pmu->exclusive_cnt); in exclusive_event_destroy()
5057 atomic_inc(&pmu->exclusive_cnt); in exclusive_event_destroy()
5062 if ((e1->pmu == e2->pmu) && in exclusive_event_match()
5074 struct pmu *pmu = event->pmu; in exclusive_event_installable() local
5078 if (!is_exclusive_pmu(pmu)) in exclusive_event_installable()
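
The signed counter manipulated at 5036/5039 and 5055/5057 packs both user kinds into one atomic. As I read the scheme (hedged): a positive pmu->exclusive_cnt means only per-task events currently own this exclusive PMU, a negative one means only CPU-wide events do. A per-task event therefore joins with atomic_inc_unless_negative() (refused while any CPU-wide event is live), a CPU-wide event with atomic_dec_unless_positive() (refused while any per-task event is live), and exclusive_event_destroy() reverses whichever direction init took.
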
5142 module_put(event->pmu->module); in _free_event()
5685 perf_pmu_disable(ctx->pmu); in __perf_event_period()
5694 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
5700 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
5701 perf_pmu_enable(ctx->pmu); in __perf_event_period()
5707 return event->pmu->check_period(event, value); in perf_event_check_period()
5962 return event->pmu->event_idx(event); in perf_event_index()
6198 if (event->pmu->event_mapped) in perf_mmap_open()
6199 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap_open()
6221 if (event->pmu->event_unmapped) in perf_mmap_close()
6222 event->pmu->event_unmapped(event, vma->vm_mm); in perf_mmap_close()
6540 if (event->pmu->event_mapped) in perf_mmap()
6541 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap()
6950 ret = event->pmu->snapshot_aux(event, handle, size); in perf_pmu_snapshot_aux()
7115 leader->pmu->read(leader); in perf_output_read_group()
7128 sub->pmu->read(sub); in perf_output_read_group()
7942 struct pmu *pmu = event->ctx->pmu; in __perf_pmu_output_stop() local
7943 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in __perf_pmu_output_stop()
9286 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
9855 static struct pmu perf_swevent = {
10004 static struct pmu perf_tracepoint = {
10056 static struct pmu perf_kprobe = {
10115 static struct pmu perf_uprobe = {
10267 if (event->pmu == &perf_tracepoint) in perf_event_is_tracing()
10270 if (event->pmu == &perf_kprobe) in perf_event_is_tracing()
10274 if (event->pmu == &perf_uprobe) in perf_event_is_tracing()
10723 ret = event->pmu->addr_filters_validate(&filters); in perf_event_set_addr_filter()
10797 event->pmu->read(event); in perf_swevent_hrtimer()
10936 static struct pmu perf_cpu_clock = {
11017 static struct pmu perf_task_clock = {
11030 static void perf_pmu_nop_void(struct pmu *pmu) in perf_pmu_nop_void() argument
11034 static void perf_pmu_nop_txn(struct pmu *pmu, unsigned int flags) in perf_pmu_nop_txn() argument
11038 static int perf_pmu_nop_int(struct pmu *pmu) in perf_pmu_nop_int() argument
11050 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) in perf_pmu_start_txn() argument
11057 perf_pmu_disable(pmu); in perf_pmu_start_txn()
11060 static int perf_pmu_commit_txn(struct pmu *pmu) in perf_pmu_commit_txn() argument
11069 perf_pmu_enable(pmu); in perf_pmu_commit_txn()
11073 static void perf_pmu_cancel_txn(struct pmu *pmu) in perf_pmu_cancel_txn() argument
11082 perf_pmu_enable(pmu); in perf_pmu_cancel_txn()
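
When a driver supplies pmu_enable()/pmu_disable() but no transaction callbacks, perf_pmu_register() (below, 11355-11364) wires in these helpers, which reduce an ADD transaction to a plain disable/enable bracket. As I recall this era of the file, a per-CPU flag word remembers the transaction type so that READ transactions stay no-ops; a hedged reconstruction:

    static DEFINE_PER_CPU(unsigned int, nop_txn_flags);

    static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
    {
        __this_cpu_write(nop_txn_flags, flags);
        if (flags & ~PERF_PMU_TXN_ADD)
            return;                     /* e.g. PERF_PMU_TXN_READ: no-op */
        perf_pmu_disable(pmu);
    }

    static int perf_pmu_commit_txn(struct pmu *pmu)
    {
        unsigned int flags = __this_cpu_read(nop_txn_flags);

        __this_cpu_write(nop_txn_flags, 0);
        if (flags & ~PERF_PMU_TXN_ADD)
            return 0;
        perf_pmu_enable(pmu);
        return 0;
    }
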
11096 struct pmu *pmu; in find_pmu_context() local
11101 list_for_each_entry(pmu, &pmus, entry) { in find_pmu_context()
11102 if (pmu->task_ctx_nr == ctxn) in find_pmu_context()
11103 return pmu->pmu_cpu_context; in find_pmu_context()
11109 static void free_pmu_context(struct pmu *pmu) in free_pmu_context() argument
11116 if (pmu->task_ctx_nr > perf_invalid_context) in free_pmu_context()
11119 free_percpu(pmu->pmu_cpu_context); in free_pmu_context()
11129 struct pmu *pmu = dev_get_drvdata(dev); in nr_addr_filters_show() local
11131 return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters); in nr_addr_filters_show()
11140 struct pmu *pmu = dev_get_drvdata(dev); in type_show() local
11142 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type); in type_show()
11151 struct pmu *pmu = dev_get_drvdata(dev); in perf_event_mux_interval_ms_show() local
11153 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms); in perf_event_mux_interval_ms_show()
11163 struct pmu *pmu = dev_get_drvdata(dev); in perf_event_mux_interval_ms_store() local
11174 if (timer == pmu->hrtimer_interval_ms) in perf_event_mux_interval_ms_store()
11178 pmu->hrtimer_interval_ms = timer; in perf_event_mux_interval_ms_store()
11184 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_mux_interval_ms_store()
11206 struct pmu *pmu = dev_get_drvdata(dev); in pmu_dev_is_visible() local
11208 if (n == 2 && !pmu->nr_addr_filters) in pmu_dev_is_visible()
11235 static int pmu_dev_alloc(struct pmu *pmu) in pmu_dev_alloc() argument
11239 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL); in pmu_dev_alloc()
11240 if (!pmu->dev) in pmu_dev_alloc()
11243 pmu->dev->groups = pmu->attr_groups; in pmu_dev_alloc()
11244 device_initialize(pmu->dev); in pmu_dev_alloc()
11246 dev_set_drvdata(pmu->dev, pmu); in pmu_dev_alloc()
11247 pmu->dev->bus = &pmu_bus; in pmu_dev_alloc()
11248 pmu->dev->release = pmu_dev_release; in pmu_dev_alloc()
11250 ret = dev_set_name(pmu->dev, "%s", pmu->name); in pmu_dev_alloc()
11254 ret = device_add(pmu->dev); in pmu_dev_alloc()
11258 if (pmu->attr_update) { in pmu_dev_alloc()
11259 ret = sysfs_update_groups(&pmu->dev->kobj, pmu->attr_update); in pmu_dev_alloc()
11268 device_del(pmu->dev); in pmu_dev_alloc()
11271 put_device(pmu->dev); in pmu_dev_alloc()
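
pmu_dev_alloc() above is what puts each PMU on the event_source bus, so its attributes land under /sys/bus/event_source/devices/<name>/. The type attribute (type_show(), 11140) is how tools resolve a dynamically registered PMU to a perf_event_attr.type value; a small userspace sketch:

    #include <stdio.h>

    /* Returns the dynamic PMU type id, or -1; use it as attr.type. */
    static int pmu_type(const char *name)
    {
        char path[256];
        FILE *f;
        int type = -1;

        snprintf(path, sizeof(path),
                 "/sys/bus/event_source/devices/%s/type", name);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (fscanf(f, "%d", &type) != 1)
            type = -1;
        fclose(f);
        return type;
    }
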
11278 int perf_pmu_register(struct pmu *pmu, const char *name, int type) in perf_pmu_register() argument
11284 pmu->pmu_disable_count = alloc_percpu(int); in perf_pmu_register()
11285 if (!pmu->pmu_disable_count) in perf_pmu_register()
11288 pmu->type = -1; in perf_pmu_register()
11291 pmu->name = name; in perf_pmu_register()
11297 ret = idr_alloc(&pmu_idr, pmu, max, 0, GFP_KERNEL); in perf_pmu_register()
11305 pmu->type = type; in perf_pmu_register()
11308 ret = pmu_dev_alloc(pmu); in perf_pmu_register()
11314 if (pmu->task_ctx_nr == perf_hw_context) { in perf_pmu_register()
11323 !(pmu->capabilities & PERF_PMU_CAP_HETEROGENEOUS_CPUS))) in perf_pmu_register()
11324 pmu->task_ctx_nr = perf_invalid_context; in perf_pmu_register()
11329 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr); in perf_pmu_register()
11330 if (pmu->pmu_cpu_context) in perf_pmu_register()
11334 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context); in perf_pmu_register()
11335 if (!pmu->pmu_cpu_context) in perf_pmu_register()
11341 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_pmu_register()
11345 cpuctx->ctx.pmu = pmu; in perf_pmu_register()
11355 if (!pmu->start_txn) { in perf_pmu_register()
11356 if (pmu->pmu_enable) { in perf_pmu_register()
11362 pmu->start_txn = perf_pmu_start_txn; in perf_pmu_register()
11363 pmu->commit_txn = perf_pmu_commit_txn; in perf_pmu_register()
11364 pmu->cancel_txn = perf_pmu_cancel_txn; in perf_pmu_register()
11366 pmu->start_txn = perf_pmu_nop_txn; in perf_pmu_register()
11367 pmu->commit_txn = perf_pmu_nop_int; in perf_pmu_register()
11368 pmu->cancel_txn = perf_pmu_nop_void; in perf_pmu_register()
11372 if (!pmu->pmu_enable) { in perf_pmu_register()
11373 pmu->pmu_enable = perf_pmu_nop_void; in perf_pmu_register()
11374 pmu->pmu_disable = perf_pmu_nop_void; in perf_pmu_register()
11377 if (!pmu->check_period) in perf_pmu_register()
11378 pmu->check_period = perf_event_nop_int; in perf_pmu_register()
11380 if (!pmu->event_idx) in perf_pmu_register()
11381 pmu->event_idx = perf_event_idx_default; in perf_pmu_register()
11389 list_add_rcu(&pmu->entry, &pmus); in perf_pmu_register()
11391 list_add_tail_rcu(&pmu->entry, &pmus); in perf_pmu_register()
11393 atomic_set(&pmu->exclusive_cnt, 0); in perf_pmu_register()
11401 device_del(pmu->dev); in perf_pmu_register()
11402 put_device(pmu->dev); in perf_pmu_register()
11405 if (pmu->type != PERF_TYPE_SOFTWARE) in perf_pmu_register()
11406 idr_remove(&pmu_idr, pmu->type); in perf_pmu_register()
11409 free_percpu(pmu->pmu_disable_count); in perf_pmu_register()
11414 void perf_pmu_unregister(struct pmu *pmu) in perf_pmu_unregister() argument
11417 list_del_rcu(&pmu->entry); in perf_pmu_unregister()
11426 free_percpu(pmu->pmu_disable_count); in perf_pmu_unregister()
11427 if (pmu->type != PERF_TYPE_SOFTWARE) in perf_pmu_unregister()
11428 idr_remove(&pmu_idr, pmu->type); in perf_pmu_unregister()
11430 if (pmu->nr_addr_filters) in perf_pmu_unregister()
11431 device_remove_file(pmu->dev, &dev_attr_nr_addr_filters); in perf_pmu_unregister()
11432 device_del(pmu->dev); in perf_pmu_unregister()
11433 put_device(pmu->dev); in perf_pmu_unregister()
11435 free_pmu_context(pmu); in perf_pmu_unregister()
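
Taken together, perf_pmu_register()/perf_pmu_unregister() above define the whole driver lifecycle. A minimal module-side skeleton with hypothetical my_* callbacks; a real driver must program actual hardware, and most struct pmu hooks are optional because registration fills in the nop/txn defaults shown at 11355-11381:

    #include <linux/module.h>
    #include <linux/perf_event.h>

    static int my_event_init(struct perf_event *event)
    {
        /* event->pmu is already set by perf_try_init_event() (11470) */
        if (event->attr.type != event->pmu->type)
            return -ENOENT;     /* not ours: perf_init_event() keeps looking */
        return 0;
    }

    static void my_start(struct perf_event *event, int flags) { /* ... */ }
    static void my_stop(struct perf_event *event, int flags)  { /* ... */ }
    static void my_read(struct perf_event *event)             { /* ... */ }

    static int my_add(struct perf_event *event, int flags)
    {
        if (flags & PERF_EF_START)
            my_start(event, 0);
        return 0;
    }

    static void my_del(struct perf_event *event, int flags)
    {
        my_stop(event, PERF_EF_UPDATE);
    }

    static struct pmu my_pmu = {
        .task_ctx_nr = perf_invalid_context,   /* uncore-style: no task ctx */
        .event_init  = my_event_init,
        .add         = my_add,
        .del         = my_del,
        .start       = my_start,
        .stop        = my_stop,
        .read        = my_read,
    };

    static int __init my_init(void)
    {
        return perf_pmu_register(&my_pmu, "my_pmu", -1); /* -1: dynamic id */
    }

    static void __exit my_exit(void)
    {
        perf_pmu_unregister(&my_pmu);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");
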
11446 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
11451 if (!try_module_get(pmu->module)) in perf_try_init_event()
11460 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { in perf_try_init_event()
11470 event->pmu = pmu; in perf_try_init_event()
11471 ret = pmu->event_init(event); in perf_try_init_event()
11477 if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) && in perf_try_init_event()
11481 if (pmu->capabilities & PERF_PMU_CAP_NO_EXCLUDE && in perf_try_init_event()
11490 module_put(pmu->module); in perf_try_init_event()
11495 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event()
11499 struct pmu *pmu; in perf_init_event() local
11504 if (event->parent && event->parent->pmu) { in perf_init_event()
11505 pmu = event->parent->pmu; in perf_init_event()
11506 ret = perf_try_init_event(pmu, event); in perf_init_event()
11528 pmu = idr_find(&pmu_idr, type); in perf_init_event()
11530 if (pmu) { in perf_init_event()
11532 !(pmu->capabilities & PERF_PMU_CAP_EXTENDED_HW_TYPE)) in perf_init_event()
11535 ret = perf_try_init_event(pmu, event); in perf_init_event()
11542 pmu = ERR_PTR(ret); in perf_init_event()
11547 list_for_each_entry_rcu(pmu, &pmus, entry, lockdep_is_held(&pmus_srcu)) { in perf_init_event()
11548 ret = perf_try_init_event(pmu, event); in perf_init_event()
11553 pmu = ERR_PTR(ret); in perf_init_event()
11558 pmu = ERR_PTR(-ENOENT); in perf_init_event()
11562 return pmu; in perf_init_event()
11699 struct pmu *pmu; in perf_event_alloc() local
11751 event->pmu = NULL; in perf_event_alloc()
11806 pmu = NULL; in perf_event_alloc()
11826 pmu = perf_init_event(event); in perf_event_alloc()
11827 if (IS_ERR(pmu)) { in perf_event_alloc()
11828 err = PTR_ERR(pmu); in perf_event_alloc()
11836 if (pmu->task_ctx_nr == perf_invalid_context && cgroup_fd != -1) { in perf_event_alloc()
11842 !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) { in perf_event_alloc()
11858 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, in perf_event_alloc()
11876 pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range)); in perf_event_alloc()
11917 module_put(pmu->module); in perf_event_alloc()
12107 event->pmu != output_event->pmu) in perf_event_set_output()
12179 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
12267 struct pmu *pmu; in SYSCALL_DEFINE5() local
12372 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
12382 pmu = event->pmu; in SYSCALL_DEFINE5()
12390 if (pmu->task_ctx_nr == perf_sw_context) in SYSCALL_DEFINE5()
12404 pmu = group_leader->ctx->pmu; in SYSCALL_DEFINE5()
12420 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
12756 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
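
perf_event_create_kernel_counter() is the in-kernel analogue of the syscall path and lands in the same find_get_context() shown at 12756. A hedged sketch of in-kernel use (the hardlockup watchdog is the classic caller of this API):

    #include <linux/perf_event.h>

    static struct perf_event *cycle_ev;

    static int start_cycle_counter(int cpu)
    {
        struct perf_event_attr attr = {
            .type   = PERF_TYPE_HARDWARE,
            .config = PERF_COUNT_HW_CPU_CYCLES,
            .size   = sizeof(attr),
            .pinned = 1,
        };

        /* task == NULL: per-CPU counter; no overflow handler.
         * Tear down later with perf_event_release_kernel(cycle_ev). */
        cycle_ev = perf_event_create_kernel_counter(&attr, cpu, NULL,
                                                    NULL, NULL);
        if (IS_ERR(cycle_ev))
            return PTR_ERR(cycle_ev);
        return 0;
    }
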
12806 void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu) in perf_pmu_migrate_context() argument
12813 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; in perf_pmu_migrate_context()
12814 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; in perf_pmu_migrate_context()
13197 struct pmu *pmu = child_event->pmu; in inherit_event() local
13199 child_ctx->task_ctx_data = alloc_task_ctx_data(pmu); in inherit_event()
13350 child_ctx = alloc_perf_context(parent_ctx->pmu, child); in inherit_task_group()
13544 struct pmu *pmu; in perf_event_exit_cpu_context() local
13547 list_for_each_entry(pmu, &pmus, entry) { in perf_event_exit_cpu_context()
13548 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_exit_cpu_context()
13569 struct pmu *pmu; in perf_event_init_cpu() local
13575 list_for_each_entry(pmu, &pmus, entry) { in perf_event_init_cpu()
13576 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_init_cpu()
13657 struct pmu *pmu; in perf_event_sysfs_init() local
13666 list_for_each_entry(pmu, &pmus, entry) { in perf_event_sysfs_init()
13667 if (!pmu->name || pmu->type < 0) in perf_event_sysfs_init()
13670 ret = pmu_dev_alloc(pmu); in perf_event_sysfs_init()
13671 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret); in perf_event_sysfs_init()