Lines Matching refs:cpuctx

164 static void perf_ctx_lock(struct perf_cpu_context *cpuctx, in perf_ctx_lock() argument
167 raw_spin_lock(&cpuctx->ctx.lock); in perf_ctx_lock()
172 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx, in perf_ctx_unlock() argument
177 raw_spin_unlock(&cpuctx->ctx.lock); in perf_ctx_unlock()
220 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in event_function() local
221 struct perf_event_context *task_ctx = cpuctx->task_ctx; in event_function()
226 perf_ctx_lock(cpuctx, task_ctx); in event_function()
251 WARN_ON_ONCE(&cpuctx->ctx != ctx); in event_function()
254 efs->func(event, cpuctx, ctx, efs->data); in event_function()
256 perf_ctx_unlock(cpuctx, task_ctx); in event_function()
317 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in event_function_local() local
330 perf_ctx_lock(cpuctx, task_ctx); in event_function_local()
346 if (WARN_ON_ONCE(cpuctx->task_ctx != ctx)) in event_function_local()
350 WARN_ON_ONCE(&cpuctx->ctx != ctx); in event_function_local()
353 func(event, cpuctx, ctx, data); in event_function_local()
355 perf_ctx_unlock(cpuctx, task_ctx); in event_function_local()
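
The matches above show a fixed lock order: perf_ctx_lock() takes the CPU context lock before the task context lock, and event_function()/event_function_local() run their callback entirely inside that pair. A minimal user-space sketch of the same two-level pattern, assuming pthread mutexes as stand-ins for the raw spinlocks:

```c
#include <pthread.h>

struct ctx { pthread_mutex_t lock; };

/* outer (CPU) lock first, optional inner (task) lock second */
static void ctx_pair_lock(struct ctx *cpu_ctx, struct ctx *task_ctx)
{
    pthread_mutex_lock(&cpu_ctx->lock);
    if (task_ctx)
        pthread_mutex_lock(&task_ctx->lock);
}

/* release in the reverse order of acquisition */
static void ctx_pair_unlock(struct ctx *cpu_ctx, struct ctx *task_ctx)
{
    if (task_ctx)
        pthread_mutex_unlock(&task_ctx->lock);
    pthread_mutex_unlock(&cpu_ctx->lock);
}

int main(void)
{
    struct ctx cpu = { PTHREAD_MUTEX_INITIALIZER };
    struct ctx task = { PTHREAD_MUTEX_INITIALIZER };

    ctx_pair_lock(&cpu, &task);
    /* both contexts are stable here, as in event_function() */
    ctx_pair_unlock(&cpu, &task);
    return 0;
}
```
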
451 static bool perf_rotate_context(struct perf_cpu_context *cpuctx);
574 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
577 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
701 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_cgroup_match() local
708 if (!cpuctx->cgrp) in perf_cgroup_match()
717 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup, in perf_cgroup_match()
762 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final) in update_cgrp_time_from_cpuctx() argument
764 struct perf_cgroup *cgrp = cpuctx->cgrp; in update_cgrp_time_from_cpuctx()
843 struct perf_cpu_context *cpuctx, *tmp; in perf_cgroup_switch() local
854 list_for_each_entry_safe(cpuctx, tmp, list, cgrp_cpuctx_entry) { in perf_cgroup_switch()
855 WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0); in perf_cgroup_switch()
857 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in perf_cgroup_switch()
858 perf_pmu_disable(cpuctx->ctx.pmu); in perf_cgroup_switch()
861 cpu_ctx_sched_out(cpuctx, EVENT_ALL); in perf_cgroup_switch()
866 cpuctx->cgrp = NULL; in perf_cgroup_switch()
870 WARN_ON_ONCE(cpuctx->cgrp); in perf_cgroup_switch()
878 cpuctx->cgrp = perf_cgroup_from_task(task, in perf_cgroup_switch()
879 &cpuctx->ctx); in perf_cgroup_switch()
880 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); in perf_cgroup_switch()
882 perf_pmu_enable(cpuctx->ctx.pmu); in perf_cgroup_switch()
883 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in perf_cgroup_switch()
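
perf_cgroup_switch() (lines 843-883 above) wraps the cgroup change in a resched bracket: disable the PMU, schedule everything out, swap cpuctx->cgrp, schedule back in, re-enable. A hedged sketch of that bracket with illustrative stand-in functions (none of these names are kernel API):

```c
#include <stdio.h>

struct cpu_ctx { const char *cgrp; };

static void pmu_disable(void)                { puts("pmu: disable"); }
static void pmu_enable(void)                 { puts("pmu: enable"); }
static void sched_out_all(struct cpu_ctx *c) { printf("sched out (cgrp=%s)\n", c->cgrp); }
static void sched_in_all(struct cpu_ctx *c)  { printf("sched in  (cgrp=%s)\n", c->cgrp); }

static void cgroup_switch(struct cpu_ctx *c, const char *new_cgrp)
{
    pmu_disable();           /* batch the reprogramming */
    sched_out_all(c);        /* events counted against the old cgroup */
    c->cgrp = new_cgrp;      /* the actual state change */
    sched_in_all(c);         /* events now counted against the new one */
    pmu_enable();
}

int main(void)
{
    struct cpu_ctx c = { .cgrp = "old" };
    cgroup_switch(&c, "new");
    return 0;
}
```
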
944 struct perf_cpu_context *cpuctx; in perf_cgroup_ensure_storage() local
956 cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu); in perf_cgroup_ensure_storage()
957 if (heap_size <= cpuctx->heap_size) in perf_cgroup_ensure_storage()
967 raw_spin_lock_irq(&cpuctx->ctx.lock); in perf_cgroup_ensure_storage()
968 if (cpuctx->heap_size < heap_size) { in perf_cgroup_ensure_storage()
969 swap(cpuctx->heap, storage); in perf_cgroup_ensure_storage()
970 if (storage == cpuctx->heap_default) in perf_cgroup_ensure_storage()
972 cpuctx->heap_size = heap_size; in perf_cgroup_ensure_storage()
974 raw_spin_unlock_irq(&cpuctx->ctx.lock); in perf_cgroup_ensure_storage()
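
perf_cgroup_ensure_storage() (lines 944-974) grows cpuctx->heap with an allocate-outside, swap-under-lock idiom: the new buffer is allocated with no locks held, the pointer swap happens under ctx.lock only if the buffer is still too small, and whichever buffer lost is freed afterwards. The kernel additionally avoids freeing the embedded heap_default array (line 970), which this user-space approximation glosses over by starting from a NULL heap; a pthread mutex stands in for the raw spinlock:

```c
#include <pthread.h>
#include <stdlib.h>

struct heap_owner {
    pthread_mutex_t lock;
    int *heap;
    size_t heap_size;
};

static int ensure_heap(struct heap_owner *o, size_t need)
{
    int *storage;

    if (need <= o->heap_size)        /* racy fast path, rechecked below */
        return 0;

    storage = calloc(need, sizeof(*storage)); /* allocate without the lock */
    if (!storage)
        return -1;

    pthread_mutex_lock(&o->lock);
    if (o->heap_size < need) {       /* recheck under the lock */
        int *tmp = o->heap;
        o->heap = storage;
        storage = tmp;               /* swap: old buffer goes to 'storage' */
        o->heap_size = need;
    }
    pthread_mutex_unlock(&o->lock);

    free(storage);                   /* free whichever buffer lost the race */
    return 0;
}

int main(void)
{
    struct heap_owner o = { .lock = PTHREAD_MUTEX_INITIALIZER };

    ensure_heap(&o, 8);              /* grows from empty to 8 entries */
    ensure_heap(&o, 4);              /* no-op: already big enough */
    free(o.heap);
    return 0;
}
```
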
1025 struct perf_cpu_context *cpuctx; in perf_cgroup_event_enable() local
1034 cpuctx = container_of(ctx, struct perf_cpu_context, ctx); in perf_cgroup_event_enable()
1042 if (ctx->is_active && !cpuctx->cgrp) { in perf_cgroup_event_enable()
1046 cpuctx->cgrp = cgrp; in perf_cgroup_event_enable()
1052 list_add(&cpuctx->cgrp_cpuctx_entry, in perf_cgroup_event_enable()
1059 struct perf_cpu_context *cpuctx; in perf_cgroup_event_disable() local
1068 cpuctx = container_of(ctx, struct perf_cpu_context, ctx); in perf_cgroup_event_disable()
1073 if (ctx->is_active && cpuctx->cgrp) in perf_cgroup_event_disable()
1074 cpuctx->cgrp = NULL; in perf_cgroup_event_disable()
1076 list_del(&cpuctx->cgrp_cpuctx_entry); in perf_cgroup_event_disable()
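
perf_cgroup_event_enable()/...disable() (lines 1025-1076) recover the enclosing perf_cpu_context from an embedded perf_event_context with container_of(). A self-contained ISO C version of that pointer arithmetic, with illustrative structures rather than the kernel's:

```c
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct event_ctx { int is_active; };

struct cpu_ctx {
    const char *name;
    struct event_ctx ctx;   /* embedded, like cpuctx->ctx */
};

int main(void)
{
    struct cpu_ctx c = { .name = "cpu0", .ctx = { .is_active = 1 } };
    struct event_ctx *ctx = &c.ctx;

    /* step back from the embedded member to the enclosing structure */
    struct cpu_ctx *back = container_of(ctx, struct cpu_ctx, ctx);
    printf("%s\n", back->name);   /* prints "cpu0" */
    return 0;
}
```
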
1099 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, in update_cgrp_time_from_cpuctx() argument
1163 struct perf_cpu_context *cpuctx; in perf_mux_hrtimer_handler() local
1168 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer); in perf_mux_hrtimer_handler()
1169 rotations = perf_rotate_context(cpuctx); in perf_mux_hrtimer_handler()
1171 raw_spin_lock(&cpuctx->hrtimer_lock); in perf_mux_hrtimer_handler()
1173 hrtimer_forward_now(hr, cpuctx->hrtimer_interval); in perf_mux_hrtimer_handler()
1175 cpuctx->hrtimer_active = 0; in perf_mux_hrtimer_handler()
1176 raw_spin_unlock(&cpuctx->hrtimer_lock); in perf_mux_hrtimer_handler()
1181 static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) in __perf_mux_hrtimer_init() argument
1183 struct hrtimer *timer = &cpuctx->hrtimer; in __perf_mux_hrtimer_init()
1184 struct pmu *pmu = cpuctx->ctx.pmu; in __perf_mux_hrtimer_init()
1199 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval); in __perf_mux_hrtimer_init()
1201 raw_spin_lock_init(&cpuctx->hrtimer_lock); in __perf_mux_hrtimer_init()
1206 static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) in perf_mux_hrtimer_restart() argument
1208 struct hrtimer *timer = &cpuctx->hrtimer; in perf_mux_hrtimer_restart()
1209 struct pmu *pmu = cpuctx->ctx.pmu; in perf_mux_hrtimer_restart()
1216 raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags); in perf_mux_hrtimer_restart()
1217 if (!cpuctx->hrtimer_active) { in perf_mux_hrtimer_restart()
1218 cpuctx->hrtimer_active = 1; in perf_mux_hrtimer_restart()
1219 hrtimer_forward_now(timer, cpuctx->hrtimer_interval); in perf_mux_hrtimer_restart()
1222 raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags); in perf_mux_hrtimer_restart()
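
The hrtimer matches (lines 1163-1222) show the multiplexing timer's lifecycle: the handler forwards the timer by hrtimer_interval and clears hrtimer_active when no rotation is needed, while perf_mux_hrtimer_restart() arms the timer only on the 0 -> 1 transition of the flag, so concurrent restart requests collapse into a single armed timer. A sketch of just that guard logic, with a mutex and a stub arm_timer() standing in for the hrtimer API:

```c
#include <pthread.h>
#include <stdio.h>

struct mux_timer {
    pthread_mutex_t lock;
    int active;
    long interval_ns;
};

static void arm_timer(struct mux_timer *t)
{
    printf("armed for %ld ns\n", t->interval_ns);
}

static void mux_timer_restart(struct mux_timer *t)
{
    pthread_mutex_lock(&t->lock);
    if (!t->active) {        /* only the first caller arms it */
        t->active = 1;
        arm_timer(t);
    }
    pthread_mutex_unlock(&t->lock);
}

/* the expiry handler lets the timer die once rotation stops */
static int mux_timer_expired(struct mux_timer *t, int rotations)
{
    pthread_mutex_lock(&t->lock);
    if (!rotations)
        t->active = 0;       /* nothing to rotate: go idle */
    pthread_mutex_unlock(&t->lock);
    return rotations;        /* nonzero: caller forwards and restarts */
}

int main(void)
{
    struct mux_timer t = { .lock = PTHREAD_MUTEX_INITIALIZER,
                           .interval_ns = 1000000 };

    mux_timer_restart(&t);    /* arms */
    mux_timer_restart(&t);    /* no-op: already active */
    mux_timer_expired(&t, 0); /* nothing to rotate: goes idle */
    mux_timer_restart(&t);    /* arms again */
    return 0;
}
```
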
2128 struct perf_cpu_context *cpuctx,
2134 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_put_aux_event() local
2163 event_sched_out(iter, cpuctx, ctx); in perf_put_aux_event()
2227 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_remove_sibling_event() local
2229 event_sched_out(event, cpuctx, ctx); in perf_remove_sibling_event()
2355 struct perf_cpu_context *cpuctx, in event_sched_out() argument
2402 cpuctx->active_oncpu--; in event_sched_out()
2407 if (event->attr.exclusive || !cpuctx->active_oncpu) in event_sched_out()
2408 cpuctx->exclusive = 0; in event_sched_out()
2415 struct perf_cpu_context *cpuctx, in group_sched_out() argument
2425 event_sched_out(group_event, cpuctx, ctx); in group_sched_out()
2431 event_sched_out(event, cpuctx, ctx); in group_sched_out()
2448 struct perf_cpu_context *cpuctx, in __perf_remove_from_context() argument
2456 update_cgrp_time_from_cpuctx(cpuctx, false); in __perf_remove_from_context()
2465 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
2475 if (ctx == &cpuctx->ctx) in __perf_remove_from_context()
2476 update_cgrp_time_from_cpuctx(cpuctx, true); in __perf_remove_from_context()
2481 WARN_ON_ONCE(cpuctx->task_ctx != ctx); in __perf_remove_from_context()
2482 cpuctx->task_ctx = NULL; in __perf_remove_from_context()
2528 struct perf_cpu_context *cpuctx, in __perf_event_disable() argument
2541 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2543 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2609 struct perf_cpu_context *cpuctx, in event_sched_in() argument
2652 cpuctx->active_oncpu++; in event_sched_in()
2659 cpuctx->exclusive = 1; in event_sched_in()
2669 struct perf_cpu_context *cpuctx, in group_sched_in() argument
2680 if (event_sched_in(group_event, cpuctx, ctx)) in group_sched_in()
2687 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
2706 event_sched_out(event, cpuctx, ctx); in group_sched_in()
2708 event_sched_out(group_event, cpuctx, ctx); in group_sched_in()
2719 struct perf_cpu_context *cpuctx, in group_can_go_on() argument
2731 if (cpuctx->exclusive) in group_can_go_on()
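
Lines 2355-2731 track exclusive-PMU accounting: event_sched_in() bumps active_oncpu and may set cpuctx->exclusive, event_sched_out() drops the count and clears the flag (line 2407), and group_can_go_on() refuses new groups while the flag is set. A compact sketch of that admission logic; the struct and checks are illustrative, and the real group_can_go_on() weighs more conditions:

```c
#include <stdbool.h>

struct pmu_state {
    int  active_oncpu;   /* events currently programmed */
    bool exclusive;      /* an exclusive event owns the PMU */
};

static bool can_go_on(const struct pmu_state *s, bool want_exclusive)
{
    if (s->exclusive)
        return false;                    /* PMU already claimed */
    if (want_exclusive && s->active_oncpu)
        return false;                    /* must have the PMU to itself */
    return true;
}

static void sched_in(struct pmu_state *s, bool exclusive)
{
    s->active_oncpu++;
    if (exclusive)
        s->exclusive = true;
}

static void sched_out(struct pmu_state *s, bool exclusive)
{
    s->active_oncpu--;
    if (exclusive || !s->active_oncpu)
        s->exclusive = false;            /* mirrors line 2407 above */
}

int main(void)
{
    struct pmu_state s = { 0, false };

    if (can_go_on(&s, true))
        sched_in(&s, true);   /* exclusive event takes the PMU */
    /* can_go_on(&s, false) is now false until it leaves */
    sched_out(&s, true);
    return 0;
}
```
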
2754 struct perf_cpu_context *cpuctx,
2758 struct perf_cpu_context *cpuctx,
2762 static void task_ctx_sched_out(struct perf_cpu_context *cpuctx, in task_ctx_sched_out() argument
2766 if (!cpuctx->task_ctx) in task_ctx_sched_out()
2769 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) in task_ctx_sched_out()
2772 ctx_sched_out(ctx, cpuctx, event_type); in task_ctx_sched_out()
2775 static void perf_event_sched_in(struct perf_cpu_context *cpuctx, in perf_event_sched_in() argument
2779 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); in perf_event_sched_in()
2781 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); in perf_event_sched_in()
2782 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); in perf_event_sched_in()
2784 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); in perf_event_sched_in()
2802 static void ctx_resched(struct perf_cpu_context *cpuctx, in ctx_resched() argument
2818 perf_pmu_disable(cpuctx->ctx.pmu); in ctx_resched()
2820 task_ctx_sched_out(cpuctx, task_ctx, event_type); in ctx_resched()
2830 cpu_ctx_sched_out(cpuctx, ctx_event_type); in ctx_resched()
2832 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); in ctx_resched()
2834 perf_event_sched_in(cpuctx, task_ctx, current); in ctx_resched()
2835 perf_pmu_enable(cpuctx->ctx.pmu); in ctx_resched()
2840 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_pmu_resched() local
2841 struct perf_event_context *task_ctx = cpuctx->task_ctx; in perf_pmu_resched()
2843 perf_ctx_lock(cpuctx, task_ctx); in perf_pmu_resched()
2844 ctx_resched(cpuctx, task_ctx, EVENT_ALL|EVENT_CPU); in perf_pmu_resched()
2845 perf_ctx_unlock(cpuctx, task_ctx); in perf_pmu_resched()
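
perf_event_sched_in() (lines 2775-2784) fixes the scheduling order: pinned groups before flexible ones, and within each class the CPU context before the task context, so pinned events can never be displaced by flexible ones; ctx_resched() and perf_pmu_resched() then wrap a full out/in cycle in a PMU disable/enable bracket. A stand-in sketch of the ordering, with illustrative names:

```c
#include <stdio.h>

enum ev_class { PINNED, FLEXIBLE };

static void sched_class(const char *ctx, enum ev_class c)
{
    printf("%s: %s\n", ctx, c == PINNED ? "pinned" : "flexible");
}

static void event_sched_in(int have_task_ctx)
{
    sched_class("cpu", PINNED);
    if (have_task_ctx)
        sched_class("task", PINNED);
    sched_class("cpu", FLEXIBLE);   /* flexible only after all pinned */
    if (have_task_ctx)
        sched_class("task", FLEXIBLE);
}

int main(void)
{
    event_sched_in(1);
    return 0;
}
```
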
2858 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_install_in_context() local
2859 struct perf_event_context *task_ctx = cpuctx->task_ctx; in __perf_install_in_context()
2863 raw_spin_lock(&cpuctx->ctx.lock); in __perf_install_in_context()
2882 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx); in __perf_install_in_context()
2900 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in __perf_install_in_context()
2902 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_install_in_context()
2908 perf_ctx_unlock(cpuctx, task_ctx); in __perf_install_in_context()
3036 struct perf_cpu_context *cpuctx, in __perf_event_enable() argument
3048 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in __perf_event_enable()
3057 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); in __perf_event_enable()
3066 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); in __perf_event_enable()
3070 task_ctx = cpuctx->task_ctx; in __perf_event_enable()
3074 ctx_resched(cpuctx, task_ctx, get_event_type(event)); in __perf_event_enable()
3336 struct perf_cpu_context *cpuctx, in ctx_sched_out() argument
3350 WARN_ON_ONCE(cpuctx->task_ctx); in ctx_sched_out()
3367 update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx); in ctx_sched_out()
3380 WARN_ON_ONCE(cpuctx->task_ctx != ctx); in ctx_sched_out()
3382 cpuctx->task_ctx = NULL; in ctx_sched_out()
3393 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3398 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3518 struct perf_cpu_context *cpuctx; in perf_event_context_sched_out() local
3526 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_out()
3527 if (!cpuctx->task_ctx) in perf_event_context_sched_out()
3573 if (cpuctx->sched_cb_usage && pmu->sched_task) in perf_event_context_sched_out()
3614 if (cpuctx->sched_cb_usage && pmu->sched_task) in perf_event_context_sched_out()
3616 task_ctx_sched_out(cpuctx, ctx, EVENT_ALL); in perf_event_context_sched_out()
3627 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_sched_cb_dec() local
3631 if (!--cpuctx->sched_cb_usage) in perf_sched_cb_dec()
3632 list_del(&cpuctx->sched_cb_entry); in perf_sched_cb_dec()
3638 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_sched_cb_inc() local
3640 if (!cpuctx->sched_cb_usage++) in perf_sched_cb_inc()
3641 list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list)); in perf_sched_cb_inc()
3654 static void __perf_pmu_sched_task(struct perf_cpu_context *cpuctx, bool sched_in) in __perf_pmu_sched_task() argument
3658 pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */ in __perf_pmu_sched_task()
3663 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in __perf_pmu_sched_task()
3666 pmu->sched_task(cpuctx->task_ctx, sched_in); in __perf_pmu_sched_task()
3669 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in __perf_pmu_sched_task()
3676 struct perf_cpu_context *cpuctx; in perf_pmu_sched_task() local
3681 list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) { in perf_pmu_sched_task()
3683 if (cpuctx->task_ctx) in perf_pmu_sched_task()
3686 __perf_pmu_sched_task(cpuctx, sched_in); in perf_pmu_sched_task()
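
perf_sched_cb_inc()/perf_sched_cb_dec() (lines 3627-3641) are counted registration: the context joins the per-CPU callback list on the 0 -> 1 transition of sched_cb_usage and leaves on 1 -> 0, so nested users share one list entry. A sketch with an illustrative singly-linked list in place of the kernel's list_head:

```c
#include <stddef.h>

struct cb_entry {
    int usage;
    struct cb_entry *next;
};

static struct cb_entry *cb_list;

static void sched_cb_inc(struct cb_entry *e)
{
    if (!e->usage++) {          /* first user: link in */
        e->next = cb_list;
        cb_list = e;
    }
}

static void sched_cb_dec(struct cb_entry *e)
{
    if (!--e->usage) {          /* last user: unlink */
        struct cb_entry **pp = &cb_list;
        while (*pp != e)
            pp = &(*pp)->next;
        *pp = e->next;
    }
}

int main(void)
{
    struct cb_entry a = { 0, NULL };

    sched_cb_inc(&a);   /* linked */
    sched_cb_inc(&a);   /* still a single list entry */
    sched_cb_dec(&a);
    sched_cb_dec(&a);   /* unlinked */
    return 0;
}
```
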
3733 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, in cpu_ctx_sched_out() argument
3736 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); in cpu_ctx_sched_out()
3770 static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx, in visit_groups_merge() argument
3784 if (cpuctx) { in visit_groups_merge()
3786 .data = cpuctx->heap, in visit_groups_merge()
3788 .size = cpuctx->heap_size, in visit_groups_merge()
3791 lockdep_assert_held(&cpuctx->ctx.lock); in visit_groups_merge()
3794 if (cpuctx->cgrp) in visit_groups_merge()
3795 css = &cpuctx->cgrp->css; in visit_groups_merge()
3864 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in merge_sched_in() local
3873 if (group_can_go_on(event, cpuctx, *can_add_hw)) { in merge_sched_in()
3874 if (!group_sched_in(event, cpuctx, ctx)) in merge_sched_in()
3885 perf_mux_hrtimer_restart(cpuctx); in merge_sched_in()
3895 struct perf_cpu_context *cpuctx) in ctx_pinned_sched_in() argument
3899 if (ctx != &cpuctx->ctx) in ctx_pinned_sched_in()
3900 cpuctx = NULL; in ctx_pinned_sched_in()
3902 visit_groups_merge(cpuctx, &ctx->pinned_groups, in ctx_pinned_sched_in()
3909 struct perf_cpu_context *cpuctx) in ctx_flexible_sched_in() argument
3913 if (ctx != &cpuctx->ctx) in ctx_flexible_sched_in()
3914 cpuctx = NULL; in ctx_flexible_sched_in()
3916 visit_groups_merge(cpuctx, &ctx->flexible_groups, in ctx_flexible_sched_in()
3923 struct perf_cpu_context *cpuctx, in ctx_sched_in() argument
3948 cpuctx->task_ctx = ctx; in ctx_sched_in()
3950 WARN_ON_ONCE(cpuctx->task_ctx != ctx); in ctx_sched_in()
3960 ctx_pinned_sched_in(ctx, cpuctx); in ctx_sched_in()
3964 ctx_flexible_sched_in(ctx, cpuctx); in ctx_sched_in()
3967 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, in cpu_ctx_sched_in() argument
3971 struct perf_event_context *ctx = &cpuctx->ctx; in cpu_ctx_sched_in()
3973 ctx_sched_in(ctx, cpuctx, event_type, task); in cpu_ctx_sched_in()
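
visit_groups_merge() (lines 3770-3795) walks several sorted group trees at once, using cpuctx->heap as a min-heap so events from the CPU, task, and cgroup sources come out in one global order; ctx_pinned_sched_in()/ctx_flexible_sched_in() pass cpuctx only when scheduling the CPU context itself (lines 3899-3900). A self-contained k-way merge over arrays, sketching the same heap technique (arrays of ints stand in for the rbtree iterators):

```c
#include <stdio.h>

struct src { const int *v; int len, pos; };

static int src_head(const struct src *s) { return s->v[s->pos]; }

static void sift_down(struct src **h, int n, int i)
{
    for (;;) {
        int l = 2 * i + 1, r = l + 1, m = i;
        if (l < n && src_head(h[l]) < src_head(h[m])) m = l;
        if (r < n && src_head(h[r]) < src_head(h[m])) m = r;
        if (m == i) return;
        struct src *t = h[i]; h[i] = h[m]; h[m] = t;
        i = m;
    }
}

static void visit_merge(struct src **heap, int n)
{
    for (int i = n / 2 - 1; i >= 0; i--)    /* heapify */
        sift_down(heap, n, i);

    while (n) {
        struct src *s = heap[0];
        printf("%d ", src_head(s));         /* "visit" the smallest head */
        if (++s->pos == s->len)
            heap[0] = heap[--n];            /* source exhausted: drop it */
        sift_down(heap, n, 0);
    }
    putchar('\n');
}

int main(void)
{
    int a[] = {1, 4, 9}, b[] = {2, 3, 8}, c[] = {5, 6, 7};
    struct src s[] = { {a, 3, 0}, {b, 3, 0}, {c, 3, 0} };
    struct src *heap[] = { &s[0], &s[1], &s[2] };

    visit_merge(heap, 3);                   /* prints 1 2 3 4 5 6 7 8 9 */
    return 0;
}
```
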
3979 struct perf_cpu_context *cpuctx; in perf_event_context_sched_in() local
3982 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_in()
3988 pmu = ctx->pmu = cpuctx->ctx.pmu; in perf_event_context_sched_in()
3990 if (cpuctx->task_ctx == ctx) { in perf_event_context_sched_in()
3991 if (cpuctx->sched_cb_usage) in perf_event_context_sched_in()
3992 __perf_pmu_sched_task(cpuctx, true); in perf_event_context_sched_in()
3996 perf_ctx_lock(cpuctx, ctx); in perf_event_context_sched_in()
4014 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); in perf_event_context_sched_in()
4015 perf_event_sched_in(cpuctx, ctx, task); in perf_event_context_sched_in()
4017 if (cpuctx->sched_cb_usage && pmu->sched_task) in perf_event_context_sched_in()
4018 pmu->sched_task(cpuctx->task_ctx, true); in perf_event_context_sched_in()
4023 perf_ctx_unlock(cpuctx, ctx); in perf_event_context_sched_in()
4286 static bool perf_rotate_context(struct perf_cpu_context *cpuctx) in perf_rotate_context() argument
4297 cpu_rotate = cpuctx->ctx.rotate_necessary; in perf_rotate_context()
4298 task_ctx = cpuctx->task_ctx; in perf_rotate_context()
4304 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in perf_rotate_context()
4305 perf_pmu_disable(cpuctx->ctx.pmu); in perf_rotate_context()
4310 cpu_event = ctx_event_to_rotate(&cpuctx->ctx); in perf_rotate_context()
4317 ctx_sched_out(task_ctx, cpuctx, EVENT_FLEXIBLE); in perf_rotate_context()
4319 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); in perf_rotate_context()
4324 rotate_ctx(&cpuctx->ctx, cpu_event); in perf_rotate_context()
4326 perf_event_sched_in(cpuctx, task_ctx, current); in perf_rotate_context()
4328 perf_pmu_enable(cpuctx->ctx.pmu); in perf_rotate_context()
4329 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in perf_rotate_context()
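
perf_rotate_context() (lines 4286-4329) implements multiplexing fairness: with the PMU disabled, flexible events are scheduled out, the group order is rotated so a different event comes first, and everything is scheduled back in. A toy rotation over an array, standing in for the kernel's rbtree manipulation:

```c
#include <stdio.h>

static void rotate_once(const char **ev, int n)
{
    const char *first = ev[0];
    for (int i = 0; i + 1 < n; i++)  /* shift everyone up ... */
        ev[i] = ev[i + 1];
    ev[n - 1] = first;               /* ... old head goes to the back */
}

int main(void)
{
    const char *flexible[] = { "cycles", "instructions", "branches" };

    for (int round = 0; round < 3; round++) {
        printf("on pmu: %s\n", flexible[0]); /* only one counter free */
        rotate_once(flexible, 3);            /* next round, next event */
    }
    return 0;
}
```
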
4373 struct perf_cpu_context *cpuctx; in perf_event_enable_on_exec() local
4383 cpuctx = __get_cpu_context(ctx); in perf_event_enable_on_exec()
4384 perf_ctx_lock(cpuctx, ctx); in perf_event_enable_on_exec()
4385 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in perf_event_enable_on_exec()
4396 ctx_resched(cpuctx, ctx, event_type); in perf_event_enable_on_exec()
4398 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); in perf_event_enable_on_exec()
4400 perf_ctx_unlock(cpuctx, ctx); in perf_event_enable_on_exec()
4491 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_read() local
4501 if (ctx->task && cpuctx->task_ctx != ctx) in __perf_event_read()
4772 struct perf_cpu_context *cpuctx; in find_get_context() local
4784 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in find_get_context()
4785 ctx = &cpuctx->ctx; in find_get_context()
5669 struct perf_cpu_context *cpuctx, in __perf_event_period() argument
7943 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in __perf_pmu_output_stop() local
7949 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false); in __perf_pmu_output_stop()
7950 if (cpuctx->task_ctx) in __perf_pmu_output_stop()
7951 perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop, in __perf_pmu_output_stop()
11183 struct perf_cpu_context *cpuctx; in perf_event_mux_interval_ms_store() local
11184 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_mux_interval_ms_store()
11185 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); in perf_event_mux_interval_ms_store()
11187 cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpuctx); in perf_event_mux_interval_ms_store()
11339 struct perf_cpu_context *cpuctx; in perf_pmu_register() local
11341 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_pmu_register()
11342 __perf_event_init_context(&cpuctx->ctx); in perf_pmu_register()
11343 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); in perf_pmu_register()
11344 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); in perf_pmu_register()
11345 cpuctx->ctx.pmu = pmu; in perf_pmu_register()
11346 cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask); in perf_pmu_register()
11348 __perf_mux_hrtimer_init(cpuctx, cpu); in perf_pmu_register()
11350 cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default); in perf_pmu_register()
11351 cpuctx->heap = cpuctx->heap_default; in perf_pmu_register()
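
perf_pmu_register() (lines 11339-11351) points cpuctx->heap at a small array embedded in the structure itself, so the common case needs no allocation; the swap-under-lock growth path sketched earlier replaces it only when a deep cgroup hierarchy outgrows the default. A minimal sketch of that embedded-default setup (field names mirror the listing; the struct itself is illustrative):

```c
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct cpu_ctx {
    int *heap;
    size_t heap_size;
    int heap_default[2];   /* enough for the common two-source merge */
};

static void cpu_ctx_init(struct cpu_ctx *c)
{
    c->heap = c->heap_default;                 /* no allocation needed */
    c->heap_size = ARRAY_SIZE(c->heap_default);
}

int main(void)
{
    struct cpu_ctx c;
    cpu_ctx_init(&c);      /* c.heap now points at c.heap_default */
    return 0;
}
```
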
12576 struct perf_cpu_context *cpuctx = in SYSCALL_DEFINE5() local
12579 if (!cpuctx->online) { in SYSCALL_DEFINE5()
12776 struct perf_cpu_context *cpuctx = in perf_event_create_kernel_counter() local
12778 if (!cpuctx->online) { in perf_event_create_kernel_counter()
13530 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_exit_context() local
13534 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in __perf_event_exit_context()
13536 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()
13542 struct perf_cpu_context *cpuctx; in perf_event_exit_cpu_context() local
13548 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_exit_cpu_context()
13549 ctx = &cpuctx->ctx; in perf_event_exit_cpu_context()
13553 cpuctx->online = 0; in perf_event_exit_cpu_context()
13567 struct perf_cpu_context *cpuctx; in perf_event_init_cpu() local
13576 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_init_cpu()
13577 ctx = &cpuctx->ctx; in perf_event_init_cpu()
13580 cpuctx->online = 1; in perf_event_init_cpu()