Lines matching refs: cpuctx
(Short, hedged C sketches of the recurring patterns appear after the relevant groups of matches below.)
194 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
197 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
222 static void perf_ctx_lock(struct perf_cpu_context *cpuctx, in perf_ctx_lock() argument
225 raw_spin_lock(&cpuctx->ctx.lock); in perf_ctx_lock()
230 static void perf_ctx_unlock(struct perf_cpu_context *cpuctx, in perf_ctx_unlock() argument
235 raw_spin_unlock(&cpuctx->ctx.lock); in perf_ctx_unlock()
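
The perf_ctx_lock()/perf_ctx_unlock() matches above (lines 222-235) show only the cpuctx side of the pair. Below is a minimal userspace model of the locking discipline they imply: the per-CPU context lock is taken first, the task context lock (when one is passed) nests inside it, and unlock reverses the order. The pthread mutexes and two-field structs are stand-ins, not the kernel's raw spinlocks or real layouts.

        /* Build with: cc -pthread lock_order.c */
        #include <pthread.h>
        #include <stdio.h>

        struct perf_event_context {
                pthread_mutex_t lock;
        };

        struct perf_cpu_context {
                struct perf_event_context ctx;  /* per-CPU context: outer lock */
        };

        static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
                                  struct perf_event_context *ctx)
        {
                pthread_mutex_lock(&cpuctx->ctx.lock); /* CPU context first */
                if (ctx)
                        pthread_mutex_lock(&ctx->lock); /* task ctx nests inside */
        }

        static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
                                    struct perf_event_context *ctx)
        {
                if (ctx)
                        pthread_mutex_unlock(&ctx->lock); /* inner lock first */
                pthread_mutex_unlock(&cpuctx->ctx.lock);
        }

        int main(void)
        {
                struct perf_cpu_context cpuctx = {
                        .ctx.lock = PTHREAD_MUTEX_INITIALIZER,
                };
                struct perf_event_context task_ctx = {
                        .lock = PTHREAD_MUTEX_INITIALIZER,
                };

                perf_ctx_lock(&cpuctx, &task_ctx);
                printf("both contexts locked, CPU context taken first\n");
                perf_ctx_unlock(&cpuctx, &task_ctx);
                return 0;
        }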
270 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_cgroup_match() local
277 if (!cpuctx->cgrp) in perf_cgroup_match()
286 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup, in perf_cgroup_match()
332 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) in update_cgrp_time_from_cpuctx() argument
334 struct perf_cgroup *cgrp_out = cpuctx->cgrp; in update_cgrp_time_from_cpuctx()
389 struct perf_cpu_context *cpuctx; in perf_cgroup_switch() local
407 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_cgroup_switch()
408 if (cpuctx->unique_pmu != pmu) in perf_cgroup_switch()
418 if (cpuctx->ctx.nr_cgroups > 0) { in perf_cgroup_switch()
419 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in perf_cgroup_switch()
420 perf_pmu_disable(cpuctx->ctx.pmu); in perf_cgroup_switch()
423 cpu_ctx_sched_out(cpuctx, EVENT_ALL); in perf_cgroup_switch()
428 cpuctx->cgrp = NULL; in perf_cgroup_switch()
432 WARN_ON_ONCE(cpuctx->cgrp); in perf_cgroup_switch()
438 cpuctx->cgrp = perf_cgroup_from_task(task); in perf_cgroup_switch()
439 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); in perf_cgroup_switch()
441 perf_pmu_enable(cpuctx->ctx.pmu); in perf_cgroup_switch()
442 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in perf_cgroup_switch()
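
The perf_cgroup_switch() matches (lines 389-442) trace a fixed sequence: skip PMUs whose cpuctx is not their unique one, and for contexts that carry cgroup events, lock, disable the PMU, switch the old cgroup's events out, re-point cpuctx->cgrp, switch the new cgroup's events in, then enable and unlock. The standalone sketch below compresses that flow; the SWOUT/SWIN mode flags, the helper bodies, and the struct layout are stand-ins filled in around the matched lines.

        #include <stdio.h>

        #define PERF_CGROUP_SWOUT       0x1     /* schedule old cgroup events out */
        #define PERF_CGROUP_SWIN        0x2     /* schedule new cgroup events in */

        struct perf_cgroup { const char *name; };

        struct perf_cpu_context {
                int nr_cgroups;                 /* cgroup-bound events on this PMU */
                struct perf_cgroup *cgrp;       /* cgroup currently monitored */
        };

        static void sched_out_all(struct perf_cpu_context *cpuctx)
        {
                (void)cpuctx;
                puts("  cpu_ctx_sched_out(EVENT_ALL)");
        }

        static void sched_in_all(struct perf_cpu_context *cpuctx)
        {
                (void)cpuctx;
                puts("  cpu_ctx_sched_in(EVENT_ALL)");
        }

        static void cgroup_switch(struct perf_cpu_context *cpuctx,
                                  struct perf_cgroup *next, unsigned int mode)
        {
                if (cpuctx->nr_cgroups <= 0)    /* no cgroup events: nothing to do */
                        return;

                puts("perf_ctx_lock + perf_pmu_disable");
                if (mode & PERF_CGROUP_SWOUT) {
                        sched_out_all(cpuctx);
                        cpuctx->cgrp = NULL;    /* must be NULL before sched-in */
                }
                if (mode & PERF_CGROUP_SWIN) {
                        cpuctx->cgrp = next;    /* events now match new cgroup */
                        sched_in_all(cpuctx);
                }
                puts("perf_pmu_enable + perf_ctx_unlock");
        }

        int main(void)
        {
                struct perf_cgroup next = { "next-task-cgroup" };
                struct perf_cpu_context cpuctx = { .nr_cgroups = 1, .cgrp = NULL };

                cgroup_switch(&cpuctx, &next, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
                return 0;
        }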
609 static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx) in update_cgrp_time_from_cpuctx() argument
686 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_pmu_rotate_start() local
691 if (list_empty(&cpuctx->rotation_list)) { in perf_pmu_rotate_start()
693 list_add(&cpuctx->rotation_list, head); in perf_pmu_rotate_start()
1143 struct perf_cpu_context *cpuctx; in list_del_event() local
1154 cpuctx = __get_cpu_context(ctx); in list_del_event()
1161 cpuctx->cgrp = NULL; in list_del_event()
1244 struct perf_cpu_context *cpuctx, in event_sched_out() argument
1275 cpuctx->active_oncpu--; in event_sched_out()
1279 if (event->attr.exclusive || !cpuctx->active_oncpu) in event_sched_out()
1280 cpuctx->exclusive = 0; in event_sched_out()
1285 struct perf_cpu_context *cpuctx, in group_sched_out() argument
1291 event_sched_out(group_event, cpuctx, ctx); in group_sched_out()
1297 event_sched_out(event, cpuctx, ctx); in group_sched_out()
1300 cpuctx->exclusive = 0; in group_sched_out()
1313 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_remove_from_context() local
1316 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
1318 if (!ctx->nr_events && cpuctx->task_ctx == ctx) { in __perf_remove_from_context()
1320 cpuctx->task_ctx = NULL; in __perf_remove_from_context()
1386 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_disable() local
1395 if (ctx->task && cpuctx->task_ctx != ctx) in __perf_event_disable()
1409 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
1411 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
1530 struct perf_cpu_context *cpuctx, in event_sched_in() argument
1567 cpuctx->active_oncpu++; in event_sched_in()
1573 cpuctx->exclusive = 1; in event_sched_in()
1580 struct perf_cpu_context *cpuctx, in group_sched_in() argument
1593 if (event_sched_in(group_event, cpuctx, ctx)) { in group_sched_in()
1602 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
1634 event_sched_out(event, cpuctx, ctx); in group_sched_in()
1637 event_sched_out(group_event, cpuctx, ctx); in group_sched_in()
1648 struct perf_cpu_context *cpuctx, in group_can_go_on() argument
1660 if (cpuctx->exclusive) in group_can_go_on()
1666 if (event->attr.exclusive && cpuctx->active_oncpu) in group_can_go_on()
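
Taken together, the event_sched_in(), event_sched_out() and group_can_go_on() matches (lines 1244-1666) describe a small invariant: an exclusive event must own the PMU alone. Below that bookkeeping is reduced to a standalone model so the three rules can be run against each other; only the two relevant counters are kept, attr.exclusive is flattened to a bool, and the software-event and can_add_hw cases of group_can_go_on() are left out.

        #include <assert.h>
        #include <stdbool.h>

        struct perf_cpu_context {
                int active_oncpu;       /* events currently on the hardware */
                int exclusive;          /* an exclusive event owns the PMU */
        };

        struct perf_event {
                bool exclusive;         /* models event->attr.exclusive */
        };

        static bool group_can_go_on(struct perf_event *event,
                                    struct perf_cpu_context *cpuctx)
        {
                if (cpuctx->exclusive)          /* PMU already owned outright */
                        return false;
                if (event->exclusive && cpuctx->active_oncpu)
                        return false;           /* would-be owner, PMU busy */
                return true;
        }

        static void event_sched_in(struct perf_event *event,
                                   struct perf_cpu_context *cpuctx)
        {
                cpuctx->active_oncpu++;
                if (event->exclusive)
                        cpuctx->exclusive = 1;
        }

        static void event_sched_out(struct perf_event *event,
                                    struct perf_cpu_context *cpuctx)
        {
                cpuctx->active_oncpu--;
                /* losing the owner, or the last active event, clears exclusivity */
                if (event->exclusive || !cpuctx->active_oncpu)
                        cpuctx->exclusive = 0;
        }

        int main(void)
        {
                struct perf_cpu_context cpuctx = { 0, 0 };
                struct perf_event normal = { false }, greedy = { true };

                event_sched_in(&normal, &cpuctx);
                assert(!group_can_go_on(&greedy, &cpuctx)); /* PMU busy */
                event_sched_out(&normal, &cpuctx);
                event_sched_in(&greedy, &cpuctx);
                assert(!group_can_go_on(&normal, &cpuctx)); /* PMU owned */
                event_sched_out(&greedy, &cpuctx);
                return 0;
        }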
1690 struct perf_cpu_context *cpuctx,
1694 static void perf_event_sched_in(struct perf_cpu_context *cpuctx, in perf_event_sched_in() argument
1698 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); in perf_event_sched_in()
1700 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); in perf_event_sched_in()
1701 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); in perf_event_sched_in()
1703 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); in perf_event_sched_in()
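
The perf_event_sched_in() matches (lines 1694-1703) encode the scheduler's priority order: pinned events on the CPU context, then pinned events on the task context, and only then the flexible events of each. The same order is why perf_event_context_sched_in() (lines 2353-2374) first schedules out CPU flexible events (line 2366) before a task context goes in. The sketch below replays the order with stubs; the if (ctx) guards are an assumption inferred from the gaps in the matched line numbers.

        #include <stddef.h>
        #include <stdio.h>

        enum event_type { EVENT_PINNED, EVENT_FLEXIBLE };

        struct perf_event_context { const char *name; };
        struct perf_cpu_context { struct perf_event_context ctx; };

        static void sched_in(struct perf_event_context *ctx, enum event_type type)
        {
                printf("%s: %s\n", ctx->name,
                       type == EVENT_PINNED ? "pinned" : "flexible");
        }

        static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
                                        struct perf_event_context *ctx)
        {
                sched_in(&cpuctx->ctx, EVENT_PINNED);   /* CPU pinned first */
                if (ctx)
                        sched_in(ctx, EVENT_PINNED);    /* then task pinned */
                sched_in(&cpuctx->ctx, EVENT_FLEXIBLE); /* flexible only after */
                if (ctx)
                        sched_in(ctx, EVENT_FLEXIBLE);
        }

        int main(void)
        {
                struct perf_cpu_context cpuctx = { { "cpu ctx" } };
                struct perf_event_context task_ctx = { "task ctx" };

                perf_event_sched_in(&cpuctx, &task_ctx);
                return 0;
        }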
1715 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_install_in_context() local
1716 struct perf_event_context *task_ctx = cpuctx->task_ctx; in __perf_install_in_context()
1719 perf_ctx_lock(cpuctx, task_ctx); in __perf_install_in_context()
1720 perf_pmu_disable(cpuctx->ctx.pmu); in __perf_install_in_context()
1740 cpuctx->task_ctx = task_ctx; in __perf_install_in_context()
1744 cpu_ctx_sched_out(cpuctx, EVENT_ALL); in __perf_install_in_context()
1759 perf_event_sched_in(cpuctx, task_ctx, task); in __perf_install_in_context()
1761 perf_pmu_enable(cpuctx->ctx.pmu); in __perf_install_in_context()
1762 perf_ctx_unlock(cpuctx, task_ctx); in __perf_install_in_context()
1850 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_enable() local
1882 if (!group_can_go_on(event, cpuctx, 1)) { in __perf_event_enable()
1886 err = group_sched_in(event, cpuctx, ctx); in __perf_event_enable()
1888 err = event_sched_in(event, cpuctx, ctx); in __perf_event_enable()
1897 group_sched_out(leader, cpuctx, ctx); in __perf_event_enable()
2020 struct perf_cpu_context *cpuctx, in ctx_sched_out() argument
2031 update_cgrp_time_from_cpuctx(cpuctx); in ctx_sched_out()
2038 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
2043 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
2148 struct perf_cpu_context *cpuctx; in perf_event_context_sched_out() local
2154 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_out()
2155 if (!cpuctx->task_ctx) in perf_event_context_sched_out()
2194 ctx_sched_out(ctx, cpuctx, EVENT_ALL); in perf_event_context_sched_out()
2195 cpuctx->task_ctx = NULL; in perf_event_context_sched_out()
2233 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in task_ctx_sched_out() local
2235 if (!cpuctx->task_ctx) in task_ctx_sched_out()
2238 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) in task_ctx_sched_out()
2241 ctx_sched_out(ctx, cpuctx, EVENT_ALL); in task_ctx_sched_out()
2242 cpuctx->task_ctx = NULL; in task_ctx_sched_out()
2248 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, in cpu_ctx_sched_out() argument
2251 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); in cpu_ctx_sched_out()
2256 struct perf_cpu_context *cpuctx) in ctx_pinned_sched_in() argument
2270 if (group_can_go_on(event, cpuctx, 1)) in ctx_pinned_sched_in()
2271 group_sched_in(event, cpuctx, ctx); in ctx_pinned_sched_in()
2286 struct perf_cpu_context *cpuctx) in ctx_flexible_sched_in() argument
2306 if (group_can_go_on(event, cpuctx, can_add_hw)) { in ctx_flexible_sched_in()
2307 if (group_sched_in(event, cpuctx, ctx)) in ctx_flexible_sched_in()
2315 struct perf_cpu_context *cpuctx, in ctx_sched_in() argument
2334 ctx_pinned_sched_in(ctx, cpuctx); in ctx_sched_in()
2338 ctx_flexible_sched_in(ctx, cpuctx); in ctx_sched_in()
2341 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx, in cpu_ctx_sched_in() argument
2345 struct perf_event_context *ctx = &cpuctx->ctx; in cpu_ctx_sched_in()
2347 ctx_sched_in(ctx, cpuctx, event_type, task); in cpu_ctx_sched_in()
2353 struct perf_cpu_context *cpuctx; in perf_event_context_sched_in() local
2355 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_in()
2356 if (cpuctx->task_ctx == ctx) in perf_event_context_sched_in()
2359 perf_ctx_lock(cpuctx, ctx); in perf_event_context_sched_in()
2366 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); in perf_event_context_sched_in()
2369 cpuctx->task_ctx = ctx; in perf_event_context_sched_in()
2371 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task); in perf_event_context_sched_in()
2374 perf_ctx_unlock(cpuctx, ctx); in perf_event_context_sched_in()
2402 struct perf_cpu_context *cpuctx; in perf_branch_stack_sched_in() local
2415 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_branch_stack_sched_in()
2421 if (cpuctx->ctx.nr_branch_stack > 0 in perf_branch_stack_sched_in()
2424 pmu = cpuctx->ctx.pmu; in perf_branch_stack_sched_in()
2426 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in perf_branch_stack_sched_in()
2434 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in perf_branch_stack_sched_in()
2671 static void perf_rotate_context(struct perf_cpu_context *cpuctx) in perf_rotate_context() argument
2676 if (cpuctx->ctx.nr_events) { in perf_rotate_context()
2678 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) in perf_rotate_context()
2682 ctx = cpuctx->task_ctx; in perf_rotate_context()
2692 perf_ctx_lock(cpuctx, cpuctx->task_ctx); in perf_rotate_context()
2693 perf_pmu_disable(cpuctx->ctx.pmu); in perf_rotate_context()
2695 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); in perf_rotate_context()
2697 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); in perf_rotate_context()
2699 rotate_ctx(&cpuctx->ctx); in perf_rotate_context()
2703 perf_event_sched_in(cpuctx, ctx, current); in perf_rotate_context()
2705 perf_pmu_enable(cpuctx->ctx.pmu); in perf_rotate_context()
2706 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); in perf_rotate_context()
2709 list_del_init(&cpuctx->rotation_list); in perf_rotate_context()
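
The perf_rotate_context() matches (lines 2671-2709) fire the expensive sched-out/rotate/sched-in cycle only when a context is overcommitted, i.e. nr_events differs from nr_active, and a cpuctx whose contexts have emptied unlinks itself from the rotation list (line 2709). The decision logic is modeled standalone below; the rotate/remove flag structure is reconstructed around the matched lines and may not match the kernel's exact control flow.

        #include <stdbool.h>
        #include <stdio.h>

        struct perf_event_context {
                int nr_events;          /* events attached to this context */
                int nr_active;          /* events currently on the hardware */
        };

        struct perf_cpu_context {
                struct perf_event_context ctx;          /* per-CPU events */
                struct perf_event_context *task_ctx;    /* current task's events */
        };

        static bool needs_rotation(struct perf_cpu_context *cpuctx, bool *remove)
        {
                bool rotate = false;

                *remove = true;         /* nothing left: leave rotation list */

                if (cpuctx->ctx.nr_events) {
                        *remove = false;
                        if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
                                rotate = true;  /* CPU ctx overcommitted */
                }
                if (cpuctx->task_ctx && cpuctx->task_ctx->nr_events) {
                        *remove = false;
                        if (cpuctx->task_ctx->nr_events !=
                            cpuctx->task_ctx->nr_active)
                                rotate = true;  /* task ctx overcommitted */
                }
                return rotate;
        }

        int main(void)
        {
                struct perf_event_context task_ctx = {
                        .nr_events = 4, .nr_active = 2, /* 2 events starved */
                };
                struct perf_cpu_context cpuctx = { { 1, 1 }, &task_ctx };
                bool remove;

                printf("rotate=%d\n", needs_rotation(&cpuctx, &remove)); /* 1 */
                return 0;
        }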
2725 struct perf_cpu_context *cpuctx, *tmp; in perf_event_task_tick() local
2734 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { in perf_event_task_tick()
2735 ctx = &cpuctx->ctx; in perf_event_task_tick()
2738 ctx = cpuctx->task_ctx; in perf_event_task_tick()
2742 if (cpuctx->jiffies_interval == 1 || in perf_event_task_tick()
2743 !(jiffies % cpuctx->jiffies_interval)) in perf_event_task_tick()
2744 perf_rotate_context(cpuctx); in perf_event_task_tick()
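
perf_event_task_tick() (lines 2725-2744) throttles rotation per PMU: a cpuctx is rotated only every jiffies_interval ticks, using exactly the test matched at lines 2742-2743. A toy reproduction, with jiffies faked as a plain counter:

        #include <stdio.h>

        static unsigned long jiffies;

        static void maybe_rotate(int jiffies_interval)
        {
                /* interval 1 rotates on every tick; otherwise only when the
                 * counter lands on a multiple of the interval */
                if (jiffies_interval == 1 || !(jiffies % jiffies_interval))
                        printf("tick %lu: perf_rotate_context()\n", jiffies);
        }

        int main(void)
        {
                for (jiffies = 0; jiffies < 8; jiffies++)
                        maybe_rotate(4);        /* rotates on ticks 0 and 4 */
                return 0;
        }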
2819 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_read() local
2828 if (ctx->task && cpuctx->task_ctx != ctx) in __perf_event_read()
2946 struct perf_cpu_context *cpuctx; in find_get_context() local
2963 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in find_get_context()
2964 ctx = &cpuctx->ctx; in find_get_context()
4648 struct perf_cpu_context *cpuctx; in perf_event_aux() local
4655 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); in perf_event_aux()
4656 if (cpuctx->unique_pmu != pmu) in perf_event_aux()
4658 perf_event_aux_ctx(&cpuctx->ctx, match, output, data); in perf_event_aux()
6092 struct perf_cpu_context *cpuctx; in update_pmu_context() local
6094 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in update_pmu_context()
6096 if (cpuctx->unique_pmu == old_pmu) in update_pmu_context()
6097 cpuctx->unique_pmu = pmu; in update_pmu_context()
6219 struct perf_cpu_context *cpuctx; in perf_pmu_register() local
6221 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_pmu_register()
6222 __perf_event_init_context(&cpuctx->ctx); in perf_pmu_register()
6223 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); in perf_pmu_register()
6224 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); in perf_pmu_register()
6225 cpuctx->ctx.pmu = pmu; in perf_pmu_register()
6226 cpuctx->jiffies_interval = 1; in perf_pmu_register()
6227 INIT_LIST_HEAD(&cpuctx->rotation_list); in perf_pmu_register()
6228 cpuctx->unique_pmu = pmu; in perf_pmu_register()
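
The perf_pmu_register() matches (lines 6219-6228) initialize one perf_cpu_context per possible CPU: the embedded context is pointed back at the PMU, the rotation interval defaults to one tick, the rotation list starts empty, and unique_pmu is set to the PMU itself (update_pmu_context(), lines 6092-6097, later re-points unique_pmu when PMUs share a context). A standalone model, with a flat array standing in for the kernel's per-CPU allocation and the lockdep class setup elided:

        #include <stdlib.h>

        #define NR_CPUS 4       /* stand-in for the possible-CPU mask */

        struct pmu;

        struct list_head { struct list_head *next, *prev; };

        static void INIT_LIST_HEAD(struct list_head *l) { l->next = l->prev = l; }

        struct perf_event_context { struct pmu *pmu; };

        struct perf_cpu_context {
                struct perf_event_context ctx;
                int jiffies_interval;
                struct list_head rotation_list;
                struct pmu *unique_pmu;
        };

        struct pmu { struct perf_cpu_context *pmu_cpu_context; };

        static int pmu_register(struct pmu *pmu)
        {
                int cpu;

                pmu->pmu_cpu_context =
                        calloc(NR_CPUS, sizeof(*pmu->pmu_cpu_context));
                if (!pmu->pmu_cpu_context)
                        return -1;

                for (cpu = 0; cpu < NR_CPUS; cpu++) {
                        struct perf_cpu_context *cpuctx =
                                &pmu->pmu_cpu_context[cpu];

                        cpuctx->ctx.pmu = pmu;          /* ctx knows its PMU */
                        cpuctx->jiffies_interval = 1;   /* rotate every tick */
                        INIT_LIST_HEAD(&cpuctx->rotation_list);
                        cpuctx->unique_pmu = pmu;       /* re-pointed later if
                                                         * PMUs share a context */
                }
                return 0;
        }

        int main(void)
        {
                struct pmu pmu;
                int ret = pmu_register(&pmu);

                free(pmu.pmu_cpu_context);
                return ret;
        }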
7633 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); in perf_pmu_rotate_stop() local
7637 list_del_init(&cpuctx->rotation_list); in perf_pmu_rotate_stop()