Lines matching refs: ctx
159 __get_cpu_context(struct perf_event_context *ctx) in __get_cpu_context() argument
161 return this_cpu_ptr(ctx->pmu->pmu_cpu_context); in __get_cpu_context()
165 struct perf_event_context *ctx) in perf_ctx_lock() argument
167 raw_spin_lock(&cpuctx->ctx.lock); in perf_ctx_lock()
168 if (ctx) in perf_ctx_lock()
169 raw_spin_lock(&ctx->lock); in perf_ctx_lock()
173 struct perf_event_context *ctx) in perf_ctx_unlock() argument
175 if (ctx) in perf_ctx_unlock()
176 raw_spin_unlock(&ctx->lock); in perf_ctx_unlock()
177 raw_spin_unlock(&cpuctx->ctx.lock); in perf_ctx_unlock()
219 struct perf_event_context *ctx = event->ctx; in event_function() local
220 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in event_function()
231 if (ctx->task) { in event_function()
232 if (ctx->task != current) { in event_function()
244 WARN_ON_ONCE(!ctx->is_active); in event_function()
249 WARN_ON_ONCE(task_ctx != ctx); in event_function()
251 WARN_ON_ONCE(&cpuctx->ctx != ctx); in event_function()
254 efs->func(event, cpuctx, ctx, efs->data); in event_function()
263 struct perf_event_context *ctx = event->ctx; in event_function_call() local
264 struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */ in event_function_call()
277 lockdep_assert_held(&ctx->mutex); in event_function_call()
292 raw_spin_lock_irq(&ctx->lock); in event_function_call()
297 task = ctx->task; in event_function_call()
299 raw_spin_unlock_irq(&ctx->lock); in event_function_call()
302 if (ctx->is_active) { in event_function_call()
303 raw_spin_unlock_irq(&ctx->lock); in event_function_call()
306 func(event, NULL, ctx, data); in event_function_call()
307 raw_spin_unlock_irq(&ctx->lock); in event_function_call()
316 struct perf_event_context *ctx = event->ctx; in event_function_local() local
317 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in event_function_local()
318 struct task_struct *task = READ_ONCE(ctx->task); in event_function_local()
327 task_ctx = ctx; in event_function_local()
332 task = ctx->task; in event_function_local()
342 if (ctx->is_active) { in event_function_local()
346 if (WARN_ON_ONCE(cpuctx->task_ctx != ctx)) in event_function_local()
350 WARN_ON_ONCE(&cpuctx->ctx != ctx); in event_function_local()
353 func(event, cpuctx, ctx, data); in event_function_local()
581 static void update_context_time(struct perf_event_context *ctx);
700 struct perf_event_context *ctx = event->ctx; in perf_cgroup_match() local
701 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_cgroup_match()
794 cgrp = perf_cgroup_from_task(current, event->ctx); in update_cgrp_time_from_event()
806 struct perf_event_context *ctx) in perf_cgroup_set_timestamp() argument
817 if (!task || !ctx->nr_cgroups) in perf_cgroup_set_timestamp()
820 cgrp = perf_cgroup_from_task(task, ctx); in perf_cgroup_set_timestamp()
825 __update_cgrp_time(info, ctx->timestamp, false); in perf_cgroup_set_timestamp()
855 WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0); in perf_cgroup_switch()
858 perf_pmu_disable(cpuctx->ctx.pmu); in perf_cgroup_switch()
879 &cpuctx->ctx); in perf_cgroup_switch()
882 perf_pmu_enable(cpuctx->ctx.pmu); in perf_cgroup_switch()
967 raw_spin_lock_irq(&cpuctx->ctx.lock); in perf_cgroup_ensure_storage()
974 raw_spin_unlock_irq(&cpuctx->ctx.lock); in perf_cgroup_ensure_storage()
1023 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1034 cpuctx = container_of(ctx, struct perf_cpu_context, ctx); in perf_cgroup_event_enable()
1042 if (ctx->is_active && !cpuctx->cgrp) { in perf_cgroup_event_enable()
1043 struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); in perf_cgroup_event_enable()
1049 if (ctx->nr_cgroups++) in perf_cgroup_event_enable()
1057 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1068 cpuctx = container_of(ctx, struct perf_cpu_context, ctx); in perf_cgroup_event_disable()
1070 if (--ctx->nr_cgroups) in perf_cgroup_event_disable()
1073 if (ctx->is_active && cpuctx->cgrp) in perf_cgroup_event_disable()
1123 struct perf_event_context *ctx) in perf_cgroup_set_timestamp() argument
1143 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1148 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1184 struct pmu *pmu = cpuctx->ctx.pmu; in __perf_mux_hrtimer_init()
1209 struct pmu *pmu = cpuctx->ctx.pmu; in perf_mux_hrtimer_restart()
1254 static void perf_event_ctx_activate(struct perf_event_context *ctx) in perf_event_ctx_activate() argument
1260 WARN_ON(!list_empty(&ctx->active_ctx_list)); in perf_event_ctx_activate()
1262 list_add(&ctx->active_ctx_list, head); in perf_event_ctx_activate()
1265 static void perf_event_ctx_deactivate(struct perf_event_context *ctx) in perf_event_ctx_deactivate() argument
1269 WARN_ON(list_empty(&ctx->active_ctx_list)); in perf_event_ctx_deactivate()
1271 list_del_init(&ctx->active_ctx_list); in perf_event_ctx_deactivate()
1274 static void get_ctx(struct perf_event_context *ctx) in get_ctx() argument
1276 refcount_inc(&ctx->refcount); in get_ctx()
1295 struct perf_event_context *ctx; in free_ctx() local
1297 ctx = container_of(head, struct perf_event_context, rcu_head); in free_ctx()
1298 free_task_ctx_data(ctx->pmu, ctx->task_ctx_data); in free_ctx()
1299 kfree(ctx); in free_ctx()
1302 static void put_ctx(struct perf_event_context *ctx) in put_ctx() argument
1304 if (refcount_dec_and_test(&ctx->refcount)) { in put_ctx()
1305 if (ctx->parent_ctx) in put_ctx()
1306 put_ctx(ctx->parent_ctx); in put_ctx()
1307 if (ctx->task && ctx->task != TASK_TOMBSTONE) in put_ctx()
1308 put_task_struct(ctx->task); in put_ctx()
1309 call_rcu(&ctx->rcu_head, free_ctx); in put_ctx()
1382 struct perf_event_context *ctx; in perf_event_ctx_lock_nested() local
1386 ctx = READ_ONCE(event->ctx); in perf_event_ctx_lock_nested()
1387 if (!refcount_inc_not_zero(&ctx->refcount)) { in perf_event_ctx_lock_nested()
1393 mutex_lock_nested(&ctx->mutex, nesting); in perf_event_ctx_lock_nested()
1394 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
1395 mutex_unlock(&ctx->mutex); in perf_event_ctx_lock_nested()
1396 put_ctx(ctx); in perf_event_ctx_lock_nested()
1400 return ctx; in perf_event_ctx_lock_nested()
1410 struct perf_event_context *ctx) in perf_event_ctx_unlock() argument
1412 mutex_unlock(&ctx->mutex); in perf_event_ctx_unlock()
1413 put_ctx(ctx); in perf_event_ctx_unlock()
1422 unclone_ctx(struct perf_event_context *ctx) in unclone_ctx() argument
1424 struct perf_event_context *parent_ctx = ctx->parent_ctx; in unclone_ctx()
1426 lockdep_assert_held(&ctx->lock); in unclone_ctx()
1429 ctx->parent_ctx = NULL; in unclone_ctx()
1430 ctx->generation++; in unclone_ctx()
1485 struct perf_event_context *ctx; in perf_lock_task_context() local
1499 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); in perf_lock_task_context()
1500 if (ctx) { in perf_lock_task_context()
1511 raw_spin_lock(&ctx->lock); in perf_lock_task_context()
1512 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { in perf_lock_task_context()
1513 raw_spin_unlock(&ctx->lock); in perf_lock_task_context()
1519 if (ctx->task == TASK_TOMBSTONE || in perf_lock_task_context()
1520 !refcount_inc_not_zero(&ctx->refcount)) { in perf_lock_task_context()
1521 raw_spin_unlock(&ctx->lock); in perf_lock_task_context()
1522 ctx = NULL; in perf_lock_task_context()
1524 WARN_ON_ONCE(ctx->task != task); in perf_lock_task_context()
1528 if (!ctx) in perf_lock_task_context()
1530 return ctx; in perf_lock_task_context()
1541 struct perf_event_context *ctx; in perf_pin_task_context() local
1544 ctx = perf_lock_task_context(task, ctxn, &flags); in perf_pin_task_context()
1545 if (ctx) { in perf_pin_task_context()
1546 ++ctx->pin_count; in perf_pin_task_context()
1547 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_pin_task_context()
1549 return ctx; in perf_pin_task_context()
1552 static void perf_unpin_context(struct perf_event_context *ctx) in perf_unpin_context() argument
1556 raw_spin_lock_irqsave(&ctx->lock, flags); in perf_unpin_context()
1557 --ctx->pin_count; in perf_unpin_context()
1558 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_unpin_context()
1564 static void __update_context_time(struct perf_event_context *ctx, bool adv) in __update_context_time() argument
1569 ctx->time += now - ctx->timestamp; in __update_context_time()
1570 ctx->timestamp = now; in __update_context_time()
1581 WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp); in __update_context_time()
1584 static void update_context_time(struct perf_event_context *ctx) in update_context_time() argument
1586 __update_context_time(ctx, true); in update_context_time()
1591 struct perf_event_context *ctx = event->ctx; in perf_event_time() local
1593 if (unlikely(!ctx)) in perf_event_time()
1599 return ctx->time; in perf_event_time()
1604 struct perf_event_context *ctx = event->ctx; in perf_event_time_now() local
1606 if (unlikely(!ctx)) in perf_event_time_now()
1612 if (!(__load_acquire(&ctx->is_active) & EVENT_TIME)) in perf_event_time_now()
1613 return ctx->time; in perf_event_time_now()
1615 now += READ_ONCE(ctx->timeoffset); in perf_event_time_now()
1621 struct perf_event_context *ctx = event->ctx; in get_event_type() local
1624 lockdep_assert_held(&ctx->lock); in get_event_type()
1634 if (!ctx->task) in get_event_type()
1654 get_event_groups(struct perf_event *event, struct perf_event_context *ctx) in get_event_groups() argument
1657 return &ctx->pinned_groups; in get_event_groups()
1659 return &ctx->flexible_groups; in get_event_groups()
1776 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) in add_event_to_groups() argument
1780 groups = get_event_groups(event, ctx); in add_event_to_groups()
1802 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) in del_event_from_groups() argument
1806 groups = get_event_groups(event, ctx); in del_event_from_groups()
1863 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1865 lockdep_assert_held(&ctx->lock); in list_add_event()
1879 add_event_to_groups(event, ctx); in list_add_event()
1882 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1883 ctx->nr_events++; in list_add_event()
1885 ctx->nr_stat++; in list_add_event()
1888 perf_cgroup_event_enable(event, ctx); in list_add_event()
1890 ctx->generation++; in list_add_event()
2045 lockdep_assert_held(&event->ctx->lock); in perf_group_attach()
2058 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
2077 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
2079 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
2080 lockdep_assert_held(&ctx->lock); in list_del_event()
2090 ctx->nr_events--; in list_del_event()
2092 ctx->nr_stat--; in list_del_event()
2097 del_event_from_groups(event, ctx); in list_del_event()
2107 perf_cgroup_event_disable(event, ctx); in list_del_event()
2111 ctx->generation++; in list_del_event()
2129 struct perf_event_context *ctx);
2133 struct perf_event_context *ctx = event->ctx; in perf_put_aux_event() local
2134 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_put_aux_event()
2163 event_sched_out(iter, cpuctx, ctx); in perf_put_aux_event()
2214 struct perf_event_context *ctx = event->ctx; in get_event_list() local
2215 return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active; in get_event_list()
2226 struct perf_event_context *ctx = event->ctx; in perf_remove_sibling_event() local
2227 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in perf_remove_sibling_event()
2229 event_sched_out(event, cpuctx, ctx); in perf_remove_sibling_event()
2237 struct perf_event_context *ctx = event->ctx; in perf_group_detach() local
2239 lockdep_assert_held(&ctx->lock); in perf_group_detach()
2278 add_event_to_groups(sibling, event->ctx); in perf_group_detach()
2284 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
2356 struct perf_event_context *ctx) in event_sched_out() argument
2360 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
2361 lockdep_assert_held(&ctx->lock); in event_sched_out()
2380 perf_cgroup_event_disable(event, ctx); in event_sched_out()
2396 local_dec(&event->ctx->nr_pending); in event_sched_out()
2403 if (!--ctx->nr_active) in event_sched_out()
2404 perf_event_ctx_deactivate(ctx); in event_sched_out()
2406 ctx->nr_freq--; in event_sched_out()
2416 struct perf_event_context *ctx) in group_sched_out() argument
2423 perf_pmu_disable(ctx->pmu); in group_sched_out()
2425 event_sched_out(group_event, cpuctx, ctx); in group_sched_out()
2431 event_sched_out(event, cpuctx, ctx); in group_sched_out()
2433 perf_pmu_enable(ctx->pmu); in group_sched_out()
2449 struct perf_event_context *ctx, in __perf_remove_from_context() argument
2454 if (ctx->is_active & EVENT_TIME) { in __perf_remove_from_context()
2455 update_context_time(ctx); in __perf_remove_from_context()
2465 event_sched_out(event, cpuctx, ctx); in __perf_remove_from_context()
2470 list_del_event(event, ctx); in __perf_remove_from_context()
2474 if (!ctx->nr_events && ctx->is_active) { in __perf_remove_from_context()
2475 if (ctx == &cpuctx->ctx) in __perf_remove_from_context()
2478 ctx->is_active = 0; in __perf_remove_from_context()
2479 ctx->rotate_necessary = 0; in __perf_remove_from_context()
2480 if (ctx->task) { in __perf_remove_from_context()
2481 WARN_ON_ONCE(cpuctx->task_ctx != ctx); in __perf_remove_from_context()
2499 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context() local
2501 lockdep_assert_held(&ctx->mutex); in perf_remove_from_context()
2508 raw_spin_lock_irq(&ctx->lock); in perf_remove_from_context()
2513 if (!ctx->is_active && !is_cgroup_event(event)) { in perf_remove_from_context()
2514 __perf_remove_from_context(event, __get_cpu_context(ctx), in perf_remove_from_context()
2515 ctx, (void *)flags); in perf_remove_from_context()
2516 raw_spin_unlock_irq(&ctx->lock); in perf_remove_from_context()
2519 raw_spin_unlock_irq(&ctx->lock); in perf_remove_from_context()
2529 struct perf_event_context *ctx, in __perf_event_disable() argument
2535 if (ctx->is_active & EVENT_TIME) { in __perf_event_disable()
2536 update_context_time(ctx); in __perf_event_disable()
2541 group_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2543 event_sched_out(event, cpuctx, ctx); in __perf_event_disable()
2546 perf_cgroup_event_disable(event, ctx); in __perf_event_disable()
2565 struct perf_event_context *ctx = event->ctx; in _perf_event_disable() local
2567 raw_spin_lock_irq(&ctx->lock); in _perf_event_disable()
2569 raw_spin_unlock_irq(&ctx->lock); in _perf_event_disable()
2572 raw_spin_unlock_irq(&ctx->lock); in _perf_event_disable()
2588 struct perf_event_context *ctx; in perf_event_disable() local
2590 ctx = perf_event_ctx_lock(event); in perf_event_disable()
2592 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
2610 struct perf_event_context *ctx) in event_sched_in() argument
2614 WARN_ON_ONCE(event->ctx != ctx); in event_sched_in()
2616 lockdep_assert_held(&ctx->lock); in event_sched_in()
2653 if (!ctx->nr_active++) in event_sched_in()
2654 perf_event_ctx_activate(ctx); in event_sched_in()
2656 ctx->nr_freq++; in event_sched_in()
2670 struct perf_event_context *ctx) in group_sched_in() argument
2673 struct pmu *pmu = ctx->pmu; in group_sched_in()
2680 if (event_sched_in(group_event, cpuctx, ctx)) in group_sched_in()
2687 if (event_sched_in(event, cpuctx, ctx)) { in group_sched_in()
2706 event_sched_out(event, cpuctx, ctx); in group_sched_in()
2708 event_sched_out(group_event, cpuctx, ctx); in group_sched_in()
2747 struct perf_event_context *ctx) in add_event_to_ctx() argument
2749 list_add_event(event, ctx); in add_event_to_ctx()
2753 static void ctx_sched_out(struct perf_event_context *ctx,
2757 ctx_sched_in(struct perf_event_context *ctx,
2763 struct perf_event_context *ctx, in task_ctx_sched_out() argument
2769 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx)) in task_ctx_sched_out()
2772 ctx_sched_out(ctx, cpuctx, event_type); in task_ctx_sched_out()
2776 struct perf_event_context *ctx, in perf_event_sched_in() argument
2780 if (ctx) in perf_event_sched_in()
2781 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); in perf_event_sched_in()
2783 if (ctx) in perf_event_sched_in()
2784 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); in perf_event_sched_in()
2818 perf_pmu_disable(cpuctx->ctx.pmu); in ctx_resched()
2835 perf_pmu_enable(cpuctx->ctx.pmu); in ctx_resched()
2857 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context() local
2858 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_install_in_context()
2863 raw_spin_lock(&cpuctx->ctx.lock); in __perf_install_in_context()
2864 if (ctx->task) { in __perf_install_in_context()
2865 raw_spin_lock(&ctx->lock); in __perf_install_in_context()
2866 task_ctx = ctx; in __perf_install_in_context()
2868 reprogram = (ctx->task == current); in __perf_install_in_context()
2877 if (task_curr(ctx->task) && !reprogram) { in __perf_install_in_context()
2882 WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx); in __perf_install_in_context()
2893 struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx); in __perf_install_in_context()
2900 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in __perf_install_in_context()
2901 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2904 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2914 struct perf_event_context *ctx);
2922 perf_install_in_context(struct perf_event_context *ctx, in perf_install_in_context() argument
2926 struct task_struct *task = READ_ONCE(ctx->task); in perf_install_in_context()
2928 lockdep_assert_held(&ctx->mutex); in perf_install_in_context()
2930 WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); in perf_install_in_context()
2939 smp_store_release(&event->ctx, ctx); in perf_install_in_context()
2952 ctx->nr_events && !is_cgroup_event(event)) { in perf_install_in_context()
2953 raw_spin_lock_irq(&ctx->lock); in perf_install_in_context()
2954 if (ctx->task == TASK_TOMBSTONE) { in perf_install_in_context()
2955 raw_spin_unlock_irq(&ctx->lock); in perf_install_in_context()
2958 add_event_to_ctx(event, ctx); in perf_install_in_context()
2959 raw_spin_unlock_irq(&ctx->lock); in perf_install_in_context()
3009 raw_spin_lock_irq(&ctx->lock); in perf_install_in_context()
3010 task = ctx->task; in perf_install_in_context()
3017 raw_spin_unlock_irq(&ctx->lock); in perf_install_in_context()
3025 raw_spin_unlock_irq(&ctx->lock); in perf_install_in_context()
3028 add_event_to_ctx(event, ctx); in perf_install_in_context()
3029 raw_spin_unlock_irq(&ctx->lock); in perf_install_in_context()
3037 struct perf_event_context *ctx, in __perf_event_enable() argument
3047 if (ctx->is_active) in __perf_event_enable()
3048 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in __perf_event_enable()
3051 perf_cgroup_event_enable(event, ctx); in __perf_event_enable()
3053 if (!ctx->is_active) in __perf_event_enable()
3057 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); in __perf_event_enable()
3066 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); in __perf_event_enable()
3071 if (ctx->task) in __perf_event_enable()
3072 WARN_ON_ONCE(task_ctx != ctx); in __perf_event_enable()
3088 struct perf_event_context *ctx = event->ctx; in _perf_event_enable() local
3090 raw_spin_lock_irq(&ctx->lock); in _perf_event_enable()
3094 raw_spin_unlock_irq(&ctx->lock); in _perf_event_enable()
3115 raw_spin_unlock_irq(&ctx->lock); in _perf_event_enable()
3125 struct perf_event_context *ctx; in perf_event_enable() local
3127 ctx = perf_event_ctx_lock(event); in perf_event_enable()
3129 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
3258 struct perf_event_context *ctx; in perf_event_refresh() local
3261 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
3263 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
3312 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_modify_attr()
3335 static void ctx_sched_out(struct perf_event_context *ctx, in ctx_sched_out() argument
3340 int is_active = ctx->is_active; in ctx_sched_out()
3342 lockdep_assert_held(&ctx->lock); in ctx_sched_out()
3344 if (likely(!ctx->nr_events)) { in ctx_sched_out()
3348 WARN_ON_ONCE(ctx->is_active); in ctx_sched_out()
3349 if (ctx->task) in ctx_sched_out()
3366 update_context_time(ctx); in ctx_sched_out()
3367 update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx); in ctx_sched_out()
3375 ctx->is_active &= ~event_type; in ctx_sched_out()
3376 if (!(ctx->is_active & EVENT_ALL)) in ctx_sched_out()
3377 ctx->is_active = 0; in ctx_sched_out()
3379 if (ctx->task) { in ctx_sched_out()
3380 WARN_ON_ONCE(cpuctx->task_ctx != ctx); in ctx_sched_out()
3381 if (!ctx->is_active) in ctx_sched_out()
3385 is_active ^= ctx->is_active; /* changed bits */ in ctx_sched_out()
3387 if (!ctx->nr_active || !(is_active & EVENT_ALL)) in ctx_sched_out()
3390 perf_pmu_disable(ctx->pmu); in ctx_sched_out()
3392 list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list) in ctx_sched_out()
3393 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3397 list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list) in ctx_sched_out()
3398 group_sched_out(event, cpuctx, ctx); in ctx_sched_out()
3405 ctx->rotate_necessary = 0; in ctx_sched_out()
3407 perf_pmu_enable(ctx->pmu); in ctx_sched_out()
3486 static void perf_event_sync_stat(struct perf_event_context *ctx, in perf_event_sync_stat() argument
3491 if (!ctx->nr_stat) in perf_event_sync_stat()
3494 update_context_time(ctx); in perf_event_sync_stat()
3496 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
3502 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
3515 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; in perf_event_context_sched_out() local
3522 if (likely(!ctx)) in perf_event_context_sched_out()
3525 pmu = ctx->pmu; in perf_event_context_sched_out()
3526 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_out()
3535 parent = rcu_dereference(ctx->parent_ctx); in perf_event_context_sched_out()
3542 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { in perf_event_context_sched_out()
3552 raw_spin_lock(&ctx->lock); in perf_event_context_sched_out()
3554 if (context_equiv(ctx, next_ctx)) { in perf_event_context_sched_out()
3559 if (local_read(&ctx->nr_pending) || in perf_event_context_sched_out()
3570 WRITE_ONCE(ctx->task, next); in perf_event_context_sched_out()
3574 pmu->sched_task(ctx, false); in perf_event_context_sched_out()
3583 pmu->swap_task_ctx(ctx, next_ctx); in perf_event_context_sched_out()
3585 swap(ctx->task_ctx_data, next_ctx->task_ctx_data); in perf_event_context_sched_out()
3597 RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx); in perf_event_context_sched_out()
3601 perf_event_sync_stat(ctx, next_ctx); in perf_event_context_sched_out()
3604 raw_spin_unlock(&ctx->lock); in perf_event_context_sched_out()
3610 raw_spin_lock(&ctx->lock); in perf_event_context_sched_out()
3615 pmu->sched_task(ctx, false); in perf_event_context_sched_out()
3616 task_ctx_sched_out(cpuctx, ctx, EVENT_ALL); in perf_event_context_sched_out()
3619 raw_spin_unlock(&ctx->lock); in perf_event_context_sched_out()
3658 pmu = cpuctx->ctx.pmu; /* software PMUs will not have sched_task */ in __perf_pmu_sched_task()
3736 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); in cpu_ctx_sched_out()
3791 lockdep_assert_held(&cpuctx->ctx.lock); in visit_groups_merge()
3863 struct perf_event_context *ctx = event->ctx; in merge_sched_in() local
3864 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in merge_sched_in()
3874 if (!group_sched_in(event, cpuctx, ctx)) in merge_sched_in()
3881 perf_cgroup_event_disable(event, ctx); in merge_sched_in()
3884 ctx->rotate_necessary = 1; in merge_sched_in()
3894 ctx_pinned_sched_in(struct perf_event_context *ctx, in ctx_pinned_sched_in() argument
3899 if (ctx != &cpuctx->ctx) in ctx_pinned_sched_in()
3902 visit_groups_merge(cpuctx, &ctx->pinned_groups, in ctx_pinned_sched_in()
3908 ctx_flexible_sched_in(struct perf_event_context *ctx, in ctx_flexible_sched_in() argument
3913 if (ctx != &cpuctx->ctx) in ctx_flexible_sched_in()
3916 visit_groups_merge(cpuctx, &ctx->flexible_groups, in ctx_flexible_sched_in()
3922 ctx_sched_in(struct perf_event_context *ctx, in ctx_sched_in() argument
3927 int is_active = ctx->is_active; in ctx_sched_in()
3929 lockdep_assert_held(&ctx->lock); in ctx_sched_in()
3931 if (likely(!ctx->nr_events)) in ctx_sched_in()
3936 __update_context_time(ctx, false); in ctx_sched_in()
3937 perf_cgroup_set_timestamp(task, ctx); in ctx_sched_in()
3945 ctx->is_active |= (event_type | EVENT_TIME); in ctx_sched_in()
3946 if (ctx->task) { in ctx_sched_in()
3948 cpuctx->task_ctx = ctx; in ctx_sched_in()
3950 WARN_ON_ONCE(cpuctx->task_ctx != ctx); in ctx_sched_in()
3953 is_active ^= ctx->is_active; /* changed bits */ in ctx_sched_in()
3960 ctx_pinned_sched_in(ctx, cpuctx); in ctx_sched_in()
3964 ctx_flexible_sched_in(ctx, cpuctx); in ctx_sched_in()
3971 struct perf_event_context *ctx = &cpuctx->ctx; in cpu_ctx_sched_in() local
3973 ctx_sched_in(ctx, cpuctx, event_type, task); in cpu_ctx_sched_in()
3976 static void perf_event_context_sched_in(struct perf_event_context *ctx, in perf_event_context_sched_in() argument
3982 cpuctx = __get_cpu_context(ctx); in perf_event_context_sched_in()
3988 pmu = ctx->pmu = cpuctx->ctx.pmu; in perf_event_context_sched_in()
3990 if (cpuctx->task_ctx == ctx) { in perf_event_context_sched_in()
3996 perf_ctx_lock(cpuctx, ctx); in perf_event_context_sched_in()
4001 if (!ctx->nr_events) in perf_event_context_sched_in()
4013 if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) in perf_event_context_sched_in()
4015 perf_event_sched_in(cpuctx, ctx, task); in perf_event_context_sched_in()
4023 perf_ctx_unlock(cpuctx, ctx); in perf_event_context_sched_in()
4040 struct perf_event_context *ctx; in __perf_event_task_sched_in() local
4054 ctx = task->perf_event_ctxp[ctxn]; in __perf_event_task_sched_in()
4055 if (likely(!ctx)) in __perf_event_task_sched_in()
4058 perf_event_context_sched_in(ctx, task); in __perf_event_task_sched_in()
4178 static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, in perf_adjust_freq_unthr_context() argument
4191 if (!(ctx->nr_freq || needs_unthr)) in perf_adjust_freq_unthr_context()
4194 raw_spin_lock(&ctx->lock); in perf_adjust_freq_unthr_context()
4195 perf_pmu_disable(ctx->pmu); in perf_adjust_freq_unthr_context()
4197 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_adjust_freq_unthr_context()
4241 perf_pmu_enable(ctx->pmu); in perf_adjust_freq_unthr_context()
4242 raw_spin_unlock(&ctx->lock); in perf_adjust_freq_unthr_context()
4248 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) in rotate_ctx() argument
4254 if (ctx->rotate_disable) in rotate_ctx()
4257 perf_event_groups_delete(&ctx->flexible_groups, event); in rotate_ctx()
4258 perf_event_groups_insert(&ctx->flexible_groups, event); in rotate_ctx()
4263 ctx_event_to_rotate(struct perf_event_context *ctx) in ctx_event_to_rotate() argument
4268 event = list_first_entry_or_null(&ctx->flexible_active, in ctx_event_to_rotate()
4273 event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree), in ctx_event_to_rotate()
4281 ctx->rotate_necessary = 0; in ctx_event_to_rotate()
4297 cpu_rotate = cpuctx->ctx.rotate_necessary; in perf_rotate_context()
4305 perf_pmu_disable(cpuctx->ctx.pmu); in perf_rotate_context()
4310 cpu_event = ctx_event_to_rotate(&cpuctx->ctx); in perf_rotate_context()
4324 rotate_ctx(&cpuctx->ctx, cpu_event); in perf_rotate_context()
4328 perf_pmu_enable(cpuctx->ctx.pmu); in perf_rotate_context()
4337 struct perf_event_context *ctx, *tmp; in perf_event_task_tick() local
4346 list_for_each_entry_safe(ctx, tmp, head, active_ctx_list) in perf_event_task_tick()
4347 perf_adjust_freq_unthr_context(ctx, throttled); in perf_event_task_tick()
4351 struct perf_event_context *ctx) in event_enable_on_exec() argument
4371 struct perf_event_context *ctx, *clone_ctx = NULL; in perf_event_enable_on_exec() local
4379 ctx = current->perf_event_ctxp[ctxn]; in perf_event_enable_on_exec()
4380 if (!ctx || !ctx->nr_events) in perf_event_enable_on_exec()
4383 cpuctx = __get_cpu_context(ctx); in perf_event_enable_on_exec()
4384 perf_ctx_lock(cpuctx, ctx); in perf_event_enable_on_exec()
4385 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in perf_event_enable_on_exec()
4386 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
4387 enabled |= event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
4395 clone_ctx = unclone_ctx(ctx); in perf_event_enable_on_exec()
4396 ctx_resched(cpuctx, ctx, event_type); in perf_event_enable_on_exec()
4398 ctx_sched_in(ctx, cpuctx, EVENT_TIME, current); in perf_event_enable_on_exec()
4400 perf_ctx_unlock(cpuctx, ctx); in perf_event_enable_on_exec()
4411 struct perf_event_context *ctx);
4419 struct perf_event_context *ctx, *clone_ctx = NULL; in perf_event_remove_on_exec() local
4425 ctx = perf_pin_task_context(current, ctxn); in perf_event_remove_on_exec()
4426 if (!ctx) in perf_event_remove_on_exec()
4429 mutex_lock(&ctx->mutex); in perf_event_remove_on_exec()
4431 if (WARN_ON_ONCE(ctx->task != current)) in perf_event_remove_on_exec()
4434 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { in perf_event_remove_on_exec()
4443 perf_event_exit_event(event, ctx); in perf_event_remove_on_exec()
4446 raw_spin_lock_irqsave(&ctx->lock, flags); in perf_event_remove_on_exec()
4448 clone_ctx = unclone_ctx(ctx); in perf_event_remove_on_exec()
4449 --ctx->pin_count; in perf_event_remove_on_exec()
4450 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_event_remove_on_exec()
4453 mutex_unlock(&ctx->mutex); in perf_event_remove_on_exec()
4455 put_ctx(ctx); in perf_event_remove_on_exec()
4490 struct perf_event_context *ctx = event->ctx; in __perf_event_read() local
4491 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_read()
4501 if (ctx->task && cpuctx->task_ctx != ctx) in __perf_event_read()
4504 raw_spin_lock(&ctx->lock); in __perf_event_read()
4505 if (ctx->is_active & EVENT_TIME) { in __perf_event_read()
4506 update_context_time(ctx); in __perf_event_read()
4540 raw_spin_unlock(&ctx->lock); in __perf_event_read()
4683 struct perf_event_context *ctx = event->ctx; in perf_event_read() local
4686 raw_spin_lock_irqsave(&ctx->lock, flags); in perf_event_read()
4689 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_event_read()
4697 if (ctx->is_active & EVENT_TIME) { in perf_event_read()
4698 update_context_time(ctx); in perf_event_read()
4705 raw_spin_unlock_irqrestore(&ctx->lock, flags); in perf_event_read()
4714 static void __perf_event_init_context(struct perf_event_context *ctx) in __perf_event_init_context() argument
4716 raw_spin_lock_init(&ctx->lock); in __perf_event_init_context()
4717 mutex_init(&ctx->mutex); in __perf_event_init_context()
4718 INIT_LIST_HEAD(&ctx->active_ctx_list); in __perf_event_init_context()
4719 perf_event_groups_init(&ctx->pinned_groups); in __perf_event_init_context()
4720 perf_event_groups_init(&ctx->flexible_groups); in __perf_event_init_context()
4721 INIT_LIST_HEAD(&ctx->event_list); in __perf_event_init_context()
4722 INIT_LIST_HEAD(&ctx->pinned_active); in __perf_event_init_context()
4723 INIT_LIST_HEAD(&ctx->flexible_active); in __perf_event_init_context()
4724 refcount_set(&ctx->refcount, 1); in __perf_event_init_context()
4730 struct perf_event_context *ctx; in alloc_perf_context() local
4732 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); in alloc_perf_context()
4733 if (!ctx) in alloc_perf_context()
4736 __perf_event_init_context(ctx); in alloc_perf_context()
4738 ctx->task = get_task_struct(task); in alloc_perf_context()
4739 ctx->pmu = pmu; in alloc_perf_context()
4741 return ctx; in alloc_perf_context()
4771 struct perf_event_context *ctx, *clone_ctx = NULL; in find_get_context() local
4785 ctx = &cpuctx->ctx; in find_get_context()
4786 get_ctx(ctx); in find_get_context()
4787 raw_spin_lock_irqsave(&ctx->lock, flags); in find_get_context()
4788 ++ctx->pin_count; in find_get_context()
4789 raw_spin_unlock_irqrestore(&ctx->lock, flags); in find_get_context()
4791 return ctx; in find_get_context()
4808 ctx = perf_lock_task_context(task, ctxn, &flags); in find_get_context()
4809 if (ctx) { in find_get_context()
4810 clone_ctx = unclone_ctx(ctx); in find_get_context()
4811 ++ctx->pin_count; in find_get_context()
4813 if (task_ctx_data && !ctx->task_ctx_data) { in find_get_context()
4814 ctx->task_ctx_data = task_ctx_data; in find_get_context()
4817 raw_spin_unlock_irqrestore(&ctx->lock, flags); in find_get_context()
4822 ctx = alloc_perf_context(pmu, task); in find_get_context()
4824 if (!ctx) in find_get_context()
4828 ctx->task_ctx_data = task_ctx_data; in find_get_context()
4843 get_ctx(ctx); in find_get_context()
4844 ++ctx->pin_count; in find_get_context()
4845 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); in find_get_context()
4850 put_ctx(ctx); in find_get_context()
4859 return ctx; in find_get_context()
5071 struct perf_event_context *ctx) in exclusive_event_installable() argument
5076 lockdep_assert_held(&ctx->mutex); in exclusive_event_installable()
5081 list_for_each_entry(iter_event, &ctx->event_list, event_entry) { in exclusive_event_installable()
5138 if (event->ctx) in _free_event()
5139 put_ctx(event->ctx); in _free_event()
5229 struct perf_event_context *ctx = event->ctx; in perf_event_release_kernel() local
5237 if (!ctx) { in perf_event_release_kernel()
5246 ctx = perf_event_ctx_lock(event); in perf_event_release_kernel()
5247 WARN_ON_ONCE(ctx->parent_ctx); in perf_event_release_kernel()
5262 perf_event_ctx_unlock(event, ctx); in perf_event_release_kernel()
5272 ctx = READ_ONCE(child->ctx); in perf_event_release_kernel()
5281 get_ctx(ctx); in perf_event_release_kernel()
5289 mutex_lock(&ctx->mutex); in perf_event_release_kernel()
5310 mutex_unlock(&ctx->mutex); in perf_event_release_kernel()
5311 put_ctx(ctx); in perf_event_release_kernel()
5317 void *var = &child->ctx->refcount; in perf_event_release_kernel()
5376 struct perf_event_context *ctx; in perf_event_read_value() local
5379 ctx = perf_event_ctx_lock(event); in perf_event_read_value()
5381 perf_event_ctx_unlock(event, ctx); in perf_event_read_value()
5390 struct perf_event_context *ctx = leader->ctx; in __perf_read_group_add() local
5400 raw_spin_lock_irqsave(&ctx->lock, flags); in __perf_read_group_add()
5458 raw_spin_unlock_irqrestore(&ctx->lock, flags); in __perf_read_group_add()
5466 struct perf_event_context *ctx = leader->ctx; in perf_read_group() local
5470 lockdep_assert_held(&ctx->mutex); in perf_read_group()
5558 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
5571 struct perf_event_context *ctx; in perf_read() local
5578 ctx = perf_event_ctx_lock(event); in perf_read()
5580 perf_event_ctx_unlock(event, ctx); in perf_read()
5618 struct perf_event_context *ctx; in perf_event_pause() local
5621 ctx = perf_event_ctx_lock(event); in perf_event_pause()
5627 perf_event_ctx_unlock(event, ctx); in perf_event_pause()
5644 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
5656 struct perf_event_context *ctx = event->ctx; in perf_event_for_each() local
5659 lockdep_assert_held(&ctx->mutex); in perf_event_for_each()
5670 struct perf_event_context *ctx, in __perf_event_period() argument
5685 perf_pmu_disable(ctx->pmu); in __perf_event_period()
5701 perf_pmu_enable(ctx->pmu); in __perf_event_period()
5734 struct perf_event_context *ctx; in perf_event_period() local
5737 ctx = perf_event_ctx_lock(event); in perf_event_period()
5739 perf_event_ctx_unlock(event, ctx); in perf_event_period()
5885 struct perf_event_context *ctx; in perf_ioctl() local
5893 ctx = perf_event_ctx_lock(event); in perf_ioctl()
5895 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
5924 struct perf_event_context *ctx; in perf_event_task_enable() local
5929 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
5931 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
5940 struct perf_event_context *ctx; in perf_event_task_disable() local
5945 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
5947 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
6429 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
6605 if (WARN_ON_ONCE(event->ctx->task != current)) in perf_sigtrap()
6640 local_dec(&event->ctx->nr_pending); in __perf_pending_irq()
6713 local_dec(&event->ctx->nr_pending); in perf_pending_task()
7490 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
7775 perf_iterate_ctx(struct perf_event_context *ctx, in perf_iterate_ctx() argument
7781 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_iterate_ctx()
7804 if (!smp_load_acquire(&event->ctx)) in perf_iterate_sb_cpu()
7825 struct perf_event_context *ctx; in perf_iterate_sb() local
7844 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); in perf_iterate_sb()
7845 if (ctx) in perf_iterate_sb()
7846 perf_iterate_ctx(ctx, output, data, false); in perf_iterate_sb()
7888 struct perf_event_context *ctx; in perf_event_exec() local
7896 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); in perf_event_exec()
7897 if (ctx) { in perf_event_exec()
7898 perf_iterate_ctx(ctx, perf_event_addr_filters_exec, in perf_event_exec()
7942 struct pmu *pmu = event->ctx->pmu; in __perf_pmu_output_stop()
7949 perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false); in __perf_pmu_output_stop()
8731 struct perf_event_context *ctx; in perf_addr_filters_adjust() local
8743 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); in perf_addr_filters_adjust()
8744 if (!ctx) in perf_addr_filters_adjust()
8747 perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true); in perf_addr_filters_adjust()
8885 if (event->ctx->task) { in perf_event_switch_output()
8903 if (event->ctx->task) in perf_event_switch_output()
9388 local_inc(&event->ctx->nr_pending); in __perf_event_overflow()
9602 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
9948 struct perf_event_context *ctx; in perf_tp_event() local
9952 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); in perf_tp_event()
9953 if (!ctx) in perf_tp_event()
9956 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_tp_event()
10177 struct bpf_perf_event_data_kern ctx = { in bpf_overflow_handler() local
10184 ctx.regs = perf_arch_bpf_user_pt_regs(regs); in bpf_overflow_handler()
10190 ret = bpf_prog_run(prog, &ctx); in bpf_overflow_handler()
10442 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply()
10661 if (!event->ctx->task) in perf_event_parse_addr_filter()
10714 lockdep_assert_held(&event->ctx->mutex); in perf_event_set_addr_filter()
10755 struct perf_event_context *ctx = event->ctx; in perf_event_set_filter() local
10768 mutex_unlock(&ctx->mutex); in perf_event_set_filter()
10770 mutex_lock(&ctx->mutex); in perf_event_set_filter()
10965 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
10972 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
10992 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
10993 u64 time = event->ctx->time + delta; in task_clock_event_read()
11342 __perf_event_init_context(&cpuctx->ctx); in perf_pmu_register()
11343 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex); in perf_pmu_register()
11344 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock); in perf_pmu_register()
11345 cpuctx->ctx.pmu = pmu; in perf_pmu_register()
11448 struct perf_event_context *ctx = NULL; in perf_try_init_event() local
11465 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
11467 BUG_ON(!ctx); in perf_try_init_event()
11473 if (ctx) in perf_try_init_event()
11474 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
12191 struct perf_event_context *ctx) in __perf_event_ctx_lock_double() argument
12197 gctx = READ_ONCE(group_leader->ctx); in __perf_event_ctx_lock_double()
12204 mutex_lock_double(&gctx->mutex, &ctx->mutex); in __perf_event_ctx_lock_double()
12206 if (group_leader->ctx != gctx) { in __perf_event_ctx_lock_double()
12207 mutex_unlock(&ctx->mutex); in __perf_event_ctx_lock_double()
12263 struct perf_event_context *ctx, *gctx; in SYSCALL_DEFINE5() local
12404 pmu = group_leader->ctx->pmu; in SYSCALL_DEFINE5()
12420 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
12421 if (IS_ERR(ctx)) { in SYSCALL_DEFINE5()
12422 err = PTR_ERR(ctx); in SYSCALL_DEFINE5()
12455 if (group_leader->ctx->task != ctx->task) in SYSCALL_DEFINE5()
12466 if (!move_group && group_leader->ctx != ctx) in SYSCALL_DEFINE5()
12507 gctx = __perf_event_ctx_lock_double(group_leader, ctx); in SYSCALL_DEFINE5()
12524 if (gctx != ctx) { in SYSCALL_DEFINE5()
12538 if (!exclusive_event_installable(group_leader, ctx)) in SYSCALL_DEFINE5()
12542 if (!exclusive_event_installable(sibling, ctx)) in SYSCALL_DEFINE5()
12546 mutex_lock(&ctx->mutex); in SYSCALL_DEFINE5()
12552 if (group_leader && group_leader->ctx != ctx) { in SYSCALL_DEFINE5()
12559 if (ctx->task == TASK_TOMBSTONE) { in SYSCALL_DEFINE5()
12577 container_of(ctx, struct perf_cpu_context, ctx); in SYSCALL_DEFINE5()
12594 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
12599 WARN_ON_ONCE(ctx->parent_ctx); in SYSCALL_DEFINE5()
12637 perf_install_in_context(ctx, sibling, sibling->cpu); in SYSCALL_DEFINE5()
12638 get_ctx(ctx); in SYSCALL_DEFINE5()
12647 perf_install_in_context(ctx, group_leader, group_leader->cpu); in SYSCALL_DEFINE5()
12648 get_ctx(ctx); in SYSCALL_DEFINE5()
12662 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
12663 perf_unpin_context(ctx); in SYSCALL_DEFINE5()
12667 mutex_unlock(&ctx->mutex); in SYSCALL_DEFINE5()
12691 mutex_unlock(&ctx->mutex); in SYSCALL_DEFINE5()
12698 perf_unpin_context(ctx); in SYSCALL_DEFINE5()
12699 put_ctx(ctx); in SYSCALL_DEFINE5()
12732 struct perf_event_context *ctx; in perf_event_create_kernel_counter() local
12756 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
12757 if (IS_ERR(ctx)) { in perf_event_create_kernel_counter()
12758 err = PTR_ERR(ctx); in perf_event_create_kernel_counter()
12762 WARN_ON_ONCE(ctx->parent_ctx); in perf_event_create_kernel_counter()
12763 mutex_lock(&ctx->mutex); in perf_event_create_kernel_counter()
12764 if (ctx->task == TASK_TOMBSTONE) { in perf_event_create_kernel_counter()
12777 container_of(ctx, struct perf_cpu_context, ctx); in perf_event_create_kernel_counter()
12784 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
12789 perf_install_in_context(ctx, event, event->cpu); in perf_event_create_kernel_counter()
12790 perf_unpin_context(ctx); in perf_event_create_kernel_counter()
12791 mutex_unlock(&ctx->mutex); in perf_event_create_kernel_counter()
12796 mutex_unlock(&ctx->mutex); in perf_event_create_kernel_counter()
12797 perf_unpin_context(ctx); in perf_event_create_kernel_counter()
12798 put_ctx(ctx); in perf_event_create_kernel_counter()
12813 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; in perf_pmu_migrate_context()
12814 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; in perf_pmu_migrate_context()
12877 struct task_struct *task = child_event->ctx->task; in sync_child_event()
12896 perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) in perf_event_exit_event() argument
12920 raw_spin_lock_irq(&ctx->lock); in perf_event_exit_event()
12923 raw_spin_unlock_irq(&ctx->lock); in perf_event_exit_event()
13044 struct perf_event_context *ctx) in perf_free_event() argument
13057 raw_spin_lock_irq(&ctx->lock); in perf_free_event()
13059 list_del_event(event, ctx); in perf_free_event()
13060 raw_spin_unlock_irq(&ctx->lock); in perf_free_event()
13073 struct perf_event_context *ctx; in perf_event_free_task() local
13078 ctx = task->perf_event_ctxp[ctxn]; in perf_event_free_task()
13079 if (!ctx) in perf_event_free_task()
13082 mutex_lock(&ctx->mutex); in perf_event_free_task()
13083 raw_spin_lock_irq(&ctx->lock); in perf_event_free_task()
13091 WRITE_ONCE(ctx->task, TASK_TOMBSTONE); in perf_event_free_task()
13093 raw_spin_unlock_irq(&ctx->lock); in perf_event_free_task()
13095 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) in perf_event_free_task()
13096 perf_free_event(event, ctx); in perf_event_free_task()
13098 mutex_unlock(&ctx->mutex); in perf_event_free_task()
13114 wait_var_event(&ctx->refcount, refcount_read(&ctx->refcount) == 1); in perf_event_free_task()
13115 put_ctx(ctx); /* must be last */ in perf_event_free_task()
13243 child_event->ctx = child_ctx; in inherit_event()
13529 struct perf_event_context *ctx = __info; in __perf_event_exit_context() local
13530 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); in __perf_event_exit_context()
13533 raw_spin_lock(&ctx->lock); in __perf_event_exit_context()
13534 ctx_sched_out(ctx, cpuctx, EVENT_TIME); in __perf_event_exit_context()
13535 list_for_each_entry(event, &ctx->event_list, event_entry) in __perf_event_exit_context()
13536 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()
13537 raw_spin_unlock(&ctx->lock); in __perf_event_exit_context()
13543 struct perf_event_context *ctx; in perf_event_exit_cpu_context() local
13549 ctx = &cpuctx->ctx; in perf_event_exit_cpu_context()
13551 mutex_lock(&ctx->mutex); in perf_event_exit_cpu_context()
13552 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1); in perf_event_exit_cpu_context()
13554 mutex_unlock(&ctx->mutex); in perf_event_exit_cpu_context()
13568 struct perf_event_context *ctx; in perf_event_init_cpu() local
13577 ctx = &cpuctx->ctx; in perf_event_init_cpu()
13579 mutex_lock(&ctx->mutex); in perf_event_init_cpu()
13581 mutex_unlock(&ctx->mutex); in perf_event_init_cpu()
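
The perf_ctx_lock()/perf_ctx_unlock() entries near the top of this listing show the nesting discipline the rest of the matches rely on: the per-CPU context lock (cpuctx->ctx.lock) is taken before the optional task context lock (ctx->lock), and the two are released in the opposite order. Below is a minimal user-space sketch of that ordering only; it uses pthread mutexes in place of raw_spinlock_t, and every name in it (ctx_lock, ctx_unlock, cpu_lock, task_lock) is illustrative, not a kernel API.

/*
 * Standalone illustration of the lock nesting seen in perf_ctx_lock()
 * and perf_ctx_unlock() above: outer (per-CPU) lock first, optional
 * inner (task) lock second, released in reverse order.
 * pthread mutexes stand in for the kernel's raw spinlocks; all names
 * here are made up for the example.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cpu_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;

static void ctx_lock(pthread_mutex_t *task)
{
	pthread_mutex_lock(&cpu_lock);		/* always take the per-CPU lock first */
	if (task)
		pthread_mutex_lock(task);	/* then the task context, if one exists */
}

static void ctx_unlock(pthread_mutex_t *task)
{
	if (task)
		pthread_mutex_unlock(task);	/* release in the reverse order */
	pthread_mutex_unlock(&cpu_lock);
}

int main(void)
{
	ctx_lock(&task_lock);			/* task event: both contexts held */
	printf("cpu + task contexts locked\n");
	ctx_unlock(&task_lock);

	ctx_lock(NULL);				/* CPU-only event: no task context */
	printf("cpu context locked\n");
	ctx_unlock(NULL);
	return 0;
}

The program only demonstrates the lock/unlock symmetry; the real perf_ctx_lock()/perf_ctx_unlock() pair applies the same order to cpuctx->ctx.lock and ctx->lock under interrupts-disabled raw spinlocks.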