Lines Matching refs:task (kernel/events/core.c)
226 if (ctx->task) { in event_function()
227 if (ctx->task != current) { in event_function()
259 struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */ in event_function_call() local
275 if (!task) { in event_function_call()
280 if (task == TASK_TOMBSTONE) in event_function_call()
284 if (!task_function_call(task, event_function, &efs)) in event_function_call()
292 task = ctx->task; in event_function_call()
293 if (task == TASK_TOMBSTONE) { in event_function_call()
313 struct task_struct *task = READ_ONCE(ctx->task); in event_function_local() local
318 if (task) { in event_function_local()
319 if (task == TASK_TOMBSTONE) in event_function_local()
327 task = ctx->task; in event_function_local()
328 if (task == TASK_TOMBSTONE) in event_function_local()
331 if (task) { in event_function_local()
338 if (WARN_ON_ONCE(task != current)) in event_function_local()
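
The two functions above exhibit the central lifetime idiom of this file: ctx->task is overwritten with TASK_TOMBSTONE when the context dies (hits 12506, 12615), readers sample it with READ_ONCE(), and cross-CPU work goes through task_function_call() with a retry under ctx->lock if the call raced with scheduling. perf_install_in_context() (hits 2852-2947) repeats the same loop with a task_curr() re-check. Below, a minimal portable sketch of that shape: a pthread mutex stands in for ctx->lock, and the stub helpers stand in for task_function_call() and task_curr(); every name here is illustrative, not kernel API.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct task;                                 /* opaque task_struct stand-in */
    #define TASK_TOMBSTONE ((struct task *)-1L)  /* "context is dying" marker */

    struct ctx {
        pthread_mutex_t lock;
        _Atomic(struct task *) task;             /* NULL for per-CPU contexts */
    };

    /* Stubs: the real task_function_call() IPIs the task's CPU and can
     * fail if the task migrates; task_curr() asks if it is on a CPU now. */
    static bool run_on_task_cpu(struct task *t) { (void)t; return false; }
    static bool task_is_running(struct task *t) { (void)t; return false; }

    static void call_event_function(struct ctx *ctx)
    {
        struct task *task;

    again:
        task = atomic_load(&ctx->task);          /* READ_ONCE(ctx->task) */
        if (task == TASK_TOMBSTONE)
            return;                              /* context already torn down */
        if (task && run_on_task_cpu(task))
            return;                              /* ran on the task's CPU */

        pthread_mutex_lock(&ctx->lock);
        task = atomic_load(&ctx->task);          /* re-read under the lock */
        if (task == TASK_TOMBSTONE)
            goto unlock;
        if (task && task_is_running(task)) {
            pthread_mutex_unlock(&ctx->lock);
            goto again;                          /* raced with schedule-in */
        }
        /* Context is quiescent: do the update locally. */
    unlock:
        pthread_mutex_unlock(&ctx->lock);
    }
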
572 struct task_struct *task);
767 perf_cgroup_set_timestamp(struct task_struct *task, in perf_cgroup_set_timestamp() argument
779 if (!task || !ctx->nr_cgroups) in perf_cgroup_set_timestamp()
782 cgrp = perf_cgroup_from_task(task, ctx); in perf_cgroup_set_timestamp()
802 static void perf_cgroup_switch(struct task_struct *task, int mode) in perf_cgroup_switch() argument
839 cpuctx->cgrp = perf_cgroup_from_task(task, in perf_cgroup_switch()
841 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); in perf_cgroup_switch()
850 static inline void perf_cgroup_sched_out(struct task_struct *task, in perf_cgroup_sched_out() argument
862 cgrp1 = perf_cgroup_from_task(task, NULL); in perf_cgroup_sched_out()
871 perf_cgroup_switch(task, PERF_CGROUP_SWOUT); in perf_cgroup_sched_out()
877 struct task_struct *task) in perf_cgroup_sched_in() argument
888 cgrp1 = perf_cgroup_from_task(task, NULL); in perf_cgroup_sched_in()
897 perf_cgroup_switch(task, PERF_CGROUP_SWIN); in perf_cgroup_sched_in()
1072 static inline void perf_cgroup_sched_out(struct task_struct *task, in perf_cgroup_sched_out() argument
1078 struct task_struct *task) in perf_cgroup_sched_in() argument
1090 perf_cgroup_set_timestamp(struct task_struct *task, in perf_cgroup_set_timestamp() argument
1096 perf_cgroup_switch(struct task_struct *task, struct task_struct *next) in perf_cgroup_switch() argument
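
Hits 767-897 are the CONFIG_CGROUP_PERF bodies; hits 1072-1096 are the empty !CONFIG_CGROUP_PERF stubs that let callers such as __perf_event_task_sched_out() stay unconditional. (The stub at 1096 takes (task, next) while the live version at 802 takes (task, mode); since nothing calls the stub in that configuration, the drift appears harmless.) A condensed sketch of the pattern, with the body elided and the function renamed to mark it as illustrative:

    struct task_struct;                 /* forward declaration suffices */

    #ifdef CONFIG_CGROUP_PERF
    static void perf_cgroup_switch_sketch(struct task_struct *task, int mode)
    {
        /* real work: retarget cpuctx->cgrp and reschedule cgroup events */
        (void)task; (void)mode;
    }
    #else
    /* No-op inline: compiles away, so call sites need no #ifdef. */
    static inline void perf_cgroup_switch_sketch(struct task_struct *task,
                                                 int mode)
    {
        (void)task; (void)mode;
    }
    #endif
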
1275 if (ctx->task && ctx->task != TASK_TOMBSTONE) in put_ctx()
1276 put_task_struct(ctx->task); in put_ctx()
1451 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) in perf_lock_task_context() argument
1467 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); in perf_lock_task_context()
1480 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { in perf_lock_task_context()
1487 if (ctx->task == TASK_TOMBSTONE || in perf_lock_task_context()
1492 WARN_ON_ONCE(ctx->task != task); in perf_lock_task_context()
1507 perf_pin_task_context(struct task_struct *task, int ctxn) in perf_pin_task_context() argument
1512 ctx = perf_lock_task_context(task, ctxn, &flags); in perf_pin_task_context()
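
perf_lock_task_context() (hits 1451-1492) is the lookup/lock/revalidate dance: dereference the context under RCU, take ctx->lock, then confirm both that the task still points at this context and that it has not been tombstoned; perf_pin_task_context() (1507-1512) wraps it to additionally pin the context. A portable sketch of the same shape; atomics stand in for RCU here, and the kernel's RCU grace period, which is what keeps ctx alive across the first load, is assumed away:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>

    struct task;
    #define TASK_TOMBSTONE ((struct task *)-1L)

    struct ctx {
        pthread_mutex_t lock;
        _Atomic(struct task *) task;
    };

    /* Analogue of the task->perf_event_ctxp[ctxn] slot. */
    struct ctx_slot {
        _Atomic(struct ctx *) ctxp;
    };

    static struct ctx *lock_task_context(struct ctx_slot *slot)
    {
        struct ctx *ctx;

    retry:
        ctx = atomic_load(&slot->ctxp);          /* rcu_dereference() */
        if (!ctx)
            return NULL;

        pthread_mutex_lock(&ctx->lock);
        if (ctx != atomic_load(&slot->ctxp)) {
            /* The task was repointed at another context; start over. */
            pthread_mutex_unlock(&ctx->lock);
            goto retry;
        }
        if (atomic_load(&ctx->task) == TASK_TOMBSTONE) {
            /* The context outlived its task. */
            pthread_mutex_unlock(&ctx->lock);
            return NULL;
        }
        return ctx;                              /* returned with lock held */
    }
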
1565 if (!ctx->task) in get_event_type()
2369 if (ctx->task) { in __perf_remove_from_context()
2686 struct task_struct *task);
2703 struct task_struct *task) in perf_event_sched_in() argument
2705 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task); in perf_event_sched_in()
2707 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task); in perf_event_sched_in()
2708 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task); in perf_event_sched_in()
2710 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task); in perf_event_sched_in()
2790 if (ctx->task) { in __perf_install_in_context()
2794 reprogram = (ctx->task == current); in __perf_install_in_context()
2803 if (task_curr(ctx->task) && !reprogram) { in __perf_install_in_context()
2852 struct task_struct *task = READ_ONCE(ctx->task); in perf_install_in_context() local
2877 if (ctx->task == TASK_TOMBSTONE) { in perf_install_in_context()
2886 if (!task) { in perf_install_in_context()
2894 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) in perf_install_in_context()
2929 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
2933 task = ctx->task; in perf_install_in_context()
2934 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) { in perf_install_in_context()
2947 if (task_curr(task)) { in perf_install_in_context()
2994 if (ctx->task) in __perf_event_enable()
3236 if (ctx->task) in ctx_sched_out()
3245 if (ctx->task) { in ctx_sched_out()
3394 static void perf_event_context_sched_out(struct task_struct *task, int ctxn, in perf_event_context_sched_out() argument
3397 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; in perf_event_context_sched_out()
3438 WRITE_ONCE(ctx->task, next); in perf_event_context_sched_out()
3439 WRITE_ONCE(next_ctx->task, task); in perf_event_context_sched_out()
3466 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx); in perf_event_context_sched_out()
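
Hits 3394-3466 are the context-flip optimization on task switch: when the outgoing and incoming tasks carry clone-equivalent contexts, the two contexts are swapped between the tasks under both locks instead of unscheduling and rescheduling every event. The grep shows only one half of the RCU pointer flip (3466) because the other half assigns through "next", not "task". A portable sketch under that reading, with a trivial stand-in for the kernel's context_equiv():

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct ctx;

    struct task {
        _Atomic(struct ctx *) ctxp;       /* perf_event_ctxp[ctxn] analogue */
    };

    struct ctx {
        pthread_mutex_t lock;
        _Atomic(struct task *) task;
        int generation;                   /* stand-in for the clone-gen checks */
    };

    /* Grossly simplified: the kernel compares clone parentage and
     * generation counters. */
    static bool context_equiv(struct ctx *a, struct ctx *b)
    {
        return a->generation == b->generation;
    }

    static void sched_out_swap(struct task *prev, struct task *next)
    {
        struct ctx *ctx = atomic_load(&prev->ctxp);
        struct ctx *next_ctx = atomic_load(&next->ctxp);

        if (!ctx || !next_ctx || !context_equiv(ctx, next_ctx))
            return;                       /* fall back to a full switch */

        /* Both locks are taken (in a fixed nesting order in the kernel). */
        pthread_mutex_lock(&ctx->lock);
        pthread_mutex_lock(&next_ctx->lock);

        /* The WRITE_ONCE() pair from hits 3438-3439: rebind each
         * context to the other task... */
        atomic_store(&ctx->task, next);
        atomic_store(&next_ctx->task, prev);

        /* ...and flip the published pointers to match (hit 3466). */
        atomic_store(&prev->ctxp, next_ctx);
        atomic_store(&next->ctxp, ctx);

        pthread_mutex_unlock(&next_ctx->lock);
        pthread_mutex_unlock(&ctx->lock);
    }
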
3559 static void perf_event_switch(struct task_struct *task,
3576 void __perf_event_task_sched_out(struct task_struct *task, in __perf_event_task_sched_out() argument
3582 perf_pmu_sched_task(task, next, false); in __perf_event_task_sched_out()
3585 perf_event_switch(task, next, false); in __perf_event_task_sched_out()
3588 perf_event_context_sched_out(task, ctxn, next); in __perf_event_task_sched_out()
3596 perf_cgroup_sched_out(task, next); in __perf_event_task_sched_out()
3788 struct task_struct *task) in ctx_sched_in() argument
3799 if (ctx->task) { in ctx_sched_in()
3812 perf_cgroup_set_timestamp(task, ctx); in ctx_sched_in()
3829 struct task_struct *task) in cpu_ctx_sched_in() argument
3833 ctx_sched_in(ctx, cpuctx, event_type, task); in cpu_ctx_sched_in()
3837 struct task_struct *task) in perf_event_context_sched_in() argument
3868 perf_event_sched_in(cpuctx, ctx, task); in perf_event_context_sched_in()
3891 struct task_struct *task) in __perf_event_task_sched_in() argument
3904 perf_cgroup_sched_in(prev, task); in __perf_event_task_sched_in()
3907 ctx = task->perf_event_ctxp[ctxn]; in __perf_event_task_sched_in()
3911 perf_event_context_sched_in(ctx, task); in __perf_event_task_sched_in()
3915 perf_event_switch(task, prev, true); in __perf_event_task_sched_in()
3918 perf_pmu_sched_task(prev, task, true); in __perf_event_task_sched_in()
4303 if (ctx->task && cpuctx->task_ctx != ctx) in __perf_event_read()
4519 alloc_perf_context(struct pmu *pmu, struct task_struct *task) in alloc_perf_context() argument
4528 if (task) in alloc_perf_context()
4529 ctx->task = get_task_struct(task); in alloc_perf_context()
4538 struct task_struct *task; in find_lively_task_by_vpid() local
4542 task = current; in find_lively_task_by_vpid()
4544 task = find_task_by_vpid(vpid); in find_lively_task_by_vpid()
4545 if (task) in find_lively_task_by_vpid()
4546 get_task_struct(task); in find_lively_task_by_vpid()
4549 if (!task) in find_lively_task_by_vpid()
4552 return task; in find_lively_task_by_vpid()
4559 find_get_context(struct pmu *pmu, struct task_struct *task, in find_get_context() argument
4569 if (!task) { in find_get_context()
4599 ctx = perf_lock_task_context(task, ctxn, &flags); in find_get_context()
4613 ctx = alloc_perf_context(pmu, task); in find_get_context()
4624 mutex_lock(&task->perf_event_mutex); in find_get_context()
4629 if (task->flags & PF_EXITING) in find_get_context()
4631 else if (task->perf_event_ctxp[ctxn]) in find_get_context()
4636 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); in find_get_context()
4638 mutex_unlock(&task->perf_event_mutex); in find_get_context()
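
find_get_context() (hits 4559-4638) allocates a fresh context when the task has none, then publishes it under task->perf_event_mutex, retrying if another thread won the race and erroring out if the task is already exiting (PF_EXITING). The sketch below keeps only the allocate/publish/retry skeleton and substitutes a compare-and-swap for the mutex-plus-recheck; the exiting check and error returns are omitted:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct ctx {
        int refcount;                      /* stand-in for ctx->refcount */
    };

    /* "slot" is the analogue of &task->perf_event_ctxp[ctxn]. */
    static struct ctx *find_get_context_sketch(_Atomic(struct ctx *) *slot)
    {
        struct ctx *ctx, *expected;

    retry:
        ctx = atomic_load(slot);
        if (ctx)
            return ctx;                    /* fast path: already published */

        ctx = calloc(1, sizeof(*ctx));
        if (!ctx)
            return NULL;                   /* kernel: ERR_PTR(-ENOMEM) */
        ctx->refcount = 1;

        expected = NULL;
        if (!atomic_compare_exchange_strong(slot, &expected, ctx)) {
            /* Lost the publish race; drop ours and take the winner's. */
            free(ctx);
            goto retry;
        }
        return ctx;
    }
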
4695 attr->task || attr->ksymbol || in is_sb_event()
4756 if (event->attr.task) in unaccount_event()
7122 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
7366 struct task_struct *task) in perf_event_read_event() argument
7376 .pid = perf_event_pid(event, task), in perf_event_read_event()
7377 .tid = perf_event_tid(event, task), in perf_event_read_event()
7616 struct task_struct *task; member
7634 event->attr.task; in perf_event_task_match()
7643 struct task_struct *task = task_event->task; in perf_event_task_output() local
7656 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
7657 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
7661 task->real_parent); in perf_event_task_output()
7663 task->real_parent); in perf_event_task_output()
7680 static void perf_event_task(struct task_struct *task, in perf_event_task() argument
7692 .task = task, in perf_event_task()
7713 void perf_event_fork(struct task_struct *task) in perf_event_fork() argument
7715 perf_event_task(task, NULL, 1); in perf_event_fork()
7716 perf_event_namespaces(task); in perf_event_fork()
7724 struct task_struct *task; member
7760 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
7761 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
7780 strlcpy(comm, comm_event->task->comm, sizeof(comm)); in perf_event_comm_event()
7793 void perf_event_comm(struct task_struct *task, bool exec) in perf_event_comm() argument
7801 .task = task, in perf_event_comm()
7823 struct task_struct *task; member
7860 namespaces_event->task); in perf_event_namespaces_output()
7862 namespaces_event->task); in perf_event_namespaces_output()
7874 struct task_struct *task, in perf_fill_ns_link_info() argument
7881 error = ns_get_path(&ns_path, task, ns_ops); in perf_fill_ns_link_info()
7890 void perf_event_namespaces(struct task_struct *task) in perf_event_namespaces() argument
7899 .task = task, in perf_event_namespaces()
7916 task, &mntns_operations); in perf_event_namespaces()
7920 task, &userns_operations); in perf_event_namespaces()
7924 task, &netns_operations); in perf_event_namespaces()
7928 task, &utsns_operations); in perf_event_namespaces()
7932 task, &ipcns_operations); in perf_event_namespaces()
7936 task, &pidns_operations); in perf_event_namespaces()
7940 task, &cgroupns_operations); in perf_event_namespaces()
8466 struct task_struct *task; member
8492 if (event->ctx->task) { in perf_event_switch_output()
8510 if (event->ctx->task) in perf_event_switch_output()
8520 static void perf_event_switch(struct task_struct *task, in perf_event_switch() argument
8528 .task = task, in perf_event_switch()
8541 if (!sched_in && task->state == TASK_RUNNING) in perf_event_switch()
9487 struct task_struct *task) in perf_trace_run_bpf_submit() argument
9497 rctx, task); in perf_trace_run_bpf_submit()
9503 struct task_struct *task) in perf_tp_event() argument
9529 if (task && task != current) { in perf_tp_event()
9534 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); in perf_tp_event()
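
Hits 9503-9534 are the cross-task delivery path in perf_tp_event(): when a tracepoint fires on behalf of a task other than current, that task's software context is dereferenced under RCU and only its tracepoint events are considered. A compact sketch of that lookup-and-filter shape; the list walk and type check are condensed stand-ins, not the kernel's structures:

    #include <stdatomic.h>
    #include <stddef.h>

    #define TYPE_TRACEPOINT 2              /* PERF_TYPE_TRACEPOINT analogue */

    struct event {
        int attr_type;
        struct event *next;
    };

    struct ctx {
        struct event *events;              /* RCU-protected list in-kernel */
    };

    struct task {
        _Atomic(struct ctx *) sw_ctx;      /* perf_event_ctxp[perf_sw_context] */
    };

    static void deliver(struct event *e) { (void)e; }  /* perf_swevent_event() */

    static void tp_event_deliver(struct task *task, struct task *current_task)
    {
        if (!task || task == current_task)
            return;                        /* local case handled elsewhere */

        struct ctx *ctx = atomic_load(&task->sw_ctx);  /* rcu_dereference() */
        if (!ctx)
            return;

        for (struct event *e = ctx->events; e; e = e->next)
            if (e->attr_type == TYPE_TRACEPOINT)
                deliver(e);
    }
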
10031 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply() local
10041 if (task == TASK_TOMBSTONE) in perf_event_addr_filters_apply()
10045 mm = get_task_mm(task); in perf_event_addr_filters_apply()
10250 if (!event->ctx->task) in perf_event_parse_addr_filter()
11210 if (event->attr.task) in account_event()
11267 struct task_struct *task, in perf_event_alloc() argument
11279 if (!task || cpu != -1) in perf_event_alloc()
11328 if (task) { in perf_event_alloc()
11335 event->hw.target = get_task_struct(task); in perf_event_alloc()
11786 struct task_struct *task = NULL; in SYSCALL_DEFINE5() local
11868 task = find_lively_task_by_vpid(pid); in SYSCALL_DEFINE5()
11869 if (IS_ERR(task)) { in SYSCALL_DEFINE5()
11870 err = PTR_ERR(task); in SYSCALL_DEFINE5()
11875 if (task && group_leader && in SYSCALL_DEFINE5()
11884 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
11940 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
11975 if (group_leader->ctx->task != ctx->task) in SYSCALL_DEFINE5()
12010 if (task) { in SYSCALL_DEFINE5()
12011 err = down_read_interruptible(&task->signal->exec_update_lock); in SYSCALL_DEFINE5()
12024 if (!perfmon_capable() && !ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) in SYSCALL_DEFINE5()
12031 if (gctx->task == TASK_TOMBSTONE) { in SYSCALL_DEFINE5()
12081 if (ctx->task == TASK_TOMBSTONE) { in SYSCALL_DEFINE5()
12091 if (!task) { in SYSCALL_DEFINE5()
12191 if (task) { in SYSCALL_DEFINE5()
12192 up_read(&task->signal->exec_update_lock); in SYSCALL_DEFINE5()
12193 put_task_struct(task); in SYSCALL_DEFINE5()
12215 if (task) in SYSCALL_DEFINE5()
12216 up_read(&task->signal->exec_update_lock); in SYSCALL_DEFINE5()
12230 if (task) in SYSCALL_DEFINE5()
12231 put_task_struct(task); in SYSCALL_DEFINE5()
12248 struct task_struct *task, in perf_event_create_kernel_counter() argument
12263 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
12276 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
12284 if (ctx->task == TASK_TOMBSTONE) { in perf_event_create_kernel_counter()
12289 if (!task) { in perf_event_create_kernel_counter()
12506 WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE); in perf_event_exit_task_context()
12595 void perf_event_free_task(struct task_struct *task) in perf_event_free_task() argument
12602 ctx = task->perf_event_ctxp[ctxn]; in perf_event_free_task()
12614 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL); in perf_event_free_task()
12615 WRITE_ONCE(ctx->task, TASK_TOMBSTONE); in perf_event_free_task()
12616 put_task_struct(task); /* cannot be last */ in perf_event_free_task()
12643 void perf_event_delayed_put(struct task_struct *task) in perf_event_delayed_put() argument
12648 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); in perf_event_delayed_put()
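
Hits 1275-1276, 4528-4529, and 12506-12648 together describe the ctx<->task reference cycle and how TASK_TOMBSTONE breaks it: alloc_perf_context() takes a counted task reference, put_ctx() drops it on final release unless teardown already replaced it with the tombstone, and perf_event_free_task() / perf_event_exit_task_context() store the tombstone and drop the reference themselves (the "cannot be last" comment at 12616 holds because the caller still owns a reference). A portable sketch of that protocol; refcounts are plain atomics and RCU-deferred freeing is omitted:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct task {
        atomic_int usage;
    };
    #define TASK_TOMBSTONE ((struct task *)-1L)

    struct ctx {
        atomic_int refcount;
        _Atomic(struct task *) task;    /* counted reference, or TOMBSTONE */
    };

    static void put_task(struct task *t)
    {
        if (atomic_fetch_sub(&t->usage, 1) == 1)
            free(t);
    }

    static void put_ctx(struct ctx *ctx)
    {
        if (atomic_fetch_sub(&ctx->refcount, 1) == 1) {
            struct task *task = atomic_load(&ctx->task);
            /* Drop the task reference unless teardown already did. */
            if (task && task != TASK_TOMBSTONE)
                put_task(task);
            free(ctx);                  /* kfree_rcu() in the kernel */
        }
    }

    /* Teardown: sever the back-pointer first, then drop the reference
     * the context held; the caller's own reference keeps the task alive. */
    static void free_task_context(struct ctx *ctx, struct task *task)
    {
        atomic_store(&ctx->task, TASK_TOMBSTONE);
        put_task(task);                 /* cannot be last */
        put_ctx(ctx);
    }
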
13233 struct task_struct *task = info; in __perf_cgroup_move() local
13235 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN); in __perf_cgroup_move()
13242 struct task_struct *task; in perf_cgroup_attach() local
13245 cgroup_taskset_for_each(task, css, tset) in perf_cgroup_attach()
13246 task_function_call(task, __perf_cgroup_move, task); in perf_cgroup_attach()
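
The final cluster (hits 13233-13246) is the cgroup migration hook: for every task being attached, task_function_call() runs __perf_cgroup_move() on that task's CPU with a combined switch-out/switch-in, so counting moves to the new cgroup atomically from the task's point of view. A stub-level sketch of that fan-out; in this userspace stand-in the "remote" call simply runs inline:

    #include <stdbool.h>

    struct task_struct;

    enum { CGRP_SWOUT = 1 << 0, CGRP_SWIN = 1 << 1 };  /* PERF_CGROUP_SW* */

    static void cgroup_switch_sketch(struct task_struct *t, int mode)
    {
        (void)t; (void)mode;   /* would resched cgroup events out and in */
    }

    /* Stand-in for task_function_call(): run fn on the task's CPU. */
    static bool run_on_task_cpu(struct task_struct *t,
                                void (*fn)(struct task_struct *, int),
                                int mode)
    {
        fn(t, mode);           /* inline here; an IPI in the kernel */
        return true;
    }

    static void cgroup_attach_sketch(struct task_struct **tasks, int n)
    {
        for (int i = 0; i < n; i++)
            run_on_task_cpu(tasks[i], cgroup_switch_sketch,
                            CGRP_SWOUT | CGRP_SWIN);
    }
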