Lines Matching refs:task

231 if (ctx->task) { in event_function()
232 if (ctx->task != current) { in event_function()
264 struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */ in event_function_call() local
280 if (!task) { in event_function_call()
285 if (task == TASK_TOMBSTONE) in event_function_call()
289 if (!task_function_call(task, event_function, &efs)) in event_function_call()
297 task = ctx->task; in event_function_call()
298 if (task == TASK_TOMBSTONE) { in event_function_call()
318 struct task_struct *task = READ_ONCE(ctx->task); in event_function_local() local
323 if (task) { in event_function_local()
324 if (task == TASK_TOMBSTONE) in event_function_local()
332 task = ctx->task; in event_function_local()
333 if (task == TASK_TOMBSTONE) in event_function_local()
336 if (task) { in event_function_local()
343 if (WARN_ON_ONCE(task != current)) in event_function_local()
831 static void perf_cgroup_switch(struct task_struct *task) in perf_cgroup_switch() argument
844 cgrp = perf_cgroup_from_task(task, NULL); in perf_cgroup_switch()
1057 static void perf_cgroup_switch(struct task_struct *task) in perf_cgroup_switch() argument
1216 if (ctx->task && ctx->task != TASK_TOMBSTONE) in put_ctx()
1217 put_task_struct(ctx->task); in put_ctx()
1392 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) in perf_lock_task_context() argument
1408 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); in perf_lock_task_context()
1421 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { in perf_lock_task_context()
1428 if (ctx->task == TASK_TOMBSTONE || in perf_lock_task_context()
1433 WARN_ON_ONCE(ctx->task != task); in perf_lock_task_context()
1448 perf_pin_task_context(struct task_struct *task, int ctxn) in perf_pin_task_context() argument
1453 ctx = perf_lock_task_context(task, ctxn, &flags); in perf_pin_task_context()
1545 if (!ctx->task) in get_event_type()
2414 if (ctx->task) { in __perf_remove_from_context()
2796 if (ctx->task) { in __perf_install_in_context()
2800 reprogram = (ctx->task == current); in __perf_install_in_context()
2809 if (task_curr(ctx->task) && !reprogram) { in __perf_install_in_context()
2858 struct task_struct *task = READ_ONCE(ctx->task); in perf_install_in_context() local
2886 if (ctx->task == TASK_TOMBSTONE) { in perf_install_in_context()
2895 if (!task) { in perf_install_in_context()
2903 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) in perf_install_in_context()
2938 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
2942 task = ctx->task; in perf_install_in_context()
2943 if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) { in perf_install_in_context()
2956 if (task_curr(task)) { in perf_install_in_context()
3003 if (ctx->task) in __perf_event_enable()
3281 if (ctx->task) in ctx_sched_out()
3311 if (ctx->task) { in ctx_sched_out()
3444 static void perf_event_context_sched_out(struct task_struct *task, int ctxn, in perf_event_context_sched_out() argument
3447 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn]; in perf_event_context_sched_out()
3502 WRITE_ONCE(ctx->task, next); in perf_event_context_sched_out()
3503 WRITE_ONCE(next_ctx->task, task); in perf_event_context_sched_out()
3528 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx); in perf_event_context_sched_out()
3622 static void perf_event_switch(struct task_struct *task,
3639 void __perf_event_task_sched_out(struct task_struct *task, in __perf_event_task_sched_out() argument
3645 perf_pmu_sched_task(task, next, false); in __perf_event_task_sched_out()
3648 perf_event_switch(task, next, false); in __perf_event_task_sched_out()
3651 perf_event_context_sched_out(task, ctxn, next); in __perf_event_task_sched_out()
3877 if (ctx->task) { in ctx_sched_in()
3907 struct task_struct *task) in perf_event_context_sched_in() argument
3968 struct task_struct *task) in __perf_event_task_sched_in() argument
3974 ctx = task->perf_event_ctxp[ctxn]; in __perf_event_task_sched_in()
3978 perf_event_context_sched_in(ctx, task); in __perf_event_task_sched_in()
3982 perf_event_switch(task, prev, true); in __perf_event_task_sched_in()
3985 perf_pmu_sched_task(prev, task, true); in __perf_event_task_sched_in()
4350 if (WARN_ON_ONCE(ctx->task != current)) in perf_event_remove_on_exec()
4420 if (ctx->task && cpuctx->task_ctx != ctx) in __perf_event_read()
4647 alloc_perf_context(struct pmu *pmu, struct task_struct *task) in alloc_perf_context() argument
4656 if (task) in alloc_perf_context()
4657 ctx->task = get_task_struct(task); in alloc_perf_context()
4666 struct task_struct *task; in find_lively_task_by_vpid() local
4670 task = current; in find_lively_task_by_vpid()
4672 task = find_task_by_vpid(vpid); in find_lively_task_by_vpid()
4673 if (task) in find_lively_task_by_vpid()
4674 get_task_struct(task); in find_lively_task_by_vpid()
4677 if (!task) in find_lively_task_by_vpid()
4680 return task; in find_lively_task_by_vpid()
4687 find_get_context(struct pmu *pmu, struct task_struct *task, in find_get_context() argument
4697 if (!task) { in find_get_context()
4727 ctx = perf_lock_task_context(task, ctxn, &flags); in find_get_context()
4741 ctx = alloc_perf_context(pmu, task); in find_get_context()
4752 mutex_lock(&task->perf_event_mutex); in find_get_context()
4757 if (task->flags & PF_EXITING) in find_get_context()
4759 else if (task->perf_event_ctxp[ctxn]) in find_get_context()
4764 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx); in find_get_context()
4766 mutex_unlock(&task->perf_event_mutex); in find_get_context()
4822 attr->task || attr->ksymbol || in is_sb_event()
4885 if (event->attr.task) in unaccount_event()
6530 if (WARN_ON_ONCE(event->ctx->task != current)) in perf_sigtrap()
7416 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
7693 struct task_struct *task) in perf_event_read_event() argument
7703 .pid = perf_event_pid(event, task), in perf_event_read_event()
7704 .tid = perf_event_tid(event, task), in perf_event_read_event()
7943 struct task_struct *task; member
7961 event->attr.task; in perf_event_task_match()
7970 struct task_struct *task = task_event->task; in perf_event_task_output() local
7983 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
7984 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
7988 task->real_parent); in perf_event_task_output()
7990 task->real_parent); in perf_event_task_output()
8007 static void perf_event_task(struct task_struct *task, in perf_event_task() argument
8019 .task = task, in perf_event_task()
8040 void perf_event_fork(struct task_struct *task) in perf_event_fork() argument
8042 perf_event_task(task, NULL, 1); in perf_event_fork()
8043 perf_event_namespaces(task); in perf_event_fork()
8051 struct task_struct *task; member
8087 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
8088 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
8107 strlcpy(comm, comm_event->task->comm, sizeof(comm)); in perf_event_comm_event()
8120 void perf_event_comm(struct task_struct *task, bool exec) in perf_event_comm() argument
8128 .task = task, in perf_event_comm()
8150 struct task_struct *task; member
8187 namespaces_event->task); in perf_event_namespaces_output()
8189 namespaces_event->task); in perf_event_namespaces_output()
8201 struct task_struct *task, in perf_fill_ns_link_info() argument
8208 error = ns_get_path(&ns_path, task, ns_ops); in perf_fill_ns_link_info()
8217 void perf_event_namespaces(struct task_struct *task) in perf_event_namespaces() argument
8226 .task = task, in perf_event_namespaces()
8243 task, &mntns_operations); in perf_event_namespaces()
8247 task, &userns_operations); in perf_event_namespaces()
8251 task, &netns_operations); in perf_event_namespaces()
8255 task, &utsns_operations); in perf_event_namespaces()
8259 task, &ipcns_operations); in perf_event_namespaces()
8263 task, &pidns_operations); in perf_event_namespaces()
8267 task, &cgroupns_operations); in perf_event_namespaces()
8807 struct task_struct *task; member
8833 if (event->ctx->task) { in perf_event_switch_output()
8851 if (event->ctx->task) in perf_event_switch_output()
8861 static void perf_event_switch(struct task_struct *task, in perf_event_switch() argument
8869 .task = task, in perf_event_switch()
8882 if (!sched_in && task->on_rq) { in perf_event_switch()
9906 struct task_struct *task) in perf_trace_run_bpf_submit() argument
9916 rctx, task); in perf_trace_run_bpf_submit()
9922 struct task_struct *task) in perf_tp_event() argument
9949 if (task && task != current) { in perf_tp_event()
9954 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); in perf_tp_event()
10457 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply() local
10467 if (task == TASK_TOMBSTONE) in perf_event_addr_filters_apply()
10471 mm = get_task_mm(task); in perf_event_addr_filters_apply()
10674 if (!event->ctx->task) in perf_event_parse_addr_filter()
11649 if (event->attr.task) in account_event()
11706 struct task_struct *task, in perf_event_alloc() argument
11719 if (!task || cpu != -1) in perf_event_alloc()
11722 if (attr->sigtrap && !task) { in perf_event_alloc()
11777 if (task) { in perf_event_alloc()
11784 event->hw.target = get_task_struct(task); in perf_event_alloc()
12228 perf_check_permission(struct perf_event_attr *attr, struct task_struct *task) in perf_check_permission() argument
12239 is_capable &= ns_capable(__task_cred(task)->user_ns, CAP_KILL); in perf_check_permission()
12255 return is_capable || ptrace_may_access(task, ptrace_mode); in perf_check_permission()
12277 struct task_struct *task = NULL; in SYSCALL_DEFINE5() local
12359 task = find_lively_task_by_vpid(pid); in SYSCALL_DEFINE5()
12360 if (IS_ERR(task)) { in SYSCALL_DEFINE5()
12361 err = PTR_ERR(task); in SYSCALL_DEFINE5()
12366 if (task && group_leader && in SYSCALL_DEFINE5()
12375 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
12431 ctx = find_get_context(pmu, task, event); in SYSCALL_DEFINE5()
12466 if (group_leader->ctx->task != ctx->task) in SYSCALL_DEFINE5()
12501 if (task) { in SYSCALL_DEFINE5()
12502 err = down_read_interruptible(&task->signal->exec_update_lock); in SYSCALL_DEFINE5()
12513 if (!perf_check_permission(&attr, task)) in SYSCALL_DEFINE5()
12520 if (gctx->task == TASK_TOMBSTONE) { in SYSCALL_DEFINE5()
12570 if (ctx->task == TASK_TOMBSTONE) { in SYSCALL_DEFINE5()
12580 if (!task) { in SYSCALL_DEFINE5()
12680 if (task) { in SYSCALL_DEFINE5()
12681 up_read(&task->signal->exec_update_lock); in SYSCALL_DEFINE5()
12682 put_task_struct(task); in SYSCALL_DEFINE5()
12704 if (task) in SYSCALL_DEFINE5()
12705 up_read(&task->signal->exec_update_lock); in SYSCALL_DEFINE5()
12719 if (task) in SYSCALL_DEFINE5()
12720 put_task_struct(task); in SYSCALL_DEFINE5()
12739 struct task_struct *task, in perf_event_create_kernel_counter() argument
12754 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
12767 ctx = find_get_context(event->pmu, task, event); in perf_event_create_kernel_counter()
12775 if (ctx->task == TASK_TOMBSTONE) { in perf_event_create_kernel_counter()
12780 if (!task) { in perf_event_create_kernel_counter()
12888 struct task_struct *task = child_event->ctx->task; in sync_child_event() local
12890 if (task && task != TASK_TOMBSTONE) in sync_child_event()
12891 perf_event_read_event(child_event, task); in sync_child_event()
12993 WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE); in perf_event_exit_task_context()
13082 void perf_event_free_task(struct task_struct *task) in perf_event_free_task() argument
13089 ctx = task->perf_event_ctxp[ctxn]; in perf_event_free_task()
13101 RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL); in perf_event_free_task()
13102 WRITE_ONCE(ctx->task, TASK_TOMBSTONE); in perf_event_free_task()
13103 put_task_struct(task); /* cannot be last */ in perf_event_free_task()
13130 void perf_event_delayed_put(struct task_struct *task) in perf_event_delayed_put() argument
13135 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); in perf_event_delayed_put()
13729 struct task_struct *task = info; in __perf_cgroup_move() local
13731 perf_cgroup_switch(task); in __perf_cgroup_move()
13738 struct task_struct *task; in perf_cgroup_attach() local
13741 cgroup_taskset_for_each(task, css, tset) in perf_cgroup_attach()
13742 task_function_call(task, __perf_cgroup_move, task); in perf_cgroup_attach()
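
The matches in event_function_call() above (source lines 264-298) outline the pattern this file uses whenever work must run against a task's perf context: read ctx->task once, treat TASK_TOMBSTONE as a context being torn down, and retry task_function_call() until the cross-call lands while the task is on a CPU. The sketch below is a reading aid reconstructed from those fragments, not a buildable module: it assumes the kernel-internal types and helpers named in the listing (READ_ONCE, TASK_TOMBSTONE, task_function_call, raw_spin_lock_irq), and it elides the event_function_struct argument and ctx->mutex assertions that the real code carries.

    /*
     * Sketch of the ctx->task retry pattern visible in event_function_call().
     * The CPU-context (!task) path via cpu_function_call() is reduced to a
     * comment, and the remote-call argument is passed as plain data.
     */
    static void event_function_call(struct perf_event *event, event_f func, void *data)
    {
            struct perf_event_context *ctx = event->ctx;
            struct task_struct *task = READ_ONCE(ctx->task);

            if (!task) {
                    /* CPU-bound context: dispatch with cpu_function_call() instead. */
                    return;
            }
            if (task == TASK_TOMBSTONE)
                    return;                 /* context already disconnected from its task */
    again:
            if (!task_function_call(task, event_function, data))
                    return;                 /* func ran remotely on the task's CPU */

            raw_spin_lock_irq(&ctx->lock);
            /* Re-read: a concurrent sched_out may have swapped or tombstoned the context. */
            task = ctx->task;
            if (task == TASK_TOMBSTONE) {
                    raw_spin_unlock_irq(&ctx->lock);
                    return;
            }
            if (ctx->is_active) {
                    raw_spin_unlock_irq(&ctx->lock);
                    goto again;             /* raced with sched-in; retry the cross-call */
            }
            func(event, NULL, ctx, data);   /* task is not running: safe to run locally */
            raw_spin_unlock_irq(&ctx->lock);
    }

The same tombstone-then-retry shape recurs in perf_install_in_context() (source lines 2858-2956 above); only the locking details and the action performed under ctx->lock differ.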