/kernel/ |
D | tsacct.c |
    22  struct taskstats *stats, struct task_struct *tsk)  in bacct_add_tsk() argument
    34  delta = now_ns - tsk->group_leader->start_time;  in bacct_add_tsk()
    38  delta = now_ns - tsk->start_time;  in bacct_add_tsk()
    46  if (tsk->flags & PF_EXITING)  in bacct_add_tsk()
    47  stats->ac_exitcode = tsk->exit_code;  in bacct_add_tsk()
    48  if (thread_group_leader(tsk) && (tsk->flags & PF_FORKNOEXEC))  in bacct_add_tsk()
    50  if (tsk->flags & PF_SUPERPRIV)  in bacct_add_tsk()
    52  if (tsk->flags & PF_DUMPCORE)  in bacct_add_tsk()
    54  if (tsk->flags & PF_SIGNALED)  in bacct_add_tsk()
    56  stats->ac_nice = task_nice(tsk);  in bacct_add_tsk()
    [all …]
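
The bacct_add_tsk() hits above show task flag bits (PF_EXITING, PF_SUPERPRIV, …) being folded into a taskstats accounting record one test at a time. A minimal userspace sketch of that flag-translation shape; the MYPF_*/MYAC_* bits and struct names are hypothetical, not the kernel's:

    /* Sketch only: task state bits are copied into an accounting record
     * one conditional at a time, mirroring bacct_add_tsk() above. */
    #include <stdint.h>
    #include <stdio.h>

    #define MYPF_EXITING   0x1
    #define MYPF_SUPERPRIV 0x2
    #define MYPF_DUMPCORE  0x4

    #define MYAC_SU   0x1   /* used superuser privileges */
    #define MYAC_CORE 0x2   /* dumped core */

    struct my_task  { uint32_t flags; int exit_code; int nice; };
    struct my_stats { uint32_t ac_flag; int ac_exitcode; int ac_nice; };

    static void bacct_add(struct my_stats *stats, const struct my_task *tsk)
    {
        if (tsk->flags & MYPF_EXITING)
            stats->ac_exitcode = tsk->exit_code;  /* capture exit status */
        if (tsk->flags & MYPF_SUPERPRIV)
            stats->ac_flag |= MYAC_SU;
        if (tsk->flags & MYPF_DUMPCORE)
            stats->ac_flag |= MYAC_CORE;
        stats->ac_nice = tsk->nice;
    }

    int main(void)
    {
        struct my_task t = { .flags = MYPF_EXITING | MYPF_DUMPCORE, .exit_code = 9 };
        struct my_stats s = { 0 };

        bacct_add(&s, &t);
        printf("ac_flag=%#x exitcode=%d\n", (unsigned)s.ac_flag, s.ac_exitcode);
        return 0;
    }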
|
D | exit.c |
   144  static void __exit_signal(struct task_struct *tsk)  in __exit_signal() argument
   146  struct signal_struct *sig = tsk->signal;  in __exit_signal()
   147  bool group_dead = thread_group_leader(tsk);  in __exit_signal()
   152  sighand = rcu_dereference_check(tsk->sighand,  in __exit_signal()
   157  posix_cpu_timers_exit(tsk);  in __exit_signal()
   159  posix_cpu_timers_exit_group(tsk);  in __exit_signal()
   173  if (tsk == sig->curr_target)  in __exit_signal()
   174  sig->curr_target = next_thread(tsk);  in __exit_signal()
   177  add_device_randomness((const void*) &tsk->se.sum_exec_runtime,  in __exit_signal()
   186  task_cputime(tsk, &utime, &stime);  in __exit_signal()
   [all …]
|
D | delayacct.c |
    90  void __delayacct_tsk_init(struct task_struct *tsk)  in __delayacct_tsk_init() argument
    92  tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);  in __delayacct_tsk_init()
    93  if (tsk->delays)  in __delayacct_tsk_init()
    94  raw_spin_lock_init(&tsk->delays->lock);  in __delayacct_tsk_init()
   131  int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)  in delayacct_add_tsk() argument
   138  task_cputime(tsk, &utime, &stime);  in delayacct_add_tsk()
   143  task_cputime_scaled(tsk, &utimescaled, &stimescaled);  in delayacct_add_tsk()
   153  t1 = tsk->sched_info.pcount;  in delayacct_add_tsk()
   154  t2 = tsk->sched_info.run_delay;  in delayacct_add_tsk()
   155  t3 = tsk->se.sum_exec_runtime;  in delayacct_add_tsk()
   [all …]
|
D | fork.c |
   169  void __weak arch_release_task_struct(struct task_struct *tsk)  in arch_release_task_struct() argument
   181  static inline void free_task_struct(struct task_struct *tsk)  in free_task_struct() argument
   183  kmem_cache_free(task_struct_cachep, tsk);  in free_task_struct()
   230  static void thread_stack_delayed_free(struct task_struct *tsk)  in thread_stack_delayed_free() argument
   232  struct vm_stack *vm_stack = tsk->stack;  in thread_stack_delayed_free()
   234  vm_stack->stack_vm_area = tsk->stack_vm_area;  in thread_stack_delayed_free()
   281  static int alloc_thread_stack_node(struct task_struct *tsk, int node)  in alloc_thread_stack_node() argument
   308  tsk->stack_vm_area = s;  in alloc_thread_stack_node()
   309  tsk->stack = stack;  in alloc_thread_stack_node()
   336  tsk->stack_vm_area = vm;  in alloc_thread_stack_node()
   [all …]
|
D | smpboot.c |
    32  struct task_struct *tsk = per_cpu(idle_threads, cpu);  in idle_thread_get() local
    34  if (!tsk)  in idle_thread_get()
    36  return tsk;  in idle_thread_get()
    52  struct task_struct *tsk = per_cpu(idle_threads, cpu);  in idle_init() local
    54  if (!tsk) {  in idle_init()
    55  tsk = fork_idle(cpu);  in idle_init()
    56  if (IS_ERR(tsk))  in idle_init()
    59  per_cpu(idle_threads, cpu) = tsk;  in idle_init()
   172  struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);  in __smpboot_create_thread() local
   175  if (tsk)  in __smpboot_create_thread()
   [all …]
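
idle_init() above shows the kernel's pointer-encoded-error idiom: fork_idle() returns either a valid task pointer or an errno packed into the pointer, checked with IS_ERR() and decoded with PTR_ERR(), and the result is cached per CPU so the fork happens only once. A runnable sketch, with simplified versions of the <linux/err.h> macros and a hypothetical fake_fork_idle() standing in for fork_idle():

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified copies of the kernel's ERR_PTR/IS_ERR/PTR_ERR. */
    #define MAX_ERRNO    4095
    #define ERR_PTR(err) ((void *)(intptr_t)(err))
    #define PTR_ERR(ptr) ((long)(intptr_t)(ptr))
    #define IS_ERR(ptr)  ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

    struct task { int cpu; };

    static struct task *threads[4];   /* stand-in for per_cpu(idle_threads) */
    static struct task storage[4];

    static struct task *fake_fork_idle(int cpu)
    {
        if (cpu >= 4)
            return ERR_PTR(-ENOMEM);  /* errno encoded in the pointer */
        printf("forked idle thread for cpu %d\n", cpu);
        storage[cpu].cpu = cpu;
        return &storage[cpu];
    }

    static int idle_init(int cpu)
    {
        struct task *tsk = threads[cpu];

        if (!tsk) {                   /* create only on first use */
            tsk = fake_fork_idle(cpu);
            if (IS_ERR(tsk))
                return (int)PTR_ERR(tsk);  /* decode errno */
            threads[cpu] = tsk;
        }
        return 0;
    }

    int main(void)
    {
        printf("cpu0: %d\n", idle_init(0));  /* forks and caches */
        printf("cpu0: %d\n", idle_init(0));  /* reused, no second fork */
        return 0;
    }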
|
D | taskstats.c |
   157  static void exe_add_tsk(struct taskstats *stats, struct task_struct *tsk)  in exe_add_tsk() argument
   160  struct file *exe_file = get_task_exe_file(tsk);  in exe_add_tsk()
   176  struct task_struct *tsk, struct taskstats *stats)  in fill_stats() argument
   186  delayacct_add_tsk(stats, tsk);  in fill_stats()
   190  stats->nvcsw = tsk->nvcsw;  in fill_stats()
   191  stats->nivcsw = tsk->nivcsw;  in fill_stats()
   192  bacct_add_tsk(user_ns, pid_ns, stats, tsk);  in fill_stats()
   195  xacct_add_tsk(stats, tsk);  in fill_stats()
   198  exe_add_tsk(stats, tsk);  in fill_stats()
   203  struct task_struct *tsk;  in fill_stats_for_pid() local
   [all …]
|
D | cred.c |
   162  void exit_creds(struct task_struct *tsk)  in exit_creds() argument
   166  kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,  in exit_creds()
   167  atomic_read(&tsk->cred->usage),  in exit_creds()
   168  read_cred_subscribers(tsk->cred));  in exit_creds()
   170  cred = (struct cred *) tsk->real_cred;  in exit_creds()
   171  tsk->real_cred = NULL;  in exit_creds()
   176  cred = (struct cred *) tsk->cred;  in exit_creds()
   177  tsk->cred = NULL;  in exit_creds()
   183  key_put(tsk->cached_requested_key);  in exit_creds()
   184  tsk->cached_requested_key = NULL;  in exit_creds()
   [all …]
|
D | scs.c |
   114  int scs_prepare(struct task_struct *tsk, int node)  in scs_prepare() argument
   125  task_scs(tsk) = task_scs_sp(tsk) = s;  in scs_prepare()
   129  static void scs_check_usage(struct task_struct *tsk)  in scs_check_usage() argument
   138  for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) {  in scs_check_usage()
   149  tsk->comm, task_pid_nr(tsk), used);  in scs_check_usage()
   157  void scs_release(struct task_struct *tsk)  in scs_release() argument
   159  void *s = task_scs(tsk);  in scs_release()
   164  WARN(task_scs_end_corrupted(tsk),  in scs_release()
   166  scs_check_usage(tsk);  in scs_release()
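
scs_check_usage() above estimates shadow-call-stack usage by scanning the zero-initialized stack for slots that ever held a return address. A sketch of that high-water-mark scan, assuming an upward-growing stack; the names and sizes are illustrative, not the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    #define SCS_WORDS 16

    /* The stack grows up from base; a slot that was ever used holds a
     * saved (non-zero) return address, so the first zero slot bounds
     * how deep the stack ever grew. */
    static size_t scs_used_bytes(const unsigned long *base, size_t words)
    {
        size_t i;

        for (i = 0; i < words && base[i]; i++)
            ;
        return i * sizeof(unsigned long);
    }

    int main(void)
    {
        unsigned long scs[SCS_WORDS] = { 0 };

        scs[0] = 0x4000a0;  /* pretend three calls were recorded */
        scs[1] = 0x4001b4;
        scs[2] = 0x4002c8;
        printf("used: %zu bytes\n", scs_used_bytes(scs, SCS_WORDS));
        return 0;
    }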
|
D | nsproxy.c |
    68  struct task_struct *tsk, struct user_namespace *user_ns,  in create_new_namespaces() argument
    78  new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, user_ns, new_fs);  in create_new_namespaces()
    84  new_nsp->uts_ns = copy_utsname(flags, user_ns, tsk->nsproxy->uts_ns);  in create_new_namespaces()
    90  new_nsp->ipc_ns = copy_ipcs(flags, user_ns, tsk->nsproxy->ipc_ns);  in create_new_namespaces()
    97  copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children);  in create_new_namespaces()
   104  tsk->nsproxy->cgroup_ns);  in create_new_namespaces()
   110  new_nsp->net_ns = copy_net_ns(flags, user_ns, tsk->nsproxy->net_ns);  in create_new_namespaces()
   117  tsk->nsproxy->time_ns_for_children);  in create_new_namespaces()
   122  new_nsp->time_ns = get_time_ns(tsk->nsproxy->time_ns);  in create_new_namespaces()
   151  int copy_namespaces(unsigned long flags, struct task_struct *tsk)  in copy_namespaces() argument
   [all …]
|
D | stacktrace.c |
   136  unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,  in stack_trace_save_tsk() argument
   144  .skip = skipnr + (current == tsk),  in stack_trace_save_tsk()
   147  if (!try_get_task_stack(tsk))  in stack_trace_save_tsk()
   150  arch_stack_walk(consume_entry, &c, tsk, NULL);  in stack_trace_save_tsk()
   151  put_task_stack(tsk);  in stack_trace_save_tsk()
   193  int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,  in stack_trace_save_tsk_reliable() argument
   207  if (!try_get_task_stack(tsk))  in stack_trace_save_tsk_reliable()
   210  ret = arch_stack_walk_reliable(consume_entry, &c, tsk);  in stack_trace_save_tsk_reliable()
   211  put_task_stack(tsk);  in stack_trace_save_tsk_reliable()
   250  save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)  in save_stack_trace_tsk() argument
   [all …]
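
Both savers above bracket the walk with try_get_task_stack()/put_task_stack(): pin the stack, bail out reporting zero entries if it is already gone, and drop the pin on every path. A userspace sketch of that try-get/put discipline with a hypothetical refcounted object:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj { atomic_int refs; int data; };

    /* Take a reference only if the object is still live (refs > 0). */
    static bool try_get(struct obj *o)
    {
        int r = atomic_load(&o->refs);

        while (r > 0)
            if (atomic_compare_exchange_weak(&o->refs, &r, r + 1))
                return true;
        return false;
    }

    static void put(struct obj *o)
    {
        atomic_fetch_sub(&o->refs, 1);  /* real code would free at zero */
    }

    static int use_obj(struct obj *o)
    {
        if (!try_get(o))
            return 0;           /* object already dead: report no work */
        int v = o->data;        /* safe: pinned between get and put */
        put(o);
        return v;
    }

    int main(void)
    {
        struct obj live = { .refs = 1, .data = 42 };
        struct obj dead = { .refs = 0, .data = 7 };

        printf("%d %d\n", use_obj(&live), use_obj(&dead));  /* 42 0 */
        return 0;
    }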
|
D | signal.c |
   518  struct task_struct *tsk = current;  in flush_itimer_signals() local
   521  spin_lock_irqsave(&tsk->sighand->siglock, flags);  in flush_itimer_signals()
   522  __flush_itimer_signals(&tsk->pending);  in flush_itimer_signals()
   523  __flush_itimer_signals(&tsk->signal->shared_pending);  in flush_itimer_signals()
   524  spin_unlock_irqrestore(&tsk->sighand->siglock, flags);  in flush_itimer_signals()
   559  bool unhandled_signal(struct task_struct *tsk, int sig)  in unhandled_signal() argument
   561  void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;  in unhandled_signal()
   562  if (is_global_init(tsk))  in unhandled_signal()
   569  if (fatal_signal_pending(tsk))  in unhandled_signal()
   573  return !tsk->ptrace;  in unhandled_signal()
   [all …]
|
D | latencytop.c |
   112  account_global_scheduler_latency(struct task_struct *tsk,  in account_global_scheduler_latency() argument
   119  if (!tsk->mm)  in account_global_scheduler_latency()
   177  __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)  in __account_scheduler_latency() argument
   197  stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);  in __account_scheduler_latency()
   201  account_global_scheduler_latency(tsk, &lat);  in __account_scheduler_latency()
   203  for (i = 0; i < tsk->latency_record_count; i++) {  in __account_scheduler_latency()
   207  mylat = &tsk->latency_record[i];  in __account_scheduler_latency()
   232  if (tsk->latency_record_count >= LT_SAVECOUNT)  in __account_scheduler_latency()
   236  i = tsk->latency_record_count++;  in __account_scheduler_latency()
   237  memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));  in __account_scheduler_latency()
|
D | kthread.c |
    98  void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)  in get_kthread_comm() argument
   100  struct kthread *kthread = to_kthread(tsk);  in get_kthread_comm()
   103  __get_task_comm(buf, buf_size, tsk);  in get_kthread_comm()
   392  int tsk_fork_get_node(struct task_struct *tsk)  in tsk_fork_get_node() argument
   395  if (tsk == kthreadd_task)  in tsk_fork_get_node()
   396  return tsk->pref_node_fork;  in tsk_fork_get_node()
   732  struct task_struct *tsk = current;  in kthreadd() local
   735  set_task_comm(tsk, "kthreadd");  in kthreadd()
   736  ignore_signals(tsk);  in kthreadd()
   737  set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));  in kthreadd()
   [all …]
|
D | auditsc.c |
   377  static int audit_field_compare(struct task_struct *tsk,  in audit_field_compare() argument
   394  return audit_compare_uid(audit_get_loginuid(tsk), name, f, ctx);  in audit_field_compare()
   406  audit_get_loginuid(tsk));  in audit_field_compare()
   415  return audit_uid_comparator(audit_get_loginuid(tsk), f->op,  in audit_field_compare()
   418  return audit_uid_comparator(audit_get_loginuid(tsk), f->op,  in audit_field_compare()
   421  return audit_uid_comparator(audit_get_loginuid(tsk), f->op,  in audit_field_compare()
   461  static int audit_filter_rules(struct task_struct *tsk,  in audit_filter_rules() argument
   476  cred = rcu_dereference_check(tsk->cred, tsk == current || task_creation);  in audit_filter_rules()
   486  pid = task_tgid_nr(tsk);  in audit_filter_rules()
   492  ctx->ppid = task_ppid_nr(tsk);  in audit_filter_rules()
   [all …]
|
D | ptrace.c |
    43  int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,  in ptrace_access_vm() argument
    49  mm = get_task_mm(tsk);  in ptrace_access_vm()
    53  if (!tsk->ptrace ||  in ptrace_access_vm()
    54  (current != tsk->parent) ||  in ptrace_access_vm()
    56  !ptracer_capable(tsk, mm->user_ns))) {  in ptrace_access_vm()
   626  int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)  in ptrace_readdata() argument
   635  retval = ptrace_access_vm(tsk, src, buf, this_len, FOLL_FORCE);  in ptrace_readdata()
   652  int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)  in ptrace_writedata() argument
   663  retval = ptrace_access_vm(tsk, dst, buf, this_len,  in ptrace_writedata()
  1306  int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,  in generic_ptrace_peekdata() argument
   [all …]
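
ptrace_readdata() above copies an arbitrarily large range through a fixed-size bounce buffer, one chunk per iteration, and returns early with whatever was copied if a chunk comes up short. A sketch of that chunking loop; copy_chunk() is a hypothetical stand-in for ptrace_access_vm():

    #include <stdio.h>
    #include <string.h>

    #define BOUNCE_SIZE 128

    /* Stand-in for the cross-address-space copy; a real one may return
     * fewer bytes than requested on a fault. */
    static int copy_chunk(char *dst, const char *src, int len)
    {
        memcpy(dst, src, len);
        return len;
    }

    static int read_data(const char *src, char *dst, int len)
    {
        char buf[BOUNCE_SIZE];
        int copied = 0;

        while (len > 0) {
            int this_len = len > BOUNCE_SIZE ? BOUNCE_SIZE : len;
            int retval = copy_chunk(buf, src, this_len);

            if (retval != this_len)       /* short copy: report progress */
                return copied ? copied : -1;
            memcpy(dst, buf, retval);
            copied += retval;
            src += retval;
            dst += retval;
            len -= retval;
        }
        return copied;
    }

    int main(void)
    {
        char src[300], dst[300];

        memset(src, 'x', sizeof(src));
        printf("copied %d bytes\n", read_data(src, dst, sizeof(src)));
        return 0;
    }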
|
/kernel/time/ |
D | posix-cpu-timers.c |
    78  struct task_struct *tsk = pid_task(pid, PIDTYPE_PID);  in pid_for_clock() local
    79  return (tsk && same_thread_group(tsk, current)) ? pid : NULL;  in pid_for_clock()
   274  void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)  in thread_group_sample_cputime() argument
   276  struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;  in thread_group_sample_cputime()
   277  struct posix_cputimers *pct = &tsk->signal->posix_cputimers;  in thread_group_sample_cputime()
   296  static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)  in thread_group_start_cputime() argument
   298  struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;  in thread_group_start_cputime()
   299  struct posix_cputimers *pct = &tsk->signal->posix_cputimers;  in thread_group_start_cputime()
   301  lockdep_assert_task_sighand_held(tsk);  in thread_group_start_cputime()
   312  thread_group_cputime(tsk, &sum);  in thread_group_start_cputime()
   [all …]
|
D | itimer.c |
    47  static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,  in get_cpu_itimer() argument
    51  struct cpu_itimer *it = &tsk->signal->it[clock_id];  in get_cpu_itimer()
    53  spin_lock_irq(&tsk->sighand->siglock);  in get_cpu_itimer()
    60  thread_group_sample_cputime(tsk, samples);  in get_cpu_itimer()
    70  spin_unlock_irq(&tsk->sighand->siglock);  in get_cpu_itimer()
    78  struct task_struct *tsk = current;  in do_getitimer() local
    82  spin_lock_irq(&tsk->sighand->siglock);  in do_getitimer()
    83  value->it_value = itimer_get_remtime(&tsk->signal->real_timer);  in do_getitimer()
    85  ktime_to_timespec64(tsk->signal->it_real_incr);  in do_getitimer()
    86  spin_unlock_irq(&tsk->sighand->siglock);  in do_getitimer()
   [all …]
|
D | tick-sched.c |
   362  static void tick_nohz_kick_task(struct task_struct *tsk)  in tick_nohz_kick_task() argument
   378  if (!sched_task_on_rq(tsk))  in tick_nohz_kick_task()
   394  cpu = task_cpu(tsk);  in tick_nohz_kick_task()
   482  void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)  in tick_nohz_dep_set_task() argument
   484  if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask))  in tick_nohz_dep_set_task()
   485  tick_nohz_kick_task(tsk);  in tick_nohz_dep_set_task()
   489  void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)  in tick_nohz_dep_clear_task() argument
   491  atomic_andnot(BIT(bit), &tsk->tick_dep_mask);  in tick_nohz_dep_clear_task()
   499  void tick_nohz_dep_set_signal(struct task_struct *tsk,  in tick_nohz_dep_set_signal() argument
   503  struct signal_struct *sig = tsk->signal;  in tick_nohz_dep_set_signal()
   [all …]
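
tick_nohz_dep_set_task() above relies on atomic_fetch_or() returning the previous mask: the expensive kick runs only when the dependency mask goes from empty to non-empty. A runnable C11 sketch of that first-setter-kicks idiom; the names are illustrative:

    #include <stdatomic.h>
    #include <stdio.h>

    enum dep_bits { DEP_POSIX_TIMER, DEP_PERF, DEP_SCHED };

    static atomic_uint dep_mask;

    static void kick(void) { puts("kick: re-enable the periodic tick"); }

    static void dep_set(enum dep_bits bit)
    {
        /* fetch_or returns the old mask: only the first setter kicks. */
        if (!atomic_fetch_or(&dep_mask, 1u << bit))
            kick();
    }

    static void dep_clear(enum dep_bits bit)
    {
        atomic_fetch_and(&dep_mask, ~(1u << bit));
    }

    int main(void)
    {
        dep_set(DEP_PERF);         /* kicks */
        dep_set(DEP_SCHED);        /* mask already non-zero: no kick */
        dep_clear(DEP_PERF);
        dep_clear(DEP_SCHED);
        dep_set(DEP_POSIX_TIMER);  /* mask empty again: kicks */
        return 0;
    }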
|
/kernel/sched/ |
D | cputime.c |
   323  void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)  in thread_group_cputime() argument
   325  struct signal_struct *sig = tsk->signal;  in thread_group_cputime()
   339  if (same_thread_group(current, tsk))  in thread_group_cputime()
   352  for_each_thread(tsk, t) {  in thread_group_cputime()
   452  void vtime_account_irq(struct task_struct *tsk, unsigned int offset)  in vtime_account_irq() argument
   457  vtime_account_hardirq(tsk);  in vtime_account_irq()
   459  vtime_account_softirq(tsk);  in vtime_account_irq()
   461  is_idle_task(tsk)) {  in vtime_account_irq()
   462  vtime_account_idle(tsk);  in vtime_account_irq()
   464  vtime_account_kernel(tsk);  in vtime_account_irq()
   [all …]
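
thread_group_cputime() above starts from the totals already folded in for exited threads and then adds each live thread's counters under for_each_thread(). A sketch of that aggregation with hypothetical reduced structures:

    #include <stdint.h>
    #include <stdio.h>

    struct thread { uint64_t utime, stime; };

    struct group {
        uint64_t dead_utime, dead_stime;  /* folded in as threads exited */
        struct thread *threads;
        int nr_threads;
    };

    static void group_cputime(const struct group *g, uint64_t *ut, uint64_t *st)
    {
        *ut = g->dead_utime;  /* baseline: already-exited threads */
        *st = g->dead_stime;
        for (int i = 0; i < g->nr_threads; i++) {  /* for_each_thread() */
            *ut += g->threads[i].utime;
            *st += g->threads[i].stime;
        }
    }

    int main(void)
    {
        struct thread t[2] = { { 10, 4 }, { 7, 1 } };
        struct group g = { .dead_utime = 100, .dead_stime = 30,
                           .threads = t, .nr_threads = 2 };
        uint64_t ut, st;

        group_cputime(&g, &ut, &st);
        printf("utime=%llu stime=%llu\n",
               (unsigned long long)ut, (unsigned long long)st);
        return 0;
    }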
|
D | cpuacct.c |
    37  static inline struct cpuacct *task_ca(struct task_struct *tsk)  in task_ca() argument
    39  return css_ca(task_css(tsk, cpuacct_cgrp_id));  in task_ca()
   334  void cpuacct_charge(struct task_struct *tsk, u64 cputime)  in cpuacct_charge() argument
   336  unsigned int cpu = task_cpu(tsk);  in cpuacct_charge()
   341  for (ca = task_ca(tsk); ca; ca = parent_ca(ca))  in cpuacct_charge()
   350  void cpuacct_account_field(struct task_struct *tsk, int index, u64 val)  in cpuacct_account_field() argument
   354  for (ca = task_ca(tsk); ca != &root_cpuacct; ca = parent_ca(ca))  in cpuacct_account_field()
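
cpuacct_charge() above walks from the task's group to the root via parent_ca(), charging every level, so each parent's counter is the sum over its subtree. A sketch of that parent-pointer walk with a hypothetical hierarchy:

    #include <stdint.h>
    #include <stdio.h>

    struct ca { struct ca *parent; uint64_t usage; const char *name; };

    static void charge(struct ca *ca, uint64_t cputime)
    {
        for (; ca; ca = ca->parent)  /* walk to the root, inclusive */
            ca->usage += cputime;
    }

    int main(void)
    {
        struct ca root  = { .name = "root" };
        struct ca grp   = { .parent = &root, .name = "grp" };
        struct ca child = { .parent = &grp,  .name = "child" };

        charge(&child, 5);
        charge(&grp, 3);
        printf("%s=%llu %s=%llu %s=%llu\n",   /* child=5 grp=8 root=8 */
               child.name, (unsigned long long)child.usage,
               grp.name,   (unsigned long long)grp.usage,
               root.name,  (unsigned long long)root.usage);
        return 0;
    }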
|
/kernel/futex/ |
D | core.c |
  1029  static void futex_cleanup(struct task_struct *tsk)  in futex_cleanup() argument
  1031  if (unlikely(tsk->robust_list)) {  in futex_cleanup()
  1032  exit_robust_list(tsk);  in futex_cleanup()
  1033  tsk->robust_list = NULL;  in futex_cleanup()
  1037  if (unlikely(tsk->compat_robust_list)) {  in futex_cleanup()
  1038  compat_exit_robust_list(tsk);  in futex_cleanup()
  1039  tsk->compat_robust_list = NULL;  in futex_cleanup()
  1043  if (unlikely(!list_empty(&tsk->pi_state_list)))  in futex_cleanup()
  1044  exit_pi_state_list(tsk);  in futex_cleanup()
  1064  void futex_exit_recursive(struct task_struct *tsk)  in futex_exit_recursive() argument
   [all …]
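
futex_cleanup() above releases each futex-related resource a dying task may own at most once, clearing the handle afterwards so a repeated cleanup is a no-op. A sketch of that shape; the task struct and release helpers are hypothetical:

    #include <stdio.h>

    struct fake_task {
        void *robust_list;       /* registered list head, or NULL */
        int   pi_state_pending;  /* count standing in for pi_state_list */
    };

    static void exit_robust(struct fake_task *t) { (void)t; puts("walked robust list"); }
    static void exit_pi(struct fake_task *t)     { (void)t; puts("released PI state"); }

    static void task_cleanup(struct fake_task *t)
    {
        if (t->robust_list) {        /* unlikely() in the kernel version */
            exit_robust(t);
            t->robust_list = NULL;   /* clear: cleanup is idempotent */
        }
        if (t->pi_state_pending) {
            exit_pi(t);
            t->pi_state_pending = 0;
        }
    }

    int main(void)
    {
        int dummy;
        struct fake_task t = { .robust_list = &dummy, .pi_state_pending = 1 };

        task_cleanup(&t);  /* releases both */
        task_cleanup(&t);  /* second call: nothing left to do */
        return 0;
    }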
|
/kernel/dma/ |
D | map_benchmark.c |
   101  struct task_struct **tsk;  in do_map_benchmark() local
   109  tsk = kmalloc_array(threads, sizeof(*tsk), GFP_KERNEL);  in do_map_benchmark()
   110  if (!tsk)  in do_map_benchmark()
   116  tsk[i] = kthread_create_on_node(map_benchmark_thread, map,  in do_map_benchmark()
   118  if (IS_ERR(tsk[i])) {  in do_map_benchmark()
   120  ret = PTR_ERR(tsk[i]);  in do_map_benchmark()
   125  kthread_bind_mask(tsk[i], cpu_mask);  in do_map_benchmark()
   136  get_task_struct(tsk[i]);  in do_map_benchmark()
   137  wake_up_process(tsk[i]);  in do_map_benchmark()
   144  ret = kthread_stop(tsk[i]);  in do_map_benchmark()
   [all …]
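
do_map_benchmark() above creates an array of worker kthreads and, on a mid-loop failure, stops only the threads already created before returning the error. A pthreads sketch of the same create-N-or-unwind pattern (build with -pthread); pthread_create/pthread_join stand in for kthread_create_on_node()/kthread_stop():

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *worker(void *arg)
    {
        return arg;  /* real benchmark work would go here */
    }

    static int run_threads(int threads)
    {
        pthread_t *tsk = malloc(sizeof(*tsk) * threads);
        int i, created, ret = 0;

        if (!tsk)
            return -1;

        for (created = 0; created < threads; created++) {
            ret = pthread_create(&tsk[created], NULL, worker, NULL);
            if (ret)
                break;  /* unwind only the threads already created */
        }
        for (i = 0; i < created; i++)
            pthread_join(tsk[i], NULL);  /* kthread_stop() analogue */

        free(tsk);
        return ret;
    }

    int main(void)
    {
        printf("run_threads: %d\n", run_threads(4));
        return 0;
    }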
|
/kernel/events/ |
D | hw_breakpoint_test.c |
    31  static struct perf_event *register_test_bp(int cpu, struct task_struct *tsk, int idx)  in register_test_bp() argument
    42  return perf_event_create_kernel_counter(&attr, cpu, tsk, NULL, NULL);  in register_test_bp()
    65  static void fill_one_bp_slot(struct kunit *test, int *id, int cpu, struct task_struct *tsk)  in fill_one_bp_slot() argument
    67  struct perf_event *bp = register_test_bp(cpu, tsk, *id);  in fill_one_bp_slot()
    80  static bool fill_bp_slots(struct kunit *test, int *id, int cpu, struct task_struct *tsk, int skip)  in fill_bp_slots() argument
    83  fill_one_bp_slot(test, id, cpu, tsk);  in fill_bp_slots()
    95  struct task_struct *tsk;  in get_other_task() local
   100  tsk = kthread_create(dummy_kthread, NULL, "hw_breakpoint_dummy_task");  in get_other_task()
   101  KUNIT_ASSERT_FALSE(test, IS_ERR(tsk));  in get_other_task()
   102  __other_task = tsk;  in get_other_task()
|
/kernel/trace/ |
D | rethook.c |
   215  static unsigned long __rethook_find_ret_addr(struct task_struct *tsk,  in __rethook_find_ret_addr() argument
   222  node = tsk->rethooks.first;  in __rethook_find_ret_addr()
   254  unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame,  in rethook_find_ret_addr() argument
   263  if (WARN_ON_ONCE(tsk != current && task_is_running(tsk)))  in rethook_find_ret_addr()
   267  ret = __rethook_find_ret_addr(tsk, cur);  in rethook_find_ret_addr()
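
__rethook_find_ret_addr() above recovers a hooked function's original return address by walking the task's singly linked list of shadow frame records until one matches the queried frame pointer. A sketch with hypothetical node types:

    #include <stdio.h>

    struct frame_node {
        struct frame_node *next;
        unsigned long frame;     /* stack frame this hook belongs to */
        unsigned long ret_addr;  /* original return address we saved */
    };

    static unsigned long find_ret_addr(struct frame_node *head, unsigned long frame)
    {
        for (struct frame_node *n = head; n; n = n->next)
            if (n->frame == frame)
                return n->ret_addr;
        return 0;  /* no record: this frame was not hooked */
    }

    int main(void)
    {
        struct frame_node f2 = { NULL, 0x2000, 0xbbbb };
        struct frame_node f1 = { &f2,  0x1000, 0xaaaa };  /* newest first */

        printf("%#lx\n", find_ret_addr(&f1, 0x2000));     /* 0xbbbb */
        printf("%#lx\n", find_ret_addr(&f1, 0x3000));     /* 0 */
        return 0;
    }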
|
/kernel/cgroup/ |
D | cpuset.c |
   530  static void guarantee_online_cpus(struct task_struct *tsk,  in guarantee_online_cpus() argument
   533  const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);  in guarantee_online_cpus()
   540  cs = task_cs(tsk);  in guarantee_online_cpus()
   586  struct task_struct *tsk)  in cpuset_update_task_spread_flags() argument
   592  task_set_spread_page(tsk);  in cpuset_update_task_spread_flags()
   594  task_clear_spread_page(tsk);  in cpuset_update_task_spread_flags()
   597  task_set_spread_slab(tsk);  in cpuset_update_task_spread_flags()
   599  task_clear_spread_slab(tsk);  in cpuset_update_task_spread_flags()
  2001  static void cpuset_change_task_nodemask(struct task_struct *tsk,  in cpuset_change_task_nodemask() argument
  2004  task_lock(tsk);  in cpuset_change_task_nodemask()
   [all …]
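
guarantee_online_cpus() above computes the CPUs a task may actually use by intersecting its possible mask with the online CPUs, falling back to a wider mask rather than ever returning an empty one. A sketch with cpumasks reduced to 64-bit words; the names and fallback policy are simplified from the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t online_cpus = 0x0f;  /* CPUs 0-3 online */

    static uint64_t effective_cpus(uint64_t possible, uint64_t cpuset_allowed)
    {
        uint64_t mask = possible & cpuset_allowed & online_cpus;

        /* Never hand back an empty mask: fall back to any online CPU
         * the task could run on, ignoring the cpuset restriction. */
        if (!mask)
            mask = possible & online_cpus;
        return mask;
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)effective_cpus(0xff, 0x03)); /* 0x3 */
        printf("%#llx\n", (unsigned long long)effective_cpus(0xff, 0x30)); /* all allowed CPUs offline: 0xf */
        return 0;
    }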
|