/kernel/rcu/
D | update.c
     88  struct task_struct *t = current;  in __rcu_read_unlock() local
     90  if (t->rcu_read_lock_nesting != 1) {  in __rcu_read_unlock()
     91  --t->rcu_read_lock_nesting;  in __rcu_read_unlock()
     94  t->rcu_read_lock_nesting = INT_MIN;  in __rcu_read_unlock()
     96  if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special.s)))  in __rcu_read_unlock()
     97  rcu_read_unlock_special(t);  in __rcu_read_unlock()
     99  t->rcu_read_lock_nesting = 0;  in __rcu_read_unlock()
    103  int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);  in __rcu_read_unlock()
    494  static void check_holdout_task(struct task_struct *t,  in check_holdout_task() argument
    499  if (!ACCESS_ONCE(t->rcu_tasks_holdout) ||  in check_holdout_task()
    [all …]
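The update.c hits are the slow path of the preemptible-RCU read-side unlock: rcu_read_lock()/rcu_read_unlock() maintain current->rcu_read_lock_nesting, and __rcu_read_unlock() handles the special cases when the outermost section ends. For orientation, a minimal sketch of the reader API those counters implement (struct foo and global_foo are hypothetical):

    struct foo {
            int field;
    };

    struct foo __rcu *global_foo;            /* hypothetical RCU-protected pointer */

    static void reader(void)
    {
            struct foo *p;

            rcu_read_lock();                 /* bumps current->rcu_read_lock_nesting */
            p = rcu_dereference(global_foo); /* valid until rcu_read_unlock() */
            if (p)
                    pr_info("field=%d\n", p->field);
            rcu_read_unlock();               /* outermost exit runs __rcu_read_unlock() above */
    }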
D | tree_plugin.h
    161  struct task_struct *t = current;  in rcu_preempt_note_context_switch() local
    166  if (t->rcu_read_lock_nesting > 0 &&  in rcu_preempt_note_context_switch()
    167  !t->rcu_read_unlock_special.b.blocked) {  in rcu_preempt_note_context_switch()
    174  t->rcu_read_unlock_special.b.blocked = true;  in rcu_preempt_note_context_switch()
    175  t->rcu_blocked_node = rnp;  in rcu_preempt_note_context_switch()
    196  WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));  in rcu_preempt_note_context_switch()
    198  list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);  in rcu_preempt_note_context_switch()
    199  rnp->gp_tasks = &t->rcu_node_entry;  in rcu_preempt_note_context_switch()
    205  list_add(&t->rcu_node_entry, &rnp->blkd_tasks);  in rcu_preempt_note_context_switch()
    207  rnp->gp_tasks = &t->rcu_node_entry;  in rcu_preempt_note_context_switch()
    [all …]
D | srcu.c
    151  unsigned long t;  in srcu_readers_seq_idx() local
    154  t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);  in srcu_readers_seq_idx()
    155  sum += t;  in srcu_readers_seq_idx()
    168  unsigned long t;  in srcu_readers_active_idx() local
    171  t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);  in srcu_readers_active_idx()
    172  sum += t;  in srcu_readers_active_idx()
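srcu_read_lock() increments a per-CPU counter for the current grace-period index and srcu_read_unlock() decrements it; srcu_readers_active_idx() above sums c[idx] across CPUs so the writer can tell when no readers remain on that index. A minimal usage sketch (my_srcu is hypothetical):

    DEFINE_SRCU(my_srcu);

    static void reader(void)
    {
            int idx;

            idx = srcu_read_lock(&my_srcu);  /* increments this CPU's ->c[idx] */
            /* SRCU read-side section: sleeping is allowed, unlike plain RCU */
            srcu_read_unlock(&my_srcu, idx); /* decrements ->c[idx] */
    }

    static void writer(void)
    {
            /* blocks until the per-CPU sums show no readers on the old index */
            synchronize_srcu(&my_srcu);
    }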
/kernel/
D | kcov.c
    40  struct task_struct *t;  member
    49  struct task_struct *t;  in __sanitizer_cov_trace_pc() local
    52  t = current;  in __sanitizer_cov_trace_pc()
    57  if (!t || in_interrupt())  in __sanitizer_cov_trace_pc()
    59  mode = READ_ONCE(t->kcov_mode);  in __sanitizer_cov_trace_pc()
    72  area = t->kcov_area;  in __sanitizer_cov_trace_pc()
    75  if (likely(pos < t->kcov_size)) {  in __sanitizer_cov_trace_pc()
    96  void kcov_task_init(struct task_struct *t)  in kcov_task_init() argument
    98  t->kcov_mode = KCOV_MODE_DISABLED;  in kcov_task_init()
    99  t->kcov_size = 0;  in kcov_task_init()
    [all …]
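__sanitizer_cov_trace_pc() is called by compiler instrumentation on every basic block and appends the current PC to the task's kcov_area buffer; user space sets that buffer up through /sys/kernel/debug/kcov. A sketch of the usage pattern from the kernel's kcov documentation (error handling omitted):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>

    #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
    #define KCOV_ENABLE     _IO('c', 100)
    #define KCOV_DISABLE    _IO('c', 101)
    #define COVER_SIZE      (64 << 10)      /* buffer size, in unsigned longs */

    static unsigned long trace_one_run(void)
    {
            int fd = open("/sys/kernel/debug/kcov", O_RDWR);
            unsigned long *cover;

            ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
            cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            cover[0] = 0;                   /* slot 0 counts recorded PCs */
            ioctl(fd, KCOV_ENABLE, 0);      /* __sanitizer_cov_trace_pc() starts logging */
            /* ... issue the syscall under test ... */
            ioctl(fd, KCOV_DISABLE, 0);
            return cover[0];                /* PCs are in cover[1..cover[0]] */
    }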
D | softirq.c
    445  void __tasklet_schedule(struct tasklet_struct *t)  in __tasklet_schedule() argument
    450  t->next = NULL;  in __tasklet_schedule()
    451  *__this_cpu_read(tasklet_vec.tail) = t;  in __tasklet_schedule()
    452  __this_cpu_write(tasklet_vec.tail, &(t->next));  in __tasklet_schedule()
    458  void __tasklet_hi_schedule(struct tasklet_struct *t)  in __tasklet_hi_schedule() argument
    463  t->next = NULL;  in __tasklet_hi_schedule()
    464  *__this_cpu_read(tasklet_hi_vec.tail) = t;  in __tasklet_hi_schedule()
    465  __this_cpu_write(tasklet_hi_vec.tail, &(t->next));  in __tasklet_hi_schedule()
    471  void __tasklet_hi_schedule_first(struct tasklet_struct *t)  in __tasklet_hi_schedule_first() argument
    475  t->next = __this_cpu_read(tasklet_hi_vec.head);  in __tasklet_hi_schedule_first()
    [all …]
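__tasklet_schedule() and the _hi variants splice a tasklet onto this CPU's pending list and raise the corresponding softirq; drivers reach them through the tasklet_schedule()/tasklet_hi_schedule() wrappers. A minimal sketch using the classic (pre-5.9) callback signature; all names are hypothetical:

    static void my_tasklet_fn(unsigned long data)
    {
            /* deferred work: runs in softirq context, must not sleep */
    }

    static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

    static irqreturn_t my_irq_handler(int irq, void *dev)
    {
            tasklet_schedule(&my_tasklet);  /* lands in __tasklet_schedule() above */
            return IRQ_HANDLED;
    }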
D | hung_task.c
     75  static void check_hung_task(struct task_struct *t, unsigned long timeout)  in check_hung_task() argument
     77  unsigned long switch_count = t->nvcsw + t->nivcsw;  in check_hung_task()
     83  if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))  in check_hung_task()
     94  if (switch_count != t->last_switch_count) {  in check_hung_task()
     95  t->last_switch_count = switch_count;  in check_hung_task()
     99  trace_sched_process_hang(t);  in check_hung_task()
    112  t->comm, t->pid, timeout);  in check_hung_task()
    119  sched_show_task(t);  in check_hung_task()
    120  debug_show_held_locks(t);  in check_hung_task()
    137  static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)  in rcu_lock_break() argument
    [all …]
D | signal.c
     56  static void __user *sig_handler(struct task_struct *t, int sig)  in sig_handler() argument
     58  return t->sighand->action[sig - 1].sa.sa_handler;  in sig_handler()
     68  static int sig_task_ignored(struct task_struct *t, int sig, bool force)  in sig_task_ignored() argument
     72  handler = sig_handler(t, sig);  in sig_task_ignored()
     74  if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&  in sig_task_ignored()
     81  static int sig_ignored(struct task_struct *t, int sig, bool force)  in sig_ignored() argument
     88  if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))  in sig_ignored()
     96  if (t->ptrace && sig != SIGKILL)  in sig_ignored()
     99  return sig_task_ignored(t, sig, force);  in sig_ignored()
    134  static int recalc_sigpending_tsk(struct task_struct *t)  in recalc_sigpending_tsk() argument
    [all …]
D | capability.c
    294  bool has_ns_capability(struct task_struct *t,  in has_ns_capability() argument
    300  ret = security_capable(__task_cred(t), ns, cap);  in has_ns_capability()
    316  bool has_capability(struct task_struct *t, int cap)  in has_capability() argument
    318  return has_ns_capability(t, &init_user_ns, cap);  in has_capability()
    334  bool has_ns_capability_noaudit(struct task_struct *t,  in has_ns_capability_noaudit() argument
    340  ret = security_capable_noaudit(__task_cred(t), ns, cap);  in has_ns_capability_noaudit()
    358  bool has_capability_noaudit(struct task_struct *t, int cap)  in has_capability_noaudit() argument
    360  return has_ns_capability_noaudit(t, &init_user_ns, cap);  in has_capability_noaudit()
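has_capability() asks whether an arbitrary task holds a capability (in the init user namespace), while the common pattern checks current via capable(). A short illustrative sketch; the chosen capabilities and the -EPERM policy are hypothetical:

    static int my_op(struct task_struct *task)
    {
            /* query another task's credentials, as has_capability() above does */
            if (!has_capability(task, CAP_SYS_NICE))
                    return -EPERM;

            /* the usual case: check current */
            if (!capable(CAP_SYS_ADMIN))
                    return -EPERM;

            return 0;
    }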
D | futex_compat.c
    179  ktime_t t, *tp = NULL;  in COMPAT_SYSCALL_DEFINE6() local
    191  t = timespec_to_ktime(ts);  in COMPAT_SYSCALL_DEFINE6()
    193  t = ktime_add_safe(ktime_get(), t);  in COMPAT_SYSCALL_DEFINE6()
    194  tp = &t;  in COMPAT_SYSCALL_DEFINE6()
D | audit.h
    323  extern int __audit_signal_info(int sig, struct task_struct *t);
    324  static inline int audit_signal_info(int sig, struct task_struct *t)  in audit_signal_info() argument
    326  if (unlikely((audit_pid && t->tgid == audit_pid) ||  in audit_signal_info()
    328  return __audit_signal_info(sig, t);  in audit_signal_info()
    334  #define audit_signal_info(s,t) AUDIT_DISABLED  argument
    335  #define audit_filter_inodes(t,c) AUDIT_DISABLED  argument
D | tracepoint.c
    495  struct task_struct *p, *t;  in syscall_regfunc() local
    499  for_each_process_thread(p, t) {  in syscall_regfunc()
    500  set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);  in syscall_regfunc()
    509  struct task_struct *p, *t;  in syscall_unregfunc() local
    514  for_each_process_thread(p, t) {  in syscall_unregfunc()
    515  clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);  in syscall_unregfunc()
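syscall_regfunc()/syscall_unregfunc() walk every thread in the system to flip TIF_SYSCALL_TRACEPOINT. The traversal idiom in isolation, assuming the same flag update as the listing:

    struct task_struct *p, *t;

    read_lock(&tasklist_lock);      /* keeps the process/thread lists stable */
    for_each_process_thread(p, t) {
            /* p walks processes, t walks each process's threads */
            set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
    }
    read_unlock(&tasklist_lock);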
D | panic.c
    273  const struct tnt *t = &tnts[i];  in print_tainted() local
    274  *s++ = test_bit(t->bit, &tainted_mask) ?  in print_tainted()
    275  t->true : t->false;  in print_tainted()
D | auditsc.c
    2033  struct timespec *t, unsigned int *serial)  in auditsc_get_stamp() argument
    2039  t->tv_sec = ctx->ctime.tv_sec;  in auditsc_get_stamp()
    2040  t->tv_nsec = ctx->ctime.tv_nsec;  in auditsc_get_stamp()
    2307  void __audit_ptrace(struct task_struct *t)  in __audit_ptrace() argument
    2311  context->target_pid = task_tgid_nr(t);  in __audit_ptrace()
    2312  context->target_auid = audit_get_loginuid(t);  in __audit_ptrace()
    2313  context->target_uid = task_uid(t);  in __audit_ptrace()
    2314  context->target_sessionid = audit_get_sessionid(t);  in __audit_ptrace()
    2315  security_task_getsecid(t, &context->target_sid);  in __audit_ptrace()
    2316  memcpy(context->target_comm, t->comm, TASK_COMM_LEN);  in __audit_ptrace()
    [all …]
/kernel/sched/
D | stats.h
    51  static inline void sched_info_reset_dequeued(struct task_struct *t)  in sched_info_reset_dequeued() argument
    53  t->sched_info.last_queued = 0;  in sched_info_reset_dequeued()
    62  static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)  in sched_info_dequeued() argument
    67  if (t->sched_info.last_queued)  in sched_info_dequeued()
    68  delta = now - t->sched_info.last_queued;  in sched_info_dequeued()
    69  sched_info_reset_dequeued(t);  in sched_info_dequeued()
    70  t->sched_info.run_delay += delta;  in sched_info_dequeued()
    80  static void sched_info_arrive(struct rq *rq, struct task_struct *t)  in sched_info_arrive() argument
    84  if (t->sched_info.last_queued)  in sched_info_arrive()
    85  delta = now - t->sched_info.last_queued;  in sched_info_arrive()
    [all …]
D | cputime.c
    317  struct task_struct *t;  in thread_group_cputime() local
    331  for_each_thread(tsk, t) {  in thread_group_cputime()
    332  task_cputime(t, &utime, &stime);  in thread_group_cputime()
    335  times->sum_exec_runtime += task_sched_runtime(t);  in thread_group_cputime()
    782  void vtime_init_idle(struct task_struct *t, int cpu)  in vtime_init_idle() argument
    786  write_seqlock_irqsave(&t->vtime_seqlock, flags);  in vtime_init_idle()
    787  t->vtime_snap_whence = VTIME_SYS;  in vtime_init_idle()
    788  t->vtime_snap = sched_clock_cpu(cpu);  in vtime_init_idle()
    789  write_sequnlock_irqrestore(&t->vtime_seqlock, flags);  in vtime_init_idle()
    792  cputime_t task_gtime(struct task_struct *t)  in task_gtime() argument
    [all …]
D | completion.c
    189  long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);  in wait_for_completion_interruptible() local
    190  if (t == -ERESTARTSYS)  in wait_for_completion_interruptible()
    191  return t;  in wait_for_completion_interruptible()
    226  long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);  in wait_for_completion_killable() local
    227  if (t == -ERESTARTSYS)  in wait_for_completion_killable()
    228  return t;  in wait_for_completion_killable()
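Both helpers return -ERESTARTSYS when a signal interrupts the wait, which is exactly the check visible above. Typical pairing, with hypothetical names:

    static DECLARE_COMPLETION(work_done);

    static int waiter(void)
    {
            int ret = wait_for_completion_interruptible(&work_done);

            if (ret == -ERESTARTSYS)
                    return ret;             /* a signal cut the wait short */
            return 0;                       /* completion was signalled */
    }

    static void producer(void)
    {
            complete(&work_done);           /* wakes one waiter */
    }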
/kernel/time/
D | timeconst.bc
     4  auto t;
     6  t = b;
     8  a = t;
    64  print "#define HZ_TO_MSEC_SHR32\t", s, "\n"
    71  print "#define MSEC_TO_HZ_SHR32\t", s, "\n"
    75  print "#define HZ_TO_MSEC_NUM\t\t", 1000/cd, "\n"
    76  print "#define HZ_TO_MSEC_DEN\t\t", hz/cd, "\n"
    77  print "#define MSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
    78  print "#define MSEC_TO_HZ_DEN\t\t", 1000/cd, "\n"
    86  print "#define HZ_TO_USEC_SHR32\t", s, "\n"
    [all …]
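The bc temporary t above belongs to a Euclid GCD helper: timeconst.bc divides 1000 and HZ by their GCD (cd in the listing) so that HZ_TO_MSEC_NUM/HZ_TO_MSEC_DEN come out in lowest terms. The same reduction in C, with a worked example:

    static unsigned long gcd(unsigned long a, unsigned long b)
    {
            while (b) {             /* the bc loop: t = b; b = a % b; a = t */
                    unsigned long t = b;

                    b = a % b;
                    a = t;
            }
            return a;
    }

    /*
     * For HZ = 250: cd = gcd(1000, 250) = 250, so
     * HZ_TO_MSEC_NUM = 1000/cd = 4 and HZ_TO_MSEC_DEN = hz/cd = 1,
     * i.e. msecs = jiffies * 4 / 1.
     */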
D | hrtimer.c
     257  # define switch_hrtimer_base(t, b, p) (b)  argument
    1484  struct hrtimer_sleeper *t =  in hrtimer_wakeup() local
    1486  struct task_struct *task = t->task;  in hrtimer_wakeup()
    1488  t->task = NULL;  in hrtimer_wakeup()
    1502  static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)  in do_nanosleep() argument
    1504  hrtimer_init_sleeper(t, current);  in do_nanosleep()
    1508  hrtimer_start_expires(&t->timer, mode);  in do_nanosleep()
    1509  if (!hrtimer_active(&t->timer))  in do_nanosleep()
    1510  t->task = NULL;  in do_nanosleep()
    1512  if (likely(t->task))  in do_nanosleep()
    [all …]
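hrtimer_wakeup() is the expiry callback of a struct hrtimer_sleeper: it clears t->task and wakes the sleeper, and do_nanosleep() loops until t->task goes NULL. A minimal sketch of a plain one-shot hrtimer with the same callback shape (names hypothetical):

    static struct hrtimer my_timer;

    static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
    {
            /* expiry callback; runs in hard-irq context, like hrtimer_wakeup() */
            return HRTIMER_NORESTART;       /* one-shot */
    }

    static void arm_timer(void)
    {
            hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            my_timer.function = my_timer_fn;
            hrtimer_start(&my_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
    }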
D | itimer.c
     57  cputime_t t;  in get_cpu_itimer() local
     61  t = cputime.utime + cputime.stime;  in get_cpu_itimer()
     64  t = cputime.utime;  in get_cpu_itimer()
     66  if (cval < t)  in get_cpu_itimer()
     70  cval = cval - t;  in get_cpu_itimer()
    187  #define timeval_valid(t) \  argument
    188  (((t)->tv_sec >= 0) && (((unsigned long) (t)->tv_usec) < USEC_PER_SEC))
/kernel/trace/
D | trace_probe.c
    318  static int parse_probe_vars(char *arg, const struct fetch_type *t,  in parse_probe_vars() argument
    327  f->fn = t->fetch[FETCH_MTD_retval];  in parse_probe_vars()
    332  if (strcmp(t->name, DEFAULT_FETCH_TYPE_STR))  in parse_probe_vars()
    344  f->fn = t->fetch[FETCH_MTD_stack];  in parse_probe_vars()
    356  static int parse_probe_arg(char *arg, const struct fetch_type *t,  in parse_probe_arg() argument
    370  ret = parse_probe_vars(arg + 1, t, f, is_return, is_kprobe);  in parse_probe_arg()
    376  f->fn = t->fetch[FETCH_MTD_reg];  in parse_probe_arg()
    388  f->fn = t->fetch[FETCH_MTD_memory];  in parse_probe_arg()
    399  f->fn = t->fetch[FETCH_MTD_file_offset];  in parse_probe_arg()
    412  f->fn = t->fetch[FETCH_MTD_symbol];  in parse_probe_arg()
    [all …]
D | blktrace.c
     71  struct blk_io_trace *t;  in trace_note() local
     82  sizeof(*t) + len,  in trace_note()
     86  t = ring_buffer_event_data(event);  in trace_note()
     93  t = relay_reserve(bt->rchan, sizeof(*t) + len);  in trace_note()
     94  if (t) {  in trace_note()
     95  t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;  in trace_note()
     96  t->time = ktime_to_ns(ktime_get());  in trace_note()
     98  t->device = bt->dev;  in trace_note()
     99  t->action = action;  in trace_note()
    100  t->pid = pid;  in trace_note()
    [all …]
D | trace_mmiotrace.c
    179  unsigned long long t = ns2usecs(iter->ts);  in mmio_print_rw() local
    180  unsigned long usec_rem = do_div(t, USEC_PER_SEC);  in mmio_print_rw()
    181  unsigned secs = (unsigned long)t;  in mmio_print_rw()
    226  unsigned long long t = ns2usecs(iter->ts);  in mmio_print_map() local
    227  unsigned long usec_rem = do_div(t, USEC_PER_SEC);  in mmio_print_map()
    228  unsigned secs = (unsigned long)t;  in mmio_print_map()
    262  unsigned long long t = ns2usecs(iter->ts);  in mmio_print_mark() local
    263  unsigned long usec_rem = do_div(t, USEC_PER_SEC);  in mmio_print_mark()
    264  unsigned secs = (unsigned long)t;  in mmio_print_mark()
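All three mmio_print_*() sites split a nanosecond timestamp the same way: do_div() divides its 64-bit first argument in place and returns the remainder, the portable form of 64-by-32 division on 32-bit architectures. The idiom as a small helper (a sketch; print_usecs() itself is hypothetical):

    static void print_usecs(struct trace_seq *s, u64 ns)
    {
            unsigned long long t = ns2usecs(ns);              /* ns -> us */
            unsigned long usec_rem = do_div(t, USEC_PER_SEC); /* t /= 10^6, yields t % 10^6 */

            trace_seq_printf(s, "%lu.%06lu ", (unsigned long)t, usec_rem);
    }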
D | trace.c
    1166  struct tracer *t;  in register_tracer() local
    1183  for (t = trace_types; t; t = t->next) {  in register_tracer()
    1184  if (strcmp(type->name, t->name) == 0) {  in register_tracer()
    3277  trace_ok_for_array(struct tracer *t, struct trace_array *tr)  in trace_ok_for_array() argument
    3279  return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;  in trace_ok_for_array()
    3284  get_tracer_for_array(struct trace_array *tr, struct tracer *t)  in get_tracer_for_array() argument
    3286  while (t && !trace_ok_for_array(t, tr))  in get_tracer_for_array()
    3287  t = t->next;  in get_tracer_for_array()
    3289  return t;  in get_tracer_for_array()
    3296  struct tracer *t = v;  in t_next() local
    [all …]
/kernel/bpf/
D | verifier.c
    246  enum bpf_reg_type t;  in print_verifier_state() local
    250  t = env->cur_state.regs[i].type;  in print_verifier_state()
    251  if (t == NOT_INIT)  in print_verifier_state()
    253  verbose(" R%d=%s", i, reg_type_str[t]);  in print_verifier_state()
    254  if (t == CONST_IMM || t == PTR_TO_STACK)  in print_verifier_state()
    256  else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE ||  in print_verifier_state()
    257  t == PTR_TO_MAP_VALUE_OR_NULL)  in print_verifier_state()
    497  enum reg_arg_type t)  in check_reg_arg() argument
    504  if (t == SRC_OP) {  in check_reg_arg()
    516  if (t == DST_OP)  in check_reg_arg()
    [all …]
/kernel/events/
D | uprobes.c
    1028  struct rb_node *n, *t;  in build_probe_list() local
    1038  for (t = n; t; t = rb_prev(t)) {  in build_probe_list()
    1039  u = rb_entry(t, struct uprobe, rb_node);  in build_probe_list()
    1045  for (t = n; (t = rb_next(t)); ) {  in build_probe_list()
    1046  u = rb_entry(t, struct uprobe, rb_node);  in build_probe_list()
    1384  void uprobe_free_utask(struct task_struct *t)  in uprobe_free_utask() argument
    1386  struct uprobe_task *utask = t->utask;  in uprobe_free_utask()
    1404  xol_free_insn_slot(t);  in uprobe_free_utask()
    1406  t->utask = NULL;  in uprobe_free_utask()
    1424  static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)  in dup_utask() argument
    [all …]