/kernel/bpf/ |
D | btf.c |
    223   const struct btf_type *t;  member
    281   const struct btf_type *t,
    294   const struct btf_type *t);
    295   void (*seq_show)(const struct btf *btf, const struct btf_type *t,
    304   const struct btf_type *t, u32 type_id);
    306   static bool btf_type_is_modifier(const struct btf_type *t)  in btf_type_is_modifier() argument
    318   switch (BTF_INFO_KIND(t->info)) {  in btf_type_is_modifier()
    329   bool btf_type_is_void(const struct btf_type *t)  in btf_type_is_void() argument
    331   return t == &btf_void;  in btf_type_is_void()
    334   static bool btf_type_is_fwd(const struct btf_type *t)  in btf_type_is_fwd() argument
    [all …]
|
/kernel/ |
D | kcov.c |
    62    struct task_struct *t;  member
    145   static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)  in check_kcov_mode() argument
    155   mode = READ_ONCE(t->kcov_mode);  in check_kcov_mode()
    181   struct task_struct *t;  in __sanitizer_cov_trace_pc() local
    186   t = current;  in __sanitizer_cov_trace_pc()
    187   if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))  in __sanitizer_cov_trace_pc()
    190   area = t->kcov_area;  in __sanitizer_cov_trace_pc()
    193   if (likely(pos < t->kcov_size)) {  in __sanitizer_cov_trace_pc()
    203   struct task_struct *t;  in write_comp_data() local
    207   t = current;  in write_comp_data()
    [all …]
|
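The kcov.c hooks above (check_kcov_mode(), __sanitizer_cov_trace_pc(), write_comp_data()) are the kernel side of KCOV coverage collection. A minimal userspace sketch of the documented usage pattern follows; COVER_SIZE and the traced read(-1, NULL, 0) call are illustrative choices, and error handling is kept to the bare minimum.

    /* Sketch of the KCOV usage pattern from Documentation/dev-tools/kcov.rst.
     * Assumes CONFIG_KCOV=y and debugfs mounted at /sys/kernel/debug. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/kcov.h>

    #define COVER_SIZE (64 << 10)        /* number of recorded PCs, not bytes */

    int main(void)
    {
        int fd = open("/sys/kernel/debug/kcov", O_RDWR);
        unsigned long *cover, n, i;

        if (fd == -1)
            return 1;
        /* Size the per-task buffer; this becomes kcov_size/kcov_area above. */
        if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
            return 1;
        cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (cover == MAP_FAILED)
            return 1;
        /* Enable PC tracing for this task; gated by check_kcov_mode(). */
        if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
            return 1;

        __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
        read(-1, NULL, 0);               /* the syscall whose coverage we want */
        n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);

        for (i = 0; i < n; i++)
            printf("0x%lx\n", cover[i + 1]);
        ioctl(fd, KCOV_DISABLE, 0);
        close(fd);
        return 0;
    }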
D | rseq.c |
    84    static int rseq_update_cpu_id(struct task_struct *t)  in rseq_update_cpu_id() argument
    88    if (put_user(cpu_id, &t->rseq->cpu_id_start))  in rseq_update_cpu_id()
    90    if (put_user(cpu_id, &t->rseq->cpu_id))  in rseq_update_cpu_id()
    92    trace_rseq_update(t);  in rseq_update_cpu_id()
    96    static int rseq_reset_rseq_cpu_id(struct task_struct *t)  in rseq_reset_rseq_cpu_id() argument
    103   if (put_user(cpu_id_start, &t->rseq->cpu_id_start))  in rseq_reset_rseq_cpu_id()
    110   if (put_user(cpu_id, &t->rseq->cpu_id))  in rseq_reset_rseq_cpu_id()
    115   static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)  in rseq_get_rseq_cs() argument
    123   if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr)))  in rseq_get_rseq_cs()
    161   static int rseq_need_restart(struct task_struct *t, u32 cs_flags)  in rseq_need_restart() argument
    [all …]
|
D | hung_task.c |
    88    static void check_hung_task(struct task_struct *t, unsigned long timeout)  in check_hung_task() argument
    90    unsigned long switch_count = t->nvcsw + t->nivcsw;  in check_hung_task()
    96    if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))  in check_hung_task()
    107   if (switch_count != t->last_switch_count) {  in check_hung_task()
    108   t->last_switch_count = switch_count;  in check_hung_task()
    109   t->last_switch_time = jiffies;  in check_hung_task()
    112   if (time_is_after_jiffies(t->last_switch_time + timeout * HZ))  in check_hung_task()
    115   trace_sched_process_hang(t);  in check_hung_task()
    131   t->comm, t->pid, (jiffies - t->last_switch_time) / HZ);  in check_hung_task()
    138   sched_show_task(t);  in check_hung_task()
    [all …]
|
D | softirq.c |
    471   static void __tasklet_schedule_common(struct tasklet_struct *t,  in __tasklet_schedule_common() argument
    480   t->next = NULL;  in __tasklet_schedule_common()
    481   *head->tail = t;  in __tasklet_schedule_common()
    482   head->tail = &(t->next);  in __tasklet_schedule_common()
    487   void __tasklet_schedule(struct tasklet_struct *t)  in __tasklet_schedule() argument
    489   __tasklet_schedule_common(t, &tasklet_vec,  in __tasklet_schedule()
    494   void __tasklet_hi_schedule(struct tasklet_struct *t)  in __tasklet_hi_schedule() argument
    496   __tasklet_schedule_common(t, &tasklet_hi_vec,  in __tasklet_hi_schedule()
    514   struct tasklet_struct *t = list;  in tasklet_action_common() local
    518   if (tasklet_trylock(t)) {  in tasklet_action_common()
    [all …]
|
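The softirq.c hits show the tasklet scheduling path: __tasklet_schedule() links the tasklet onto the per-CPU tasklet_vec, and tasklet_action_common() later runs the list in softirq context. A minimal driver-side sketch of the classic API for kernels of this vintage, with a hypothetical my_tasklet/my_irq_handler pair; kernels from 5.9 onward would use tasklet_setup() and a struct tasklet_struct * callback instead.

    #include <linux/interrupt.h>
    #include <linux/kernel.h>

    /* Deferred work; runs in softirq context once tasklet_action_common()
     * pulls the tasklet off the per-CPU list. */
    static void my_tasklet_fn(unsigned long data)
    {
        pr_info("tasklet ran, data=%lu\n", data);
    }

    static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

    static irqreturn_t my_irq_handler(int irq, void *dev_id)
    {
        /* Keep the hard-irq handler short; this ends up in __tasklet_schedule(). */
        tasklet_schedule(&my_tasklet);
        return IRQ_HANDLED;
    }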
D | signal.c |
    67    static void __user *sig_handler(struct task_struct *t, int sig)  in sig_handler() argument
    69    return t->sighand->action[sig - 1].sa.sa_handler;  in sig_handler()
    79    static bool sig_task_ignored(struct task_struct *t, int sig, bool force)  in sig_task_ignored() argument
    83    handler = sig_handler(t, sig);  in sig_task_ignored()
    86    if (unlikely(is_global_init(t) && sig_kernel_only(sig)))  in sig_task_ignored()
    89    if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&  in sig_task_ignored()
    94    if (unlikely((t->flags & PF_KTHREAD) &&  in sig_task_ignored()
    101   static bool sig_ignored(struct task_struct *t, int sig, bool force)  in sig_ignored() argument
    108   if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))  in sig_ignored()
    116   if (t->ptrace && sig != SIGKILL)  in sig_ignored()
    [all …]
|
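The PF_KTHREAD check in sig_task_ignored() above is why kernel threads normally never see signals: a kthread has to opt in explicitly. A rough sketch of that opt-in pattern with a hypothetical worker function; exact header placement of allow_signal() varies a little across kernel versions.

    #include <linux/kthread.h>
    #include <linux/printk.h>
    #include <linux/sched.h>
    #include <linux/sched/signal.h>

    static int my_worker_fn(void *data)
    {
        /* Without this, SIGTERM sent to the kthread is dropped by
         * sig_task_ignored(). */
        allow_signal(SIGTERM);

        while (!kthread_should_stop()) {
            schedule_timeout_interruptible(HZ);
            if (signal_pending(current)) {
                flush_signals(current);
                pr_info("worker: caught a signal\n");
            }
        }
        return 0;
    }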
D | capability.c |
    294   bool has_ns_capability(struct task_struct *t,  in has_ns_capability() argument
    300   ret = security_capable(__task_cred(t), ns, cap, CAP_OPT_NONE);  in has_ns_capability()
    316   bool has_capability(struct task_struct *t, int cap)  in has_capability() argument
    318   return has_ns_capability(t, &init_user_ns, cap);  in has_capability()
    335   bool has_ns_capability_noaudit(struct task_struct *t,  in has_ns_capability_noaudit() argument
    341   ret = security_capable(__task_cred(t), ns, cap, CAP_OPT_NOAUDIT);  in has_ns_capability_noaudit()
    359   bool has_capability_noaudit(struct task_struct *t, int cap)  in has_capability_noaudit() argument
    361   return has_ns_capability_noaudit(t, &init_user_ns, cap);  in has_capability_noaudit()
|
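has_capability() and friends check a capability against an arbitrary task; the far more common in-kernel pattern checks the calling task via capable()/ns_capable(). A minimal sketch with a hypothetical privileged helper:

    #include <linux/capability.h>
    #include <linux/errno.h>

    /* Gate a privileged operation on CAP_NET_ADMIN for the calling task.
     * ns_capable() would check against a specific user namespace instead,
     * much as has_ns_capability() above does for another task. */
    static int my_priv_op(void)
    {
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        /* ... privileged work ... */
        return 0;
    }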
/kernel/time/ |
D | timeconst.bc |
    6     auto t;
    8     t = b;
    10    a = t;
    66    print "#define HZ_TO_MSEC_SHR32\t", s, "\n"
    73    print "#define MSEC_TO_HZ_SHR32\t", s, "\n"
    77    print "#define HZ_TO_MSEC_NUM\t\t", 1000/cd, "\n"
    78    print "#define HZ_TO_MSEC_DEN\t\t", hz/cd, "\n"
    79    print "#define MSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
    80    print "#define MSEC_TO_HZ_DEN\t\t", 1000/cd, "\n"
    88    print "#define HZ_TO_USEC_SHR32\t", s, "\n"
    [all …]
|
D | posix-stubs.c |
    128   struct timespec64 t;  in SYSCALL_DEFINE4() local
    139   if (get_timespec64(&t, rqtp))  in SYSCALL_DEFINE4()
    141   if (!timespec64_valid(&t))  in SYSCALL_DEFINE4()
    147   return hrtimer_nanosleep(&t, flags & TIMER_ABSTIME ?  in SYSCALL_DEFINE4()
    214   struct timespec64 t;  in SYSCALL_DEFINE4() local
    225   if (get_old_timespec32(&t, rqtp))  in SYSCALL_DEFINE4()
    227   if (!timespec64_valid(&t))  in SYSCALL_DEFINE4()
    233   return hrtimer_nanosleep(&t, flags & TIMER_ABSTIME ?  in SYSCALL_DEFINE4()
|
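posix-stubs.c funnels clock_nanosleep() into hrtimer_nanosleep(), in absolute mode when TIMER_ABSTIME is set. A userspace sketch of sleeping to an absolute CLOCK_MONOTONIC deadline, which cannot oversleep even if the call is restarted after a signal:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec deadline;
        int err;

        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec += 2;            /* wake up 2 s from now */

        /* With TIMER_ABSTIME the kernel validates the timespec
         * (timespec64_valid() above) and sleeps until the absolute time. */
        err = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &deadline, NULL);
        if (err)
            fprintf(stderr, "clock_nanosleep: error %d\n", err);
        return err ? 1 : 0;
    }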
D | hrtimer.c |
    287   # define switch_hrtimer_base(t, b, p) (b)  argument
    1763  struct hrtimer_sleeper *t =  in hrtimer_wakeup() local
    1765  struct task_struct *task = t->task;  in hrtimer_wakeup()
    1767  t->task = NULL;  in hrtimer_wakeup()
    1865  static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)  in do_nanosleep() argument
    1871  hrtimer_sleeper_start_expires(t, mode);  in do_nanosleep()
    1873  if (likely(t->task))  in do_nanosleep()
    1876  hrtimer_cancel(&t->timer);  in do_nanosleep()
    1879  } while (t->task && !signal_pending(current));  in do_nanosleep()
    1883  if (!t->task)  in do_nanosleep()
    [all …]
|
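Beyond the nanosleep path shown above, hrtimers are used directly by drivers. A minimal kernel-side sketch with a hypothetical one-shot timer; the callback runs in hard-irq context and returns HRTIMER_NORESTART so the timer is not re-armed.

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>
    #include <linux/printk.h>

    static struct hrtimer my_timer;

    static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
    {
        pr_info("hrtimer fired\n");
        return HRTIMER_NORESTART;        /* stay one-shot */
    }

    static void my_timer_arm(void)
    {
        hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        my_timer.function = my_timer_fn;
        /* Fire 100 ms from now, relative to CLOCK_MONOTONIC. */
        hrtimer_start(&my_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
    }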
D | timekeeping_debug.c |
    46    void tk_debug_account_sleep_time(const struct timespec64 *t)  in tk_debug_account_sleep_time() argument
    49    int bin = min(fls(t->tv_sec), NUM_BINS-1);  in tk_debug_account_sleep_time()
    53    (s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC);  in tk_debug_account_sleep_time()
|
D | itimer.c |
    58    u64 t, samples[CPUCLOCK_MAX];  in get_cpu_itimer() local
    61    t = samples[clock_id];  in get_cpu_itimer()
    63    if (val < t)  in get_cpu_itimer()
    67    val -= t;  in get_cpu_itimer()
    182   #define timeval_valid(t) \  argument
    183   (((t)->tv_sec >= 0) && (((unsigned long) (t)->tv_usec) < USEC_PER_SEC))
|
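The timeval_valid() macro above is what rejects malformed setitimer() arguments. A small userspace sketch of a periodic ITIMER_REAL timer:

    #include <signal.h>
    #include <stdio.h>
    #include <sys/time.h>
    #include <unistd.h>

    static void on_alarm(int sig)
    {
        (void)sig;                       /* just interrupt pause() */
    }

    int main(void)
    {
        /* Both fields must pass timeval_valid(): tv_sec >= 0, tv_usec < 1e6. */
        struct itimerval it = {
            .it_value    = { .tv_sec = 1, .tv_usec = 0 },      /* first expiry */
            .it_interval = { .tv_sec = 0, .tv_usec = 500000 }, /* then every 500 ms */
        };

        signal(SIGALRM, on_alarm);
        if (setitimer(ITIMER_REAL, &it, NULL) == -1) {
            perror("setitimer");
            return 1;
        }
        pause();                         /* wait for the first SIGALRM */
        return 0;
    }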
/kernel/sched/ |
D | stats.h |
    145   static inline void sched_info_reset_dequeued(struct task_struct *t)  in sched_info_reset_dequeued() argument
    147   t->sched_info.last_queued = 0;  in sched_info_reset_dequeued()
    156   static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)  in sched_info_dequeued() argument
    161   if (t->sched_info.last_queued)  in sched_info_dequeued()
    162   delta = now - t->sched_info.last_queued;  in sched_info_dequeued()
    164   sched_info_reset_dequeued(t);  in sched_info_dequeued()
    165   t->sched_info.run_delay += delta;  in sched_info_dequeued()
    175   static void sched_info_arrive(struct rq *rq, struct task_struct *t)  in sched_info_arrive() argument
    179   if (t->sched_info.last_queued)  in sched_info_arrive()
    180   delta = now - t->sched_info.last_queued;  in sched_info_arrive()
    [all …]
|
D | psi.c |
    493   struct psi_trigger *t;  in init_triggers() local
    495   list_for_each_entry(t, &group->triggers, node)  in init_triggers()
    496   window_reset(&t->win, now,  in init_triggers()
    497   group->total[PSI_POLL][t->state], 0);  in init_triggers()
    505   struct psi_trigger *t;  in update_triggers() local
    513   list_for_each_entry(t, &group->triggers, node) {  in update_triggers()
    517   if (group->polling_total[t->state] == total[t->state])  in update_triggers()
    529   growth = window_update(&t->win, now, total[t->state]);  in update_triggers()
    530   if (growth < t->threshold)  in update_triggers()
    534   if (now < t->last_event_time + t->win.size)  in update_triggers()
    [all …]
|
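update_triggers() above compares measured stall growth against user-created PSI triggers. A userspace sketch along the lines of the PSI documentation, registering a trigger on /proc/pressure/memory and polling for events; the threshold/window values are arbitrary examples.

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        /* 150 ms of "some" memory stall within a 1 s window. */
        const char trig[] = "some 150000 1000000";
        struct pollfd pfd;
        int fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);

        if (fd < 0 || write(fd, trig, strlen(trig) + 1) < 0) {
            perror("psi trigger");
            return 1;
        }

        pfd.fd = fd;
        pfd.events = POLLPRI;            /* trigger events arrive as POLLPRI */
        while (poll(&pfd, 1, -1) > 0) {
            if (pfd.revents & POLLERR)
                break;                   /* trigger was torn down */
            if (pfd.revents & POLLPRI)
                printf("memory pressure event\n");
        }
        close(fd);
        return 0;
    }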
D | cputime.c |
    277   static inline u64 read_sum_exec_runtime(struct task_struct *t)  in read_sum_exec_runtime() argument
    279   return t->se.sum_exec_runtime;  in read_sum_exec_runtime()
    282   static u64 read_sum_exec_runtime(struct task_struct *t)  in read_sum_exec_runtime() argument
    288   rq = task_rq_lock(t, &rf);  in read_sum_exec_runtime()
    289   ns = t->se.sum_exec_runtime;  in read_sum_exec_runtime()
    290   task_rq_unlock(rq, t, &rf);  in read_sum_exec_runtime()
    304   struct task_struct *t;  in thread_group_cputime() local
    329   for_each_thread(tsk, t) {  in thread_group_cputime()
    330   task_cputime(t, &utime, &stime);  in thread_group_cputime()
    333   times->sum_exec_runtime += read_sum_exec_runtime(t);  in thread_group_cputime()
    [all …]
|
D | completion.c |
    204   long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);  in wait_for_completion_interruptible() local
    205   if (t == -ERESTARTSYS)  in wait_for_completion_interruptible()
    206   return t;  in wait_for_completion_interruptible()
    241   long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);  in wait_for_completion_killable() local
    242   if (t == -ERESTARTSYS)  in wait_for_completion_killable()
    243   return t;  in wait_for_completion_killable()
|
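wait_for_completion_interruptible() and wait_for_completion_killable() are the blocking half of the completion API. A minimal kernel-side sketch pairing them with complete() from a hypothetical worker kthread:

    #include <linux/completion.h>
    #include <linux/err.h>
    #include <linux/kthread.h>

    static DECLARE_COMPLETION(work_done);

    static int my_worker(void *data)
    {
        /* ... do the work ... */
        complete(&work_done);            /* wake the waiter below */
        return 0;
    }

    static int start_and_wait(void)
    {
        struct task_struct *worker = kthread_run(my_worker, NULL, "my_worker");

        if (IS_ERR(worker))
            return PTR_ERR(worker);

        /* Returns 0 once complete() has run, or -ERESTARTSYS if a signal
         * arrives first, matching the snippets above. */
        return wait_for_completion_interruptible(&work_done);
    }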
/kernel/rcu/ |
D | update.c |
    598   static void check_holdout_task(struct task_struct *t,  in check_holdout_task() argument
    603   if (!READ_ONCE(t->rcu_tasks_holdout) ||  in check_holdout_task()
    604   t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||  in check_holdout_task()
    605   !READ_ONCE(t->on_rq) ||  in check_holdout_task()
    607   !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {  in check_holdout_task()
    608   WRITE_ONCE(t->rcu_tasks_holdout, false);  in check_holdout_task()
    609   list_del_init(&t->rcu_tasks_holdout_list);  in check_holdout_task()
    610   put_task_struct(t);  in check_holdout_task()
    613   rcu_request_urgent_qs_task(t);  in check_holdout_task()
    620   cpu = task_cpu(t);  in check_holdout_task()
    [all …]
|
D | tree_plugin.h |
    85    static void rcu_read_unlock_special(struct task_struct *t);
    137   struct task_struct *t = current;  in rcu_preempt_ctxt_queue() local
    164   list_add(&t->rcu_node_entry, &rnp->blkd_tasks);  in rcu_preempt_ctxt_queue()
    182   list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);  in rcu_preempt_ctxt_queue()
    195   list_add(&t->rcu_node_entry, rnp->exp_tasks);  in rcu_preempt_ctxt_queue()
    206   list_add(&t->rcu_node_entry, rnp->gp_tasks);  in rcu_preempt_ctxt_queue()
    223   rnp->gp_tasks = &t->rcu_node_entry;  in rcu_preempt_ctxt_queue()
    227   rnp->exp_tasks = &t->rcu_node_entry;  in rcu_preempt_ctxt_queue()
    287   struct task_struct *t = current;  in rcu_note_context_switch() local
    293   WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);  in rcu_note_context_switch()
    [all …]
|
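tree_plugin.h is the preemptible-RCU machinery that tracks readers blocked inside rcu_read_lock() sections. A generic reader/updater sketch of the API those sections belong to, using hypothetical struct and function names:

    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct my_conf {
        int value;
        struct rcu_head rcu;
    };

    static struct my_conf __rcu *cur_conf;
    static DEFINE_MUTEX(conf_lock);

    /* Reader: if this section is preempted under CONFIG_PREEMPT_RCU, the task
     * is queued on rnp->blkd_tasks by rcu_preempt_ctxt_queue() above. */
    static int my_conf_read(void)
    {
        struct my_conf *c;
        int v;

        rcu_read_lock();
        c = rcu_dereference(cur_conf);
        v = c ? c->value : -1;
        rcu_read_unlock();
        return v;
    }

    /* Updater: publish a new version, free the old one after a grace period. */
    static int my_conf_update(int v)
    {
        struct my_conf *newc, *oldc;

        newc = kmalloc(sizeof(*newc), GFP_KERNEL);
        if (!newc)
            return -ENOMEM;
        newc->value = v;

        mutex_lock(&conf_lock);
        oldc = rcu_dereference_protected(cur_conf, lockdep_is_held(&conf_lock));
        rcu_assign_pointer(cur_conf, newc);
        mutex_unlock(&conf_lock);

        if (oldc)
            kfree_rcu(oldc, rcu);        /* freed only after all readers drop out */
        return 0;
    }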
D | tree_exp.h |
    606   struct task_struct *t = current;  in rcu_exp_handler() local
    613   if (!t->rcu_read_lock_nesting) {  in rcu_exp_handler()
    619   set_tsk_need_resched(t);  in rcu_exp_handler()
    637   if (t->rcu_read_lock_nesting > 0) {  in rcu_exp_handler()
    641   t->rcu_read_unlock_special.b.exp_hint = true;  in rcu_exp_handler()
    666   rcu_preempt_deferred_qs(t);  in rcu_exp_handler()
    668   set_tsk_need_resched(t);  in rcu_exp_handler()
    685   struct task_struct *t;  in rcu_print_task_exp_stall() local
    690   t = list_entry(rnp->exp_tasks->prev,  in rcu_print_task_exp_stall()
    692   list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {  in rcu_print_task_exp_stall()
    [all …]
|
/kernel/trace/ |
D | fgraph.c |
    347   struct task_struct *g, *t;  in alloc_retstack_tasklist() local
    363   do_each_thread(g, t) {  in alloc_retstack_tasklist()
    369   if (t->ret_stack == NULL) {  in alloc_retstack_tasklist()
    370   atomic_set(&t->tracing_graph_pause, 0);  in alloc_retstack_tasklist()
    371   atomic_set(&t->trace_overrun, 0);  in alloc_retstack_tasklist()
    372   t->curr_ret_stack = -1;  in alloc_retstack_tasklist()
    373   t->curr_ret_depth = -1;  in alloc_retstack_tasklist()
    376   t->ret_stack = ret_stack_list[start++];  in alloc_retstack_tasklist()
    378   } while_each_thread(g, t);  in alloc_retstack_tasklist()
    463   graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)  in graph_init_task() argument
    [all …]
|
D | blktrace.c |
    70    struct blk_io_trace *t;  in trace_note() local
    82    sizeof(*t) + len + cgid_len,  in trace_note()
    86    t = ring_buffer_event_data(event);  in trace_note()
    93    t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);  in trace_note()
    94    if (t) {  in trace_note()
    95    t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;  in trace_note()
    96    t->time = ktime_to_ns(ktime_get());  in trace_note()
    98    t->device = bt->dev;  in trace_note()
    99    t->action = action | (cgid ? __BLK_TN_CGROUP : 0);  in trace_note()
    100   t->pid = pid;  in trace_note()
    [all …]
|
D | trace_probe.c |
    272   static int parse_probe_vars(char *arg, const struct fetch_type *t,  in parse_probe_vars() argument
    509   const struct fetch_type *t,  in __parse_bitfield_probe_arg() argument
    535   code->lshift = BYTES_TO_BITS(t->size) - (bw + bo);  in __parse_bitfield_probe_arg()
    536   code->rshift = BYTES_TO_BITS(t->size) - bw;  in __parse_bitfield_probe_arg()
    537   code->basesize = t->size;  in __parse_bitfield_probe_arg()
    539   return (BYTES_TO_BITS(t->size) < (bw + bo)) ? -EINVAL : 0;  in __parse_bitfield_probe_arg()
    547   char *t, *t2, *t3;  in traceprobe_parse_probe_arg_body() local
    563   t = strchr(arg, ':');  in traceprobe_parse_probe_arg_body()
    564   if (t) {  in traceprobe_parse_probe_arg_body()
    565   *t = '\0';  in traceprobe_parse_probe_arg_body()
    [all …]
|
D | trace_mmiotrace.c |
    173   unsigned long long t = ns2usecs(iter->ts);  in mmio_print_rw() local
    174   unsigned long usec_rem = do_div(t, USEC_PER_SEC);  in mmio_print_rw()
    175   unsigned secs = (unsigned long)t;  in mmio_print_rw()
    218   unsigned long long t = ns2usecs(iter->ts);  in mmio_print_map() local
    219   unsigned long usec_rem = do_div(t, USEC_PER_SEC);  in mmio_print_map()
    220   unsigned secs = (unsigned long)t;  in mmio_print_map()
    252   unsigned long long t = ns2usecs(iter->ts);  in mmio_print_mark() local
    253   unsigned long usec_rem = do_div(t, USEC_PER_SEC);  in mmio_print_mark()
    254   unsigned secs = (unsigned long)t;  in mmio_print_mark()
|
D | trace_probe.h |
    171   #define __DEFAULT_FETCH_TYPE(t) x##t  argument
    172   #define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t)  argument
    176   #define __ADDR_FETCH_TYPE(t) u##t  argument
    177   #define _ADDR_FETCH_TYPE(t) __ADDR_FETCH_TYPE(t)  argument
|
/kernel/events/ |
D | uprobes.c |
    1300  struct rb_node *n, *t;  in build_probe_list() local
    1310  for (t = n; t; t = rb_prev(t)) {  in build_probe_list()
    1311  u = rb_entry(t, struct uprobe, rb_node);  in build_probe_list()
    1317  for (t = n; (t = rb_next(t)); ) {  in build_probe_list()
    1318  u = rb_entry(t, struct uprobe, rb_node);  in build_probe_list()
    1718  void uprobe_free_utask(struct task_struct *t)  in uprobe_free_utask() argument
    1720  struct uprobe_task *utask = t->utask;  in uprobe_free_utask()
    1733  xol_free_insn_slot(t);  in uprobe_free_utask()
    1735  t->utask = NULL;  in uprobe_free_utask()
    1753  static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)  in dup_utask() argument
    [all …]
|