/kernel/bpf/ |
D | btf.c |
    256  const struct btf_type *t;  member
    316  const char *btf_type_str(const struct btf_type *t)  in btf_type_str()  argument
    318  return btf_kind_str[BTF_INFO_KIND(t->info)];  in btf_type_str()
    408  const struct btf_type *t,
    421  const struct btf_type *t);
    422  void (*show)(const struct btf *btf, const struct btf_type *t,
    431  const struct btf_type *t, u32 type_id);
    434  const struct btf_type *t);
    436  static bool btf_type_is_modifier(const struct btf_type *t)  in btf_type_is_modifier()  argument
    448  switch (BTF_INFO_KIND(t->info)) {  in btf_type_is_modifier()
    [all …]
|
D | helpers.c |
    1111  struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer);  in bpf_timer_cb()  local
    1112  struct bpf_map *map = t->map;  in bpf_timer_cb()
    1113  void *value = t->value;  in bpf_timer_cb()
    1119  callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());  in bpf_timer_cb()
    1129  this_cpu_write(hrtimer_running, t);  in bpf_timer_cb()
    1152  struct bpf_hrtimer *t;  in BPF_CALL_3()  local
    1169  t = timer->timer;  in BPF_CALL_3()
    1170  if (t) {  in BPF_CALL_3()
    1175  t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);  in BPF_CALL_3()
    1176  if (!t) {  in BPF_CALL_3()
    [all …]
|
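The helpers.c matches above are the kernel side of the bpf_timer helpers. For context, a minimal BPF-program-side sketch is given below; it assumes a libbpf/clang build, and the map name, callback, and attach point are illustrative placeholders rather than anything taken from this file.

/* Sketch only: a struct bpf_timer embedded in an array-map value,
 * initialised, given a callback, and armed from a tracing program. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#ifndef CLOCK_MONOTONIC
#define CLOCK_MONOTONIC 1
#endif

struct elem {
	struct bpf_timer timer;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} timer_map SEC(".maps");

/* Runs out of bpf_timer_cb() shown above, in softirq context. */
static int timer_fired(void *map, int *key, struct elem *val)
{
	return 0;		/* one-shot: do not re-arm */
}

SEC("fentry/bpf_fentry_test1")
int arm_timer(void *ctx)
{
	int key = 0;
	struct elem *val = bpf_map_lookup_elem(&timer_map, &key);

	if (!val)
		return 0;
	bpf_timer_init(&val->timer, &timer_map, CLOCK_MONOTONIC);
	bpf_timer_set_callback(&val->timer, timer_fired);
	bpf_timer_start(&val->timer, 1000000 /* 1 ms */, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";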
D | bpf_struct_ops.c |
    109  const struct btf_type *t;  in bpf_struct_ops_init()  local
    152  t = btf_type_by_id(btf, type_id);  in bpf_struct_ops_init()
    153  if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) {  in bpf_struct_ops_init()
    155  btf_type_vlen(t), st_ops->name);  in bpf_struct_ops_init()
    159  for_each_member(j, t, member) {  in bpf_struct_ops_init()
    169  if (__btf_member_bitfield_size(t, member)) {  in bpf_struct_ops_init()
    188  if (j == btf_type_vlen(t)) {  in bpf_struct_ops_init()
    194  st_ops->type = t;  in bpf_struct_ops_init()
    282  const struct btf_type *t = st_map->st_ops->type;  in bpf_struct_ops_map_put_progs()  local
    285  for (i = 0; i < btf_type_vlen(t); i++) {  in bpf_struct_ops_map_put_progs()
    [all …]
|
/kernel/rcu/ |
D | tasks.h |
    18   typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
    587  struct task_struct *t;  in rcu_spawn_tasks_kthread_generic()  local
    589  t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);  in rcu_spawn_tasks_kthread_generic()
    590  …if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavio…
    653  static void exit_tasks_rcu_finish_trace(struct task_struct *t);
    672  struct task_struct *t;  in rcu_tasks_wait_gp()  local
    686  for_each_process_thread(g, t)  in rcu_tasks_wait_gp()
    687  rtp->pertask_func(t, &holdouts);  in rcu_tasks_wait_gp()
    826  static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)  in rcu_tasks_pertask()  argument
    828  if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {  in rcu_tasks_pertask()
    [all …]
|
D | tree_plugin.h |
    106  static void rcu_read_unlock_special(struct task_struct *t);
    158  struct task_struct *t = current;  in rcu_preempt_ctxt_queue()  local
    185  list_add(&t->rcu_node_entry, &rnp->blkd_tasks);  in rcu_preempt_ctxt_queue()
    203  list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);  in rcu_preempt_ctxt_queue()
    216  list_add(&t->rcu_node_entry, rnp->exp_tasks);  in rcu_preempt_ctxt_queue()
    227  list_add(&t->rcu_node_entry, rnp->gp_tasks);  in rcu_preempt_ctxt_queue()
    244  WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);  in rcu_preempt_ctxt_queue()
    248  WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);  in rcu_preempt_ctxt_queue()
    314  struct task_struct *t = current;  in rcu_note_context_switch()  local
    322  !t->rcu_read_unlock_special.b.blocked) {  in rcu_note_context_switch()
    [all …]
|
D | tree_stall.h |
    246  struct task_struct *t;  in rcu_print_detail_task_stall_rnp()  local
    253  t = list_entry(rnp->gp_tasks->prev,  in rcu_print_detail_task_stall_rnp()
    255  list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {  in rcu_print_detail_task_stall_rnp()
    261  sched_show_task(t);  in rcu_print_detail_task_stall_rnp()
    277  static int check_slow_task(struct task_struct *t, void *arg)  in check_slow_task()  argument
    281  if (task_curr(t))  in check_slow_task()
    283  rscrp->nesting = t->rcu_read_lock_nesting;  in check_slow_task()
    284  rscrp->rs = t->rcu_read_unlock_special;  in check_slow_task()
    285  rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);  in check_slow_task()
    299  struct task_struct *t;  in rcu_print_task_stall()  local
    [all …]
|
/kernel/ |
D | kcov.c |
    63   struct task_struct *t;  member
    164  static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)  in check_kcov_mode()  argument
    173  if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))  in check_kcov_mode()
    175  mode = READ_ONCE(t->kcov_mode);  in check_kcov_mode()
    201  struct task_struct *t;  in __sanitizer_cov_trace_pc()  local
    206  t = current;  in __sanitizer_cov_trace_pc()
    207  if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))  in __sanitizer_cov_trace_pc()
    210  area = t->kcov_area;  in __sanitizer_cov_trace_pc()
    213  if (likely(pos < t->kcov_size)) {  in __sanitizer_cov_trace_pc()
    231  struct task_struct *t;  in write_comp_data()  local
    [all …]
|
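The check_kcov_mode()/__sanitizer_cov_trace_pc() paths above implement the documented per-task coverage interface. A minimal userspace sketch of that interface follows, based on the kcov documentation; the buffer size and debugfs path are the usual defaults, and error handling is omitted for brevity.

/* Sketch of the documented kcov pattern: init, mmap the coverage buffer,
 * enable PC tracing, run the code under test, then read the PCs back. */
#include <linux/kcov.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define COVER_SIZE (64 << 10)	/* number of unsigned longs */

int main(void)
{
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
	unsigned long *cover, n, i;

	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);

	read(-1, NULL, 0);	/* the syscall whose coverage we want */

	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);
	ioctl(fd, KCOV_DISABLE, 0);
	return 0;
}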
D | softirq.c |
    783  static void __tasklet_schedule_common(struct tasklet_struct *t,  in __tasklet_schedule_common()  argument
    792  t->next = NULL;  in __tasklet_schedule_common()
    793  *head->tail = t;  in __tasklet_schedule_common()
    794  head->tail = &(t->next);  in __tasklet_schedule_common()
    799  void __tasklet_schedule(struct tasklet_struct *t)  in __tasklet_schedule()  argument
    801  __tasklet_schedule_common(t, &tasklet_vec,  in __tasklet_schedule()
    806  void __tasklet_hi_schedule(struct tasklet_struct *t)  in __tasklet_hi_schedule()  argument
    808  __tasklet_schedule_common(t, &tasklet_hi_vec,  in __tasklet_hi_schedule()
    813  static bool tasklet_clear_sched(struct tasklet_struct *t)  in tasklet_clear_sched()  argument
    815  if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {  in tasklet_clear_sched()
    [all …]
|
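The softirq.c hits above are the queueing side of the tasklet API. A minimal sketch of the caller's side follows, using the current-style callback that takes the tasklet_struct itself; the device structure and function names are hypothetical.

/* Sketch only: declare, initialise, and schedule a tasklet from a driver. */
#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct tasklet;
	int pending;			/* hypothetical per-device state */
};

static void my_tasklet_fn(struct tasklet_struct *t)
{
	struct my_dev *dev = from_tasklet(dev, t, tasklet);

	dev->pending = 0;		/* deferred work, runs in softirq context */
}

static void my_dev_init(struct my_dev *dev)
{
	tasklet_setup(&dev->tasklet, my_tasklet_fn);
}

/* Typically called from the device's interrupt handler. */
static void my_dev_irq(struct my_dev *dev)
{
	dev->pending = 1;
	tasklet_schedule(&dev->tasklet);	/* ends up in __tasklet_schedule() above */
}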
D | hung_task.c |
    92   static void check_hung_task(struct task_struct *t, unsigned long timeout)  in check_hung_task()  argument
    94   unsigned long switch_count = t->nvcsw + t->nivcsw;  in check_hung_task()
    100  if (unlikely(READ_ONCE(t->__state) & TASK_FROZEN))  in check_hung_task()
    111  if (switch_count != t->last_switch_count) {  in check_hung_task()
    112  t->last_switch_count = switch_count;  in check_hung_task()
    113  t->last_switch_time = jiffies;  in check_hung_task()
    116  if (time_is_after_jiffies(t->last_switch_time + timeout * HZ))  in check_hung_task()
    119  trace_sched_process_hang(t);  in check_hung_task()
    135  t->comm, t->pid, (jiffies - t->last_switch_time) / HZ);  in check_hung_task()
    142  sched_show_task(t);  in check_hung_task()
    [all …]
|
D | rseq.c |
    85   static int rseq_update_cpu_id(struct task_struct *t)  in rseq_update_cpu_id()  argument
    88   struct rseq __user *rseq = t->rseq;  in rseq_update_cpu_id()
    95   trace_rseq_update(t);  in rseq_update_cpu_id()
    104  static int rseq_reset_rseq_cpu_id(struct task_struct *t)  in rseq_reset_rseq_cpu_id()  argument
    111  if (put_user(cpu_id_start, &t->rseq->cpu_id_start))  in rseq_reset_rseq_cpu_id()
    118  if (put_user(cpu_id, &t->rseq->cpu_id))  in rseq_reset_rseq_cpu_id()
    123  static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)  in rseq_get_rseq_cs()  argument
    132  if (get_user(ptr, &t->rseq->rseq_cs))  in rseq_get_rseq_cs()
    135  if (copy_from_user(&ptr, &t->rseq->rseq_cs, sizeof(ptr)))  in rseq_get_rseq_cs()
    189  static int rseq_need_restart(struct task_struct *t, u32 cs_flags)  in rseq_need_restart()  argument
    [all …]
|
D | signal.c |
    71   static void __user *sig_handler(struct task_struct *t, int sig)  in sig_handler()  argument
    73   return t->sighand->action[sig - 1].sa.sa_handler;  in sig_handler()
    83   static bool sig_task_ignored(struct task_struct *t, int sig, bool force)  in sig_task_ignored()  argument
    87   handler = sig_handler(t, sig);  in sig_task_ignored()
    90   if (unlikely(is_global_init(t) && sig_kernel_only(sig)))  in sig_task_ignored()
    93   if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&  in sig_task_ignored()
    98   if (unlikely((t->flags & PF_KTHREAD) &&  in sig_task_ignored()
    105  static bool sig_ignored(struct task_struct *t, int sig, bool force)  in sig_ignored()  argument
    112  if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))  in sig_ignored()
    120  if (t->ptrace && sig != SIGKILL)  in sig_ignored()
    [all …]
|
D | capability.c |
    294  bool has_ns_capability(struct task_struct *t,  in has_ns_capability()  argument
    300  ret = security_capable(__task_cred(t), ns, cap, CAP_OPT_NONE);  in has_ns_capability()
    316  bool has_capability(struct task_struct *t, int cap)  in has_capability()  argument
    318  return has_ns_capability(t, &init_user_ns, cap);  in has_capability()
    335  bool has_ns_capability_noaudit(struct task_struct *t,  in has_ns_capability_noaudit()  argument
    341  ret = security_capable(__task_cred(t), ns, cap, CAP_OPT_NOAUDIT);  in has_ns_capability_noaudit()
    359  bool has_capability_noaudit(struct task_struct *t, int cap)  in has_capability_noaudit()  argument
    361  return has_ns_capability_noaudit(t, &init_user_ns, cap);  in has_capability_noaudit()
|
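has_capability() and its variants above test a capability of an arbitrary task rather than of current. A one-function sketch of a typical in-kernel call site (the wrapper function is hypothetical):

#include <linux/capability.h>
#include <linux/sched.h>

/* Does @task have CAP_SYS_ADMIN in the initial user namespace? */
static bool task_may_admin(struct task_struct *task)
{
	return has_capability(task, CAP_SYS_ADMIN);
}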
/kernel/time/ |
D | timeconst.bc |
    6    auto t;
    8    t = b;
    10   a = t;
    66   print "#define HZ_TO_MSEC_SHR32\t", s, "\n"
    73   print "#define MSEC_TO_HZ_SHR32\t", s, "\n"
    77   print "#define HZ_TO_MSEC_NUM\t\t", 1000/cd, "\n"
    78   print "#define HZ_TO_MSEC_DEN\t\t", hz/cd, "\n"
    79   print "#define MSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
    80   print "#define MSEC_TO_HZ_DEN\t\t", 1000/cd, "\n"
    88   print "#define HZ_TO_USEC_SHR32\t", s, "\n"
    [all …]
|
D | posix-stubs.c |
    132  struct timespec64 t;  in SYSCALL_DEFINE4()  local
    144  if (get_timespec64(&t, rqtp))  in SYSCALL_DEFINE4()
    146  if (!timespec64_valid(&t))  in SYSCALL_DEFINE4()
    153  texp = timespec64_to_ktime(t);  in SYSCALL_DEFINE4()
    226  struct timespec64 t;  in SYSCALL_DEFINE4()  local
    238  if (get_old_timespec32(&t, rqtp))  in SYSCALL_DEFINE4()
    240  if (!timespec64_valid(&t))  in SYSCALL_DEFINE4()
    247  texp = timespec64_to_ktime(t);  in SYSCALL_DEFINE4()
|
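posix-stubs.c carries the small-config fallback for clock_nanosleep(); the SYSCALL_DEFINE4 bodies above validate the user timespec before converting it to ktime. From userspace the same path is reached through the ordinary libc wrapper; a small illustrative helper:

#include <time.h>

/* Relative sleep of @ms milliseconds on CLOCK_MONOTONIC; returns 0 or an
 * error number (EINVAL if the timespec fails the validity check above). */
static int sleep_ms(long ms)
{
	struct timespec req = {
		.tv_sec  = ms / 1000,
		.tv_nsec = (ms % 1000) * 1000000L,
	};

	return clock_nanosleep(CLOCK_MONOTONIC, 0, &req, NULL);
}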
D | hrtimer.c |
    291   # define switch_hrtimer_base(t, b, p) (b)  argument
    1933  struct hrtimer_sleeper *t =  in hrtimer_wakeup()  local
    1935  struct task_struct *task = t->task;  in hrtimer_wakeup()
    1937  t->task = NULL;  in hrtimer_wakeup()
    2035  static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)  in do_nanosleep()  argument
    2041  hrtimer_sleeper_start_expires(t, mode);  in do_nanosleep()
    2043  if (likely(t->task))  in do_nanosleep()
    2046  hrtimer_cancel(&t->timer);  in do_nanosleep()
    2049  } while (t->task && !signal_pending(current));  in do_nanosleep()
    2053  if (!t->task)  in do_nanosleep()
    [all …]
|
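do_nanosleep() and hrtimer_wakeup() above build on the generic hrtimer pattern: embed a struct hrtimer and recover the enclosing object in the expiry callback with container_of(). A minimal sketch of that pattern outside the sleeper code, with hypothetical names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct my_timeout {
	struct hrtimer timer;
	bool fired;
};

static enum hrtimer_restart my_timeout_fn(struct hrtimer *timer)
{
	struct my_timeout *mt = container_of(timer, struct my_timeout, timer);

	mt->fired = true;
	return HRTIMER_NORESTART;	/* one-shot timer */
}

static void my_timeout_arm(struct my_timeout *mt, u64 delay_ns)
{
	hrtimer_init(&mt->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	mt->timer.function = my_timeout_fn;
	hrtimer_start(&mt->timer, ns_to_ktime(delay_ns), HRTIMER_MODE_REL);
}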
D | timekeeping_debug.c |
    46  void tk_debug_account_sleep_time(const struct timespec64 *t)  in tk_debug_account_sleep_time()  argument
    49  int bin = min(fls(t->tv_sec), NUM_BINS-1);  in tk_debug_account_sleep_time()
    53  (s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC);  in tk_debug_account_sleep_time()
|
/kernel/sched/ |
D | psi.c |
    178  static void poll_timer_fn(struct timer_list *t);
    497  struct psi_trigger *t;  in init_triggers()  local
    499  list_for_each_entry(t, &group->triggers, node)  in init_triggers()
    500  window_reset(&t->win, now,  in init_triggers()
    501  group->total[PSI_POLL][t->state], 0);  in init_triggers()
    509  struct psi_trigger *t;  in update_triggers()  local
    517  list_for_each_entry(t, &group->triggers, node) {  in update_triggers()
    521  new_stall = group->polling_total[t->state] != total[t->state];  in update_triggers()
    524  if (!new_stall && !t->pending_event)  in update_triggers()
    542  growth = window_update(&t->win, now, total[t->state]);  in update_triggers()
    [all …]
|
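update_triggers() above evaluates the windows registered through the documented PSI trigger interface. A minimal userspace sketch of registering one such trigger, drawn from the PSI documentation; the threshold and window values are only examples (150 ms of "some" memory stall within a 1 s window):

#include <fcntl.h>
#include <poll.h>
#include <string.h>
#include <unistd.h>

/* Returns the trigger fd (keeping it open keeps the trigger registered),
 * or -1 on error. The first poll() blocks until the trigger fires. */
static int psi_memory_trigger(void)
{
	const char trig[] = "some 150000 1000000";
	int fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	struct pollfd pfd;

	if (fd < 0)
		return -1;
	if (write(fd, trig, strlen(trig) + 1) < 0) {
		close(fd);
		return -1;
	}
	pfd.fd = fd;
	pfd.events = POLLPRI;
	poll(&pfd, 1, -1);
	return fd;
}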
D | stats.h |
    219  static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)  in sched_info_dequeue()  argument
    223  if (!t->sched_info.last_queued)  in sched_info_dequeue()
    226  delta = rq_clock(rq) - t->sched_info.last_queued;  in sched_info_dequeue()
    227  t->sched_info.last_queued = 0;  in sched_info_dequeue()
    228  t->sched_info.run_delay += delta;  in sched_info_dequeue()
    238  static void sched_info_arrive(struct rq *rq, struct task_struct *t)  in sched_info_arrive()  argument
    242  if (!t->sched_info.last_queued)  in sched_info_arrive()
    246  delta = now - t->sched_info.last_queued;  in sched_info_arrive()
    247  t->sched_info.last_queued = 0;  in sched_info_arrive()
    248  t->sched_info.run_delay += delta;  in sched_info_arrive()
    [all …]
|
D | completion.c |
    206  long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);  in wait_for_completion_interruptible()  local
    208  if (t == -ERESTARTSYS)  in wait_for_completion_interruptible()
    209  return t;  in wait_for_completion_interruptible()
    244  long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);  in wait_for_completion_killable()  local
    246  if (t == -ERESTARTSYS)  in wait_for_completion_killable()
    247  return t;  in wait_for_completion_killable()
    254  long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, state);  in wait_for_completion_state()  local
    256  if (t == -ERESTARTSYS)  in wait_for_completion_state()
    257  return t;  in wait_for_completion_state()
|
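The completion.c matches above are the interruptible/killable wait paths: -ERESTARTSYS propagates when a signal interrupts the sleep, otherwise the wait returns 0 once complete() runs. A minimal sketch of a caller (hypothetical driver code):

#include <linux/completion.h>

static DECLARE_COMPLETION(my_done);

static int my_wait_for_hw(void)
{
	int ret = wait_for_completion_interruptible(&my_done);

	if (ret == -ERESTARTSYS)
		return ret;		/* interrupted by a signal */
	return 0;
}

/* e.g. from an interrupt handler when the hardware finishes */
static void my_hw_irq_done(void)
{
	complete(&my_done);
}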
D | cputime.c |
    300  static inline u64 read_sum_exec_runtime(struct task_struct *t)  in read_sum_exec_runtime()  argument
    302  return t->se.sum_exec_runtime;  in read_sum_exec_runtime()
    305  static u64 read_sum_exec_runtime(struct task_struct *t)  in read_sum_exec_runtime()  argument
    311  rq = task_rq_lock(t, &rf);  in read_sum_exec_runtime()
    312  ns = t->se.sum_exec_runtime;  in read_sum_exec_runtime()
    313  task_rq_unlock(rq, t, &rf);  in read_sum_exec_runtime()
    327  struct task_struct *t;  in thread_group_cputime()  local
    352  for_each_thread(tsk, t) {  in thread_group_cputime()
    353  task_cputime(t, &utime, &stime);  in thread_group_cputime()
    356  times->sum_exec_runtime += read_sum_exec_runtime(t);  in thread_group_cputime()
    [all …]
|
/kernel/trace/ |
D | fgraph.c |
    385  struct task_struct *g, *t;  in alloc_retstack_tasklist()  local
    401  for_each_process_thread(g, t) {  in alloc_retstack_tasklist()
    407  if (t->ret_stack == NULL) {  in alloc_retstack_tasklist()
    408  atomic_set(&t->trace_overrun, 0);  in alloc_retstack_tasklist()
    409  t->curr_ret_stack = -1;  in alloc_retstack_tasklist()
    410  t->curr_ret_depth = -1;  in alloc_retstack_tasklist()
    413  t->ret_stack = ret_stack_list[start++];  in alloc_retstack_tasklist()
    502  graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)  in graph_init_task()  argument
    504  atomic_set(&t->trace_overrun, 0);  in graph_init_task()
    505  t->ftrace_timestamp = 0;  in graph_init_task()
    [all …]
|
D | blktrace.c |
    72   struct blk_io_trace *t;  in trace_note()  local
    84   sizeof(*t) + len + cgid_len,  in trace_note()
    88   t = ring_buffer_event_data(event);  in trace_note()
    95   t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);  in trace_note()
    96   if (t) {  in trace_note()
    97   t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;  in trace_note()
    98   t->time = ktime_to_ns(ktime_get());  in trace_note()
    100  t->device = bt->dev;  in trace_note()
    101  t->action = action | (cgid ? __BLK_TN_CGROUP : 0);  in trace_note()
    102  t->pid = pid;  in trace_note()
    [all …]
|
D | trace_probe.c |
    281  static int parse_probe_vars(char *arg, const struct fetch_type *t,  in parse_probe_vars()  argument
    532  const struct fetch_type *t,  in __parse_bitfield_probe_arg()  argument
    558  code->lshift = BYTES_TO_BITS(t->size) - (bw + bo);  in __parse_bitfield_probe_arg()
    559  code->rshift = BYTES_TO_BITS(t->size) - bw;  in __parse_bitfield_probe_arg()
    560  code->basesize = t->size;  in __parse_bitfield_probe_arg()
    562  return (BYTES_TO_BITS(t->size) < (bw + bo)) ? -EINVAL : 0;  in __parse_bitfield_probe_arg()
    570  char *t, *t2, *t3;  in traceprobe_parse_probe_arg_body()  local
    594  t = strchr(arg, ':');  in traceprobe_parse_probe_arg_body()
    595  if (t) {  in traceprobe_parse_probe_arg_body()
    596  *t = '\0';  in traceprobe_parse_probe_arg_body()
    [all …]
|
/kernel/futex/ |
D | syscalls.c |
    153  futex_init_timeout(u32 cmd, u32 op, struct timespec64 *ts, ktime_t *t)  in futex_init_timeout()  argument
    158  *t = timespec64_to_ktime(*ts);  in futex_init_timeout()
    160  *t = ktime_add_safe(ktime_get(), *t);  in futex_init_timeout()
    162  *t = timens_ktime_to_host(CLOCK_MONOTONIC, *t);  in futex_init_timeout()
    171  ktime_t t, *tp = NULL;  in SYSCALL_DEFINE6()  local
    179  ret = futex_init_timeout(cmd, op, &ts, &t);  in SYSCALL_DEFINE6()
    182  tp = &t;  in SYSCALL_DEFINE6()
    366  ktime_t t, *tp = NULL;  in SYSCALL_DEFINE6()  local
    372  ret = futex_init_timeout(cmd, op, &ts, &t);  in SYSCALL_DEFINE6()
    375  tp = &t;  in SYSCALL_DEFINE6()
|
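futex_init_timeout() above turns the relative timespec of a FUTEX_WAIT into an absolute ktime by adding it to ktime_get(). On the userspace side the relative timeout is passed straight to the syscall; since glibc provides no futex wrapper, a raw-syscall sketch:

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <time.h>
#include <stdint.h>

/* Block until *uaddr != expected, a wake-up, the relative timeout, or a
 * signal; returns 0 on wake-up, -1 with errno (ETIMEDOUT, EAGAIN, EINTR)
 * otherwise. */
static int futex_wait(uint32_t *uaddr, uint32_t expected,
		      const struct timespec *rel_timeout)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAIT, expected,
		       rel_timeout, NULL, 0);
}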
/kernel/events/ |
D | uprobes.c |
    1297  struct rb_node *n, *t;  in build_probe_list()  local
    1307  for (t = n; t; t = rb_prev(t)) {  in build_probe_list()
    1308  u = rb_entry(t, struct uprobe, rb_node);  in build_probe_list()
    1314  for (t = n; (t = rb_next(t)); ) {  in build_probe_list()
    1315  u = rb_entry(t, struct uprobe, rb_node);  in build_probe_list()
    1715  void uprobe_free_utask(struct task_struct *t)  in uprobe_free_utask()  argument
    1717  struct uprobe_task *utask = t->utask;  in uprobe_free_utask()
    1730  xol_free_insn_slot(t);  in uprobe_free_utask()
    1732  t->utask = NULL;  in uprobe_free_utask()
    1750  static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)  in dup_utask()  argument
    [all …]
|