
Searched refs:t (Results 1 – 25 of 79) sorted by relevance


/kernel/rcu/
tasks.h
17 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
247 struct task_struct *t; in rcu_spawn_tasks_kthread_generic() local
249 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); in rcu_spawn_tasks_kthread_generic()
250 …if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavio… in rcu_spawn_tasks_kthread_generic()
295 static void exit_tasks_rcu_finish_trace(struct task_struct *t);
306 struct task_struct *g, *t; in rcu_tasks_wait_gp() local
322 for_each_process_thread(g, t) in rcu_tasks_wait_gp()
323 rtp->pertask_func(t, &holdouts); in rcu_tasks_wait_gp()
406 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop) in rcu_tasks_pertask() argument
408 if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) { in rcu_tasks_pertask()
[all …]
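Line 249 above shows the standard kthread_run() idiom: spawn a named kernel thread and WARN if creation fails. A minimal module sketch of the same pattern, with hypothetical demo_* names:

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>

static struct task_struct *demo_task;

static int demo_thread_fn(void *data)
{
        while (!kthread_should_stop())
                msleep(1000);   /* periodic work would go here */
        return 0;
}

static int __init demo_init(void)
{
        demo_task = kthread_run(demo_thread_fn, NULL, "demo_kthread");
        if (WARN_ONCE(IS_ERR(demo_task), "could not start demo kthread\n"))
                return PTR_ERR(demo_task);
        return 0;
}

static void __exit demo_exit(void)
{
        kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");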
tree_plugin.h
89 static void rcu_read_unlock_special(struct task_struct *t);
141 struct task_struct *t = current; in rcu_preempt_ctxt_queue() local
168 list_add(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
186 list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_ctxt_queue()
199 list_add(&t->rcu_node_entry, rnp->exp_tasks); in rcu_preempt_ctxt_queue()
210 list_add(&t->rcu_node_entry, rnp->gp_tasks); in rcu_preempt_ctxt_queue()
227 WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
231 WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry); in rcu_preempt_ctxt_queue()
291 struct task_struct *t = current; in rcu_note_context_switch() local
299 !t->rcu_read_unlock_special.b.blocked) { in rcu_note_context_switch()
[all …]
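rcu_preempt_ctxt_queue() runs when a task is preempted inside an RCU read-side critical section; from a reader's point of view the API is just rcu_read_lock()/rcu_read_unlock(). A hedged sketch (demo_* names hypothetical):

#include <linux/rcupdate.h>

struct demo_data {
        int val;
};

static struct demo_data __rcu *demo_ptr;

static int demo_read(void)
{
        struct demo_data *p;
        int val = -1;

        rcu_read_lock();        /* if preempted here on PREEMPT_RCU,
                                 * rcu_preempt_ctxt_queue() puts this task on
                                 * rnp->blkd_tasks until the unlock */
        p = rcu_dereference(demo_ptr);
        if (p)
                val = p->val;
        rcu_read_unlock();
        return val;
}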
tree_stall.h
209 struct task_struct *t; in rcu_print_detail_task_stall_rnp() local
216 t = list_entry(rnp->gp_tasks->prev, in rcu_print_detail_task_stall_rnp()
218 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { in rcu_print_detail_task_stall_rnp()
224 sched_show_task(t); in rcu_print_detail_task_stall_rnp()
240 static bool check_slow_task(struct task_struct *t, void *arg) in check_slow_task() argument
244 if (task_curr(t)) in check_slow_task()
246 rscrp->nesting = t->rcu_read_lock_nesting; in check_slow_task()
247 rscrp->rs = t->rcu_read_unlock_special; in check_slow_task()
248 rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry); in check_slow_task()
262 struct task_struct *t; in rcu_print_task_stall() local
[all …]
/kernel/bpf/
btf.c
221 const struct btf_type *t; member
277 static const char *btf_type_str(const struct btf_type *t) in btf_type_str() argument
279 return btf_kind_str[BTF_INFO_KIND(t->info)]; in btf_type_str()
369 const struct btf_type *t,
382 const struct btf_type *t);
383 void (*show)(const struct btf *btf, const struct btf_type *t,
392 const struct btf_type *t, u32 type_id);
394 static bool btf_type_is_modifier(const struct btf_type *t) in btf_type_is_modifier() argument
406 switch (BTF_INFO_KIND(t->info)) { in btf_type_is_modifier()
417 bool btf_type_is_void(const struct btf_type *t) in btf_type_is_void() argument
[all …]
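btf_type_str() and btf_type_is_modifier() are the kernel's internal BTF helpers; user space gets the equivalent view through libbpf. A sketch that walks a raw BTF file, assuming libbpf >= 0.6 for btf__type_cnt():

#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <stdio.h>

int dump_btf_kinds(const char *path)
{
        struct btf *btf = btf__parse(path, NULL);
        __u32 id, n;

        if (libbpf_get_error(btf))
                return -1;
        n = btf__type_cnt(btf);         /* count includes the void type at id 0 */
        for (id = 1; id < n; id++) {
                const struct btf_type *t = btf__type_by_id(btf, id);

                printf("[%u] kind=%u name=%s\n", id, btf_kind(t),
                       btf__name_by_offset(btf, t->name_off));
        }
        btf__free(btf);
        return 0;
}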
bpf_struct_ops.c
105 const struct btf_type *t; in bpf_struct_ops_init() local
148 t = btf_type_by_id(btf, type_id); in bpf_struct_ops_init()
149 if (btf_type_vlen(t) > BPF_STRUCT_OPS_MAX_NR_MEMBERS) { in bpf_struct_ops_init()
151 btf_type_vlen(t), st_ops->name); in bpf_struct_ops_init()
155 for_each_member(j, t, member) { in bpf_struct_ops_init()
165 if (btf_member_bitfield_size(t, member)) { in bpf_struct_ops_init()
184 if (j == btf_type_vlen(t)) { in bpf_struct_ops_init()
190 st_ops->type = t; in bpf_struct_ops_init()
278 const struct btf_type *t = st_map->st_ops->type; in bpf_struct_ops_map_put_progs() local
281 for (i = 0; i < btf_type_vlen(t); i++) { in bpf_struct_ops_map_put_progs()
[all …]
verifier.c
583 enum bpf_reg_type t; in print_verifier_state() local
590 t = reg->type; in print_verifier_state()
591 if (t == NOT_INIT) in print_verifier_state()
595 verbose(env, "=%s", reg_type_str[t]); in print_verifier_state()
596 if (t == SCALAR_VALUE && reg->precise) in print_verifier_state()
598 if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && in print_verifier_state()
603 if (t == PTR_TO_BTF_ID || in print_verifier_state()
604 t == PTR_TO_BTF_ID_OR_NULL || in print_verifier_state()
605 t == PTR_TO_PERCPU_BTF_ID) in print_verifier_state()
608 if (reg_type_may_be_refcounted_or_null(t)) in print_verifier_state()
[all …]
/kernel/
kcov.c
62 struct task_struct *t; member
154 static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t) in check_kcov_mode() argument
163 if (!in_task() && !(in_serving_softirq() && t->kcov_softirq)) in check_kcov_mode()
165 mode = READ_ONCE(t->kcov_mode); in check_kcov_mode()
191 struct task_struct *t; in __sanitizer_cov_trace_pc() local
196 t = current; in __sanitizer_cov_trace_pc()
197 if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t)) in __sanitizer_cov_trace_pc()
200 area = t->kcov_area; in __sanitizer_cov_trace_pc()
203 if (likely(pos < t->kcov_size)) { in __sanitizer_cov_trace_pc()
213 struct task_struct *t; in write_comp_data() local
[all …]
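check_kcov_mode()/__sanitizer_cov_trace_pc() fill a per-task buffer that user space sets up through the kcov debugfs file. A condensed version of the usage example from Documentation/dev-tools/kcov.rst (assumes debugfs is mounted at the usual path):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/kcov.h>         /* KCOV_INIT_TRACE, KCOV_ENABLE, ... */

#define COVER_SIZE (64 << 10)

int main(void)
{
        unsigned long *cover, n, i;
        int fd = open("/sys/kernel/debug/kcov", O_RDWR);

        if (fd == -1)
                return 1;
        if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
                return 1;
        cover = (unsigned long *)mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                                      PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (cover == MAP_FAILED)
                return 1;
        if (ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC))
                return 1;
        __atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);

        read(-1, NULL, 0);      /* the syscall being traced */

        n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
        for (i = 0; i < n; i++)
                printf("0x%lx\n", cover[i + 1]);        /* covered PCs */
        ioctl(fd, KCOV_DISABLE, 0);
        munmap(cover, COVER_SIZE * sizeof(unsigned long));
        close(fd);
        return 0;
}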
softirq.c
514 static void __tasklet_schedule_common(struct tasklet_struct *t, in __tasklet_schedule_common() argument
523 t->next = NULL; in __tasklet_schedule_common()
524 *head->tail = t; in __tasklet_schedule_common()
525 head->tail = &(t->next); in __tasklet_schedule_common()
530 void __tasklet_schedule(struct tasklet_struct *t) in __tasklet_schedule() argument
532 __tasklet_schedule_common(t, &tasklet_vec, in __tasklet_schedule()
537 void __tasklet_hi_schedule(struct tasklet_struct *t) in __tasklet_hi_schedule() argument
539 __tasklet_schedule_common(t, &tasklet_hi_vec, in __tasklet_hi_schedule()
557 struct tasklet_struct *t = list; in tasklet_action_common() local
561 if (tasklet_trylock(t)) { in tasklet_action_common()
[all …]
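__tasklet_schedule_common() is the internal tail of the public tasklet API. A minimal sketch of driver-side usage (demo_* names hypothetical; assumes the post-5.9 callback-style tasklet API):

#include <linux/interrupt.h>

static void demo_tasklet_fn(struct tasklet_struct *t)
{
        pr_info("bottom half ran in softirq context\n");
}

static DECLARE_TASKLET(demo_tasklet, demo_tasklet_fn);

static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
        /* defers the work; ends up in __tasklet_schedule() above */
        tasklet_schedule(&demo_tasklet);
        return IRQ_HANDLED;
}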
rseq.c
84 static int rseq_update_cpu_id(struct task_struct *t) in rseq_update_cpu_id() argument
88 if (put_user(cpu_id, &t->rseq->cpu_id_start)) in rseq_update_cpu_id()
90 if (put_user(cpu_id, &t->rseq->cpu_id)) in rseq_update_cpu_id()
92 trace_rseq_update(t); in rseq_update_cpu_id()
96 static int rseq_reset_rseq_cpu_id(struct task_struct *t) in rseq_reset_rseq_cpu_id() argument
103 if (put_user(cpu_id_start, &t->rseq->cpu_id_start)) in rseq_reset_rseq_cpu_id()
110 if (put_user(cpu_id, &t->rseq->cpu_id)) in rseq_reset_rseq_cpu_id()
115 static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs) in rseq_get_rseq_cs() argument
124 if (get_user(ptr, &t->rseq->rseq_cs)) in rseq_get_rseq_cs()
127 if (copy_from_user(&ptr, &t->rseq->rseq_cs, sizeof(ptr))) in rseq_get_rseq_cs()
[all …]
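rseq_update_cpu_id() writes into a user-registered struct rseq; registration happens via the rseq() system call. A hedged user-space sketch (the RSEQ_SIG value is the one the kernel selftests use; note that glibc >= 2.35 may already have registered an area for the thread, in which case this call fails with EBUSY):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/rseq.h>

#define RSEQ_SIG 0x53053053     /* must match on unregister */

static __thread struct rseq rseq_area;

int main(void)
{
        if (syscall(__NR_rseq, &rseq_area, sizeof(rseq_area), 0, RSEQ_SIG)) {
                perror("rseq");
                return 1;
        }
        /* the kernel now keeps these fields current on every return to
         * user space, via rseq_update_cpu_id() */
        printf("running on cpu %u\n", rseq_area.cpu_id);
        return 0;
}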
hung_task.c
89 static void check_hung_task(struct task_struct *t, unsigned long timeout) in check_hung_task() argument
91 unsigned long switch_count = t->nvcsw + t->nivcsw; in check_hung_task()
97 if (unlikely(frozen_or_skipped(t))) in check_hung_task()
108 if (switch_count != t->last_switch_count) { in check_hung_task()
109 t->last_switch_count = switch_count; in check_hung_task()
110 t->last_switch_time = jiffies; in check_hung_task()
113 if (time_is_after_jiffies(t->last_switch_time + timeout * HZ)) in check_hung_task()
116 trace_sched_process_hang(t); in check_hung_task()
132 t->comm, t->pid, (jiffies - t->last_switch_time) / HZ); in check_hung_task()
139 sched_show_task(t); in check_hung_task()
[all …]
signal.c
70 static void __user *sig_handler(struct task_struct *t, int sig) in sig_handler() argument
72 return t->sighand->action[sig - 1].sa.sa_handler; in sig_handler()
82 static bool sig_task_ignored(struct task_struct *t, int sig, bool force) in sig_task_ignored() argument
86 handler = sig_handler(t, sig); in sig_task_ignored()
89 if (unlikely(is_global_init(t) && sig_kernel_only(sig))) in sig_task_ignored()
92 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && in sig_task_ignored()
97 if (unlikely((t->flags & PF_KTHREAD) && in sig_task_ignored()
104 static bool sig_ignored(struct task_struct *t, int sig, bool force) in sig_ignored() argument
111 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) in sig_ignored()
119 if (t->ptrace && sig != SIGKILL) in sig_ignored()
[all …]
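sig_task_ignored() is where the kernel decides whether delivery can be skipped; from user space that decision is driven by the disposition installed with sigaction(). A small illustration:

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void on_term(int sig)
{
        /* async-signal-safe work only */
        write(STDOUT_FILENO, "got SIGTERM\n", 12);
}

int main(void)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = SIG_IGN;        /* sig_task_ignored() will be true */
        sigaction(SIGUSR1, &sa, NULL);

        sa.sa_handler = on_term;        /* a real handler: never ignored */
        sigaction(SIGTERM, &sa, NULL);

        pause();
        return 0;
}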
capability.c
294 bool has_ns_capability(struct task_struct *t, in has_ns_capability() argument
300 ret = security_capable(__task_cred(t), ns, cap, CAP_OPT_NONE); in has_ns_capability()
316 bool has_capability(struct task_struct *t, int cap) in has_capability() argument
318 return has_ns_capability(t, &init_user_ns, cap); in has_capability()
335 bool has_ns_capability_noaudit(struct task_struct *t, in has_ns_capability_noaudit() argument
341 ret = security_capable(__task_cred(t), ns, cap, CAP_OPT_NOAUDIT); in has_ns_capability_noaudit()
359 bool has_capability_noaudit(struct task_struct *t, int cap) in has_capability_noaudit() argument
361 return has_ns_capability_noaudit(t, &init_user_ns, cap); in has_capability_noaudit()
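has_ns_capability() and friends check an arbitrary task; the common in-kernel case is checking current, for which capable()/ns_capable() are the usual entry points. A hedged sketch (demo name hypothetical):

#include <linux/capability.h>
#include <linux/errno.h>

static long demo_privileged_op(void)
{
        /* checks current against the initial user namespace */
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        /* ... privileged work ... */
        return 0;
}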
/kernel/time/
timeconst.bc
6 auto t;
8 t = b;
10 a = t;
66 print "#define HZ_TO_MSEC_SHR32\t", s, "\n"
73 print "#define MSEC_TO_HZ_SHR32\t", s, "\n"
77 print "#define HZ_TO_MSEC_NUM\t\t", 1000/cd, "\n"
78 print "#define HZ_TO_MSEC_DEN\t\t", hz/cd, "\n"
79 print "#define MSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
80 print "#define MSEC_TO_HZ_DEN\t\t", 1000/cd, "\n"
88 print "#define HZ_TO_USEC_SHR32\t", s, "\n"
[all …]
posix-stubs.c
131 struct timespec64 t; in SYSCALL_DEFINE4() local
143 if (get_timespec64(&t, rqtp)) in SYSCALL_DEFINE4()
145 if (!timespec64_valid(&t)) in SYSCALL_DEFINE4()
152 texp = timespec64_to_ktime(t); in SYSCALL_DEFINE4()
225 struct timespec64 t; in SYSCALL_DEFINE4() local
237 if (get_old_timespec32(&t, rqtp)) in SYSCALL_DEFINE4()
239 if (!timespec64_valid(&t)) in SYSCALL_DEFINE4()
246 texp = timespec64_to_ktime(t); in SYSCALL_DEFINE4()
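Both SYSCALL_DEFINE4 bodies above implement clock_nanosleep() (native and 32-bit-compat timespec). The corresponding user-space call; note clock_nanosleep() returns the error number directly rather than setting errno:

#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
        struct timespec req = { .tv_sec = 1, .tv_nsec = 500000000 };
        int err;

        /* relative 1.5 s sleep; the kernel validates req with
         * timespec64_valid() as in the snippet above */
        err = clock_nanosleep(CLOCK_MONOTONIC, 0, &req, NULL);
        if (err)
                fprintf(stderr, "clock_nanosleep: %s\n", strerror(err));
        return err ? 1 : 0;
}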
hrtimer.c
291 # define switch_hrtimer_base(t, b, p) (b) argument
1831 struct hrtimer_sleeper *t = in hrtimer_wakeup() local
1833 struct task_struct *task = t->task; in hrtimer_wakeup()
1835 t->task = NULL; in hrtimer_wakeup()
1933 static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) in do_nanosleep() argument
1939 hrtimer_sleeper_start_expires(t, mode); in do_nanosleep()
1941 if (likely(t->task)) in do_nanosleep()
1944 hrtimer_cancel(&t->timer); in do_nanosleep()
1947 } while (t->task && !signal_pending(current)); in do_nanosleep()
1951 if (!t->task) in do_nanosleep()
[all …]
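hrtimer_wakeup()/do_nanosleep() implement sleeping on a high-resolution timer; the general-purpose kernel API is hrtimer_init()/hrtimer_start() with a callback. A minimal one-shot sketch (demo_* names hypothetical):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *timer)
{
        pr_info("timer fired\n");
        return HRTIMER_NORESTART;       /* one-shot */
}

static void demo_arm_timer(void)
{
        hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        demo_timer.function = demo_timer_fn;
        hrtimer_start(&demo_timer, ms_to_ktime(100), HRTIMER_MODE_REL);
}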
timekeeping_debug.c
46 void tk_debug_account_sleep_time(const struct timespec64 *t) in tk_debug_account_sleep_time() argument
49 int bin = min(fls(t->tv_sec), NUM_BINS-1); in tk_debug_account_sleep_time()
53 (s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC); in tk_debug_account_sleep_time()
/kernel/sched/
stats.h
166 static inline void sched_info_reset_dequeued(struct task_struct *t) in sched_info_reset_dequeued() argument
168 t->sched_info.last_queued = 0; in sched_info_reset_dequeued()
177 static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t) in sched_info_dequeued() argument
182 if (t->sched_info.last_queued) in sched_info_dequeued()
183 delta = now - t->sched_info.last_queued; in sched_info_dequeued()
185 sched_info_reset_dequeued(t); in sched_info_dequeued()
186 t->sched_info.run_delay += delta; in sched_info_dequeued()
196 static void sched_info_arrive(struct rq *rq, struct task_struct *t) in sched_info_arrive() argument
200 if (t->sched_info.last_queued) in sched_info_arrive()
201 delta = now - t->sched_info.last_queued; in sched_info_arrive()
[all …]
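sched_info_dequeued()/sched_info_arrive() accumulate run_delay, the time a task spent runnable but not running; it is exported as the second field of /proc/<pid>/schedstat. A quick reader:

#include <stdio.h>

int main(void)
{
        unsigned long long runtime_ns, run_delay_ns;
        unsigned long timeslices;
        FILE *f = fopen("/proc/self/schedstat", "r");

        if (!f || fscanf(f, "%llu %llu %lu", &runtime_ns,
                         &run_delay_ns, &timeslices) != 3)
                return 1;
        printf("waited %llu ns on runqueues over %lu slices\n",
               run_delay_ns, timeslices);
        fclose(f);
        return 0;
}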
psi.c
185 static void poll_timer_fn(struct timer_list *t);
503 struct psi_trigger *t; in init_triggers() local
505 list_for_each_entry(t, &group->triggers, node) in init_triggers()
506 window_reset(&t->win, now, in init_triggers()
507 group->total[PSI_POLL][t->state], 0); in init_triggers()
515 struct psi_trigger *t; in update_triggers() local
523 list_for_each_entry(t, &group->triggers, node) { in update_triggers()
527 if (group->polling_total[t->state] == total[t->state]) in update_triggers()
539 growth = window_update(&t->win, now, total[t->state]); in update_triggers()
540 if (growth < t->threshold) in update_triggers()
[all …]
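update_triggers() fires a wakeup when stall growth inside the window crosses a threshold; user space arms such a trigger by writing to /proc/pressure/{cpu,memory,io} and waiting with poll(). Condensed from Documentation/accounting/psi.rst:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char trig[] = "some 150000 1000000";      /* 150 ms stall per 1 s window */
        struct pollfd fds;

        fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
        if (fds.fd < 0)
                return 1;
        if (write(fds.fd, trig, strlen(trig) + 1) < 0)
                return 1;
        fds.events = POLLPRI;
        for (;;) {
                if (poll(&fds, 1, -1) < 0)
                        return 1;
                if (fds.revents & POLLPRI)
                        printf("memory pressure event\n");
        }
}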
cputime.c
281 static inline u64 read_sum_exec_runtime(struct task_struct *t) in read_sum_exec_runtime() argument
283 return t->se.sum_exec_runtime; in read_sum_exec_runtime()
286 static u64 read_sum_exec_runtime(struct task_struct *t) in read_sum_exec_runtime() argument
292 rq = task_rq_lock(t, &rf); in read_sum_exec_runtime()
293 ns = t->se.sum_exec_runtime; in read_sum_exec_runtime()
294 task_rq_unlock(rq, t, &rf); in read_sum_exec_runtime()
308 struct task_struct *t; in thread_group_cputime() local
333 for_each_thread(tsk, t) { in thread_group_cputime()
334 task_cputime(t, &utime, &stime); in thread_group_cputime()
337 times->sum_exec_runtime += read_sum_exec_runtime(t); in thread_group_cputime()
[all …]
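thread_group_cputime() sums utime/stime/runtime across every thread in the group; user space reaches it (via the posix-cpu-timers code) through CLOCK_PROCESS_CPUTIME_ID:

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
        printf("process consumed %ld.%09ld s of CPU\n",
               (long)ts.tv_sec, ts.tv_nsec);
        return 0;
}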
completion.c
206 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); in wait_for_completion_interruptible() local
207 if (t == -ERESTARTSYS) in wait_for_completion_interruptible()
208 return t; in wait_for_completion_interruptible()
243 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); in wait_for_completion_killable() local
244 if (t == -ERESTARTSYS) in wait_for_completion_killable()
245 return t; in wait_for_completion_killable()
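The two snippets show the -ERESTARTSYS propagation convention for interruptible and killable waits. A minimal producer/consumer sketch over the same API (demo_* names hypothetical):

#include <linux/completion.h>
#include <linux/errno.h>

static DECLARE_COMPLETION(demo_done);

static int demo_wait(void)
{
        int ret = wait_for_completion_interruptible(&demo_done);

        if (ret == -ERESTARTSYS)
                return ret;     /* a signal interrupted the wait */
        return 0;
}

/* elsewhere, e.g. from another thread or an IRQ handler */
static void demo_signal_done(void)
{
        complete(&demo_done);
}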
/kernel/trace/
fgraph.c
375 struct task_struct *g, *t; in alloc_retstack_tasklist() local
391 for_each_process_thread(g, t) { in alloc_retstack_tasklist()
397 if (t->ret_stack == NULL) { in alloc_retstack_tasklist()
398 atomic_set(&t->trace_overrun, 0); in alloc_retstack_tasklist()
399 t->curr_ret_stack = -1; in alloc_retstack_tasklist()
400 t->curr_ret_depth = -1; in alloc_retstack_tasklist()
403 t->ret_stack = ret_stack_list[start++]; in alloc_retstack_tasklist()
490 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) in graph_init_task() argument
492 atomic_set(&t->trace_overrun, 0); in graph_init_task()
493 t->ftrace_timestamp = 0; in graph_init_task()
[all …]
blktrace.c
72 struct blk_io_trace *t; in trace_note() local
84 sizeof(*t) + len + cgid_len, in trace_note()
88 t = ring_buffer_event_data(event); in trace_note()
95 t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len); in trace_note()
96 if (t) { in trace_note()
97 t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; in trace_note()
98 t->time = ktime_to_ns(ktime_get()); in trace_note()
100 t->device = bt->dev; in trace_note()
101 t->action = action | (cgid ? __BLK_TN_CGROUP : 0); in trace_note()
102 t->pid = pid; in trace_note()
[all …]
trace_probe.c
272 static int parse_probe_vars(char *arg, const struct fetch_type *t, in parse_probe_vars() argument
509 const struct fetch_type *t, in __parse_bitfield_probe_arg() argument
535 code->lshift = BYTES_TO_BITS(t->size) - (bw + bo); in __parse_bitfield_probe_arg()
536 code->rshift = BYTES_TO_BITS(t->size) - bw; in __parse_bitfield_probe_arg()
537 code->basesize = t->size; in __parse_bitfield_probe_arg()
539 return (BYTES_TO_BITS(t->size) < (bw + bo)) ? -EINVAL : 0; in __parse_bitfield_probe_arg()
547 char *t, *t2, *t3; in traceprobe_parse_probe_arg_body() local
563 t = strchr(arg, ':'); in traceprobe_parse_probe_arg_body()
564 if (t) { in traceprobe_parse_probe_arg_body()
565 *t = '\0'; in traceprobe_parse_probe_arg_body()
[all …]
trace_mmiotrace.c
173 unsigned long long t = ns2usecs(iter->ts); in mmio_print_rw() local
174 unsigned long usec_rem = do_div(t, USEC_PER_SEC); in mmio_print_rw()
175 unsigned secs = (unsigned long)t; in mmio_print_rw()
218 unsigned long long t = ns2usecs(iter->ts); in mmio_print_map() local
219 unsigned long usec_rem = do_div(t, USEC_PER_SEC); in mmio_print_map()
220 unsigned secs = (unsigned long)t; in mmio_print_map()
252 unsigned long long t = ns2usecs(iter->ts); in mmio_print_mark() local
253 unsigned long usec_rem = do_div(t, USEC_PER_SEC); in mmio_print_mark()
254 unsigned secs = (unsigned long)t; in mmio_print_mark()
/kernel/events/
uprobes.c
1299 struct rb_node *n, *t; in build_probe_list() local
1309 for (t = n; t; t = rb_prev(t)) { in build_probe_list()
1310 u = rb_entry(t, struct uprobe, rb_node); in build_probe_list()
1316 for (t = n; (t = rb_next(t)); ) { in build_probe_list()
1317 u = rb_entry(t, struct uprobe, rb_node); in build_probe_list()
1717 void uprobe_free_utask(struct task_struct *t) in uprobe_free_utask() argument
1719 struct uprobe_task *utask = t->utask; in uprobe_free_utask()
1732 xol_free_insn_slot(t); in uprobe_free_utask()
1734 t->utask = NULL; in uprobe_free_utask()
1752 static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask) in dup_utask() argument
[all …]
