Lines matching references to the identifier "t"
161 struct task_struct *t = current; in rcu_preempt_note_context_switch() local
166 if (t->rcu_read_lock_nesting > 0 && in rcu_preempt_note_context_switch()
167 !t->rcu_read_unlock_special.b.blocked) { in rcu_preempt_note_context_switch()
174 t->rcu_read_unlock_special.b.blocked = true; in rcu_preempt_note_context_switch()
175 t->rcu_blocked_node = rnp; in rcu_preempt_note_context_switch()
196 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry)); in rcu_preempt_note_context_switch()
198 list_add(&t->rcu_node_entry, rnp->gp_tasks->prev); in rcu_preempt_note_context_switch()
199 rnp->gp_tasks = &t->rcu_node_entry; in rcu_preempt_note_context_switch()
205 list_add(&t->rcu_node_entry, &rnp->blkd_tasks); in rcu_preempt_note_context_switch()
207 rnp->gp_tasks = &t->rcu_node_entry; in rcu_preempt_note_context_switch()
210 t->pid, in rcu_preempt_note_context_switch()
215 } else if (t->rcu_read_lock_nesting < 0 && in rcu_preempt_note_context_switch()
216 t->rcu_read_unlock_special.s) { in rcu_preempt_note_context_switch()
222 rcu_read_unlock_special(t); in rcu_preempt_note_context_switch()
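
The rcu_preempt_note_context_switch() lines above are where a task preempted inside an RCU read-side critical section marks itself blocked and queues itself on the leaf rcu_node's ->blkd_tasks list: if the grace period already in progress must wait on it, the task is linked in just before *gp_tasks and gp_tasks is pointed at it; otherwise it goes at the head of the list. Below is a minimal user-space sketch of that queuing decision. The *_sketch types, note_preempted_reader(), and the blocks_current_gp flag (standing in for the kernel's qsmask test) are illustrative names, not kernel identifiers.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

/* Insert item right after prev, as the kernel's list_add() does. */
static void list_add(struct list_head *item, struct list_head *prev)
{
        item->next = prev->next;
        item->prev = prev;
        prev->next->prev = item;
        prev->next = item;
}

struct rcu_node_sketch {
        struct list_head blkd_tasks;    /* all readers blocked on this node */
        struct list_head *gp_tasks;     /* first entry the current GP waits on */
};

struct task_sketch {
        struct list_head rcu_node_entry;
        int pid;
        bool blocked;                   /* cf. rcu_read_unlock_special.b.blocked */
};

static void note_preempted_reader(struct rcu_node_sketch *rnp,
                                  struct task_sketch *t,
                                  bool blocks_current_gp)
{
        t->blocked = true;
        if (blocks_current_gp && rnp->gp_tasks) {
                /* Link in just before *gp_tasks and take over that role. */
                list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
                rnp->gp_tasks = &t->rcu_node_entry;
        } else {
                list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
                if (blocks_current_gp)
                        rnp->gp_tasks = &t->rcu_node_entry;
        }
}

int main(void)
{
        struct rcu_node_sketch rnp;
        struct task_sketch a = { .pid = 1 }, b = { .pid = 2 };

        INIT_LIST_HEAD(&rnp.blkd_tasks);
        rnp.gp_tasks = NULL;
        note_preempted_reader(&rnp, &a, true);  /* current GP must wait on a */
        note_preempted_reader(&rnp, &b, false); /* only later GPs wait on b */
        printf("gp_tasks -> pid %d\n",
               ((struct task_sketch *)((char *)rnp.gp_tasks -
                offsetof(struct task_sketch, rcu_node_entry)))->pid);
        return 0;
}
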
288 static struct list_head *rcu_next_node_entry(struct task_struct *t, in rcu_next_node_entry() argument
293 np = t->rcu_node_entry.next; in rcu_next_node_entry()
304 void rcu_read_unlock_special(struct task_struct *t) in rcu_read_unlock_special() argument
328 special = t->rcu_read_unlock_special; in rcu_read_unlock_special()
331 if (!t->rcu_read_unlock_special.s) { in rcu_read_unlock_special()
345 t->rcu_read_unlock_special.b.blocked = false; in rcu_read_unlock_special()
353 rnp = t->rcu_blocked_node; in rcu_read_unlock_special()
356 if (rnp == t->rcu_blocked_node) in rcu_read_unlock_special()
363 np = rcu_next_node_entry(t, rnp); in rcu_read_unlock_special()
364 list_del_init(&t->rcu_node_entry); in rcu_read_unlock_special()
365 t->rcu_blocked_node = NULL; in rcu_read_unlock_special()
367 rnp->gpnum, t->pid); in rcu_read_unlock_special()
368 if (&t->rcu_node_entry == rnp->gp_tasks) in rcu_read_unlock_special()
370 if (&t->rcu_node_entry == rnp->exp_tasks) in rcu_read_unlock_special()
373 if (&t->rcu_node_entry == rnp->boost_tasks) in rcu_read_unlock_special()
376 drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t; in rcu_read_unlock_special()
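
The rcu_read_unlock_special() lines above show the matching dequeue: once the snapshot of ->rcu_read_unlock_special indicates the task was preempted in a critical section, the task removes its entry from the node's list, and because gp_tasks, exp_tasks, or boost_tasks may point at exactly that entry, each such pointer is first advanced to the successor (rcu_next_node_entry() returns NULL when the removed entry was the last one). A compact illustrative sketch of that pointer maintenance, again with made-up *_sketch names rather than the kernel's own:

struct list_head { struct list_head *next, *prev; };

void list_del_init(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
        e->next = e->prev = e;          /* entry now points only at itself */
}

struct rcu_node_sketch {
        struct list_head blkd_tasks;
        struct list_head *gp_tasks, *exp_tasks, *boost_tasks;
};

/* Counterpart of rcu_next_node_entry(): next entry, or NULL at end of list. */
struct list_head *next_node_entry(struct rcu_node_sketch *rnp,
                                  struct list_head *e)
{
        return e->next == &rnp->blkd_tasks ? NULL : e->next;
}

void remove_queued_reader(struct rcu_node_sketch *rnp, struct list_head *e)
{
        struct list_head *np = next_node_entry(rnp, e);

        list_del_init(e);
        if (rnp->gp_tasks == e)
                rnp->gp_tasks = np;     /* GP now waits on the next task, if any */
        if (rnp->exp_tasks == e)
                rnp->exp_tasks = np;
        if (rnp->boost_tasks == e)
                rnp->boost_tasks = np;
}
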
427 struct task_struct *t; in rcu_print_detail_task_stall_rnp() local
434 t = list_entry(rnp->gp_tasks, in rcu_print_detail_task_stall_rnp()
436 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) in rcu_print_detail_task_stall_rnp()
437 sched_show_task(t); in rcu_print_detail_task_stall_rnp()
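
Both stall-printing functions above (and rcu_boost() further down, at file line 1176) rely on list_entry()/container_of() to get from a bare list_head pointer such as rnp->gp_tasks back to the task_struct that embeds it, then keep walking ->blkd_tasks from there. A stand-alone demonstration of that recovery step; task_sketch is an illustrative type, not the kernel's:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

struct task_sketch {
        int pid;
        struct list_head rcu_node_entry;        /* embedded, as in task_struct */
};

int main(void)
{
        struct task_sketch t = { .pid = 42 };
        struct list_head *p = &t.rcu_node_entry; /* e.g. what gp_tasks holds */
        struct task_sketch *back;

        /* Recover the enclosing task from the pointer to its embedded node. */
        back = list_entry(p, struct task_sketch, rcu_node_entry);
        printf("recovered pid %d\n", back->pid);  /* prints 42 */
        return 0;
}
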
493 struct task_struct *t; in rcu_print_task_stall() local
499 t = list_entry(rnp->gp_tasks, in rcu_print_task_stall()
501 list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) { in rcu_print_task_stall()
502 pr_cont(" P%d", t->pid); in rcu_print_task_stall()
551 struct task_struct *t; in rcu_preempt_offline_tasks() local
577 t = list_entry(lp->next, typeof(*t), rcu_node_entry); in rcu_preempt_offline_tasks()
580 list_del(&t->rcu_node_entry); in rcu_preempt_offline_tasks()
581 t->rcu_blocked_node = rnp_root; in rcu_preempt_offline_tasks()
582 list_add(&t->rcu_node_entry, lp_root); in rcu_preempt_offline_tasks()
583 if (&t->rcu_node_entry == rnp->gp_tasks) in rcu_preempt_offline_tasks()
585 if (&t->rcu_node_entry == rnp->exp_tasks) in rcu_preempt_offline_tasks()
588 if (&t->rcu_node_entry == rnp->boost_tasks) in rcu_preempt_offline_tasks()
626 struct task_struct *t = current; in rcu_preempt_check_callbacks() local
628 if (t->rcu_read_lock_nesting == 0) { in rcu_preempt_check_callbacks()
632 if (t->rcu_read_lock_nesting > 0 && in rcu_preempt_check_callbacks()
635 t->rcu_read_unlock_special.b.need_qs = true; in rcu_preempt_check_callbacks()
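
rcu_preempt_check_callbacks() above runs from the scheduler tick: a zero ->rcu_read_lock_nesting means the task is outside any read-side critical section and a quiescent state can be reported right away, while a positive count makes the tick set ->rcu_read_unlock_special.b.need_qs so the outermost rcu_read_unlock() reports it later. A deliberately simplified sketch of those counter semantics (the real unlock path is more involved; the *_sketch names are illustrative only):

#include <stdbool.h>

struct task_sketch {
        int rcu_read_lock_nesting;
        bool need_qs;                 /* stand-in for ...special.b.need_qs */
};

static inline void read_lock_sketch(struct task_sketch *t)
{
        t->rcu_read_lock_nesting++;   /* enter a read-side critical section */
}

static inline void read_unlock_sketch(struct task_sketch *t)
{
        /* Outermost unlock: report any quiescent state the tick asked for. */
        if (--t->rcu_read_lock_nesting == 0 && t->need_qs)
                t->need_qs = false;
}

/* Roughly what the scheduler-tick check above boils down to. */
static inline void tick_check_sketch(struct task_sketch *t)
{
        if (t->rcu_read_lock_nesting > 0)
                t->need_qs = true;    /* defer to the outermost unlock */
        /* nesting == 0: the task is in a quiescent state right now */
}
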
912 struct task_struct *t = current; in exit_rcu() local
916 t->rcu_read_lock_nesting = 1; in exit_rcu()
918 t->rcu_read_unlock_special.b.blocked = true; in exit_rcu()
1106 static void rcu_wake_cond(struct task_struct *t, int status) in rcu_wake_cond() argument
1113 wake_up_process(t); in rcu_wake_cond()
1127 struct task_struct *t; in rcu_boost() local
1176 t = container_of(tb, struct task_struct, rcu_node_entry); in rcu_boost()
1177 rt_mutex_init_proxy_locked(&rnp->boost_mtx, t); in rcu_boost()
1239 struct task_struct *t; in rcu_initiate_boost() local
1254 t = rnp->boost_kthread_task; in rcu_initiate_boost()
1255 if (t) in rcu_initiate_boost()
1256 rcu_wake_cond(t, rnp->boost_kthread_status); in rcu_initiate_boost()
1310 struct task_struct *t; in rcu_spawn_one_boost_kthread() local
1321 t = kthread_create(rcu_boost_kthread, (void *)rnp, in rcu_spawn_one_boost_kthread()
1323 if (IS_ERR(t)) in rcu_spawn_one_boost_kthread()
1324 return PTR_ERR(t); in rcu_spawn_one_boost_kthread()
1327 rnp->boost_kthread_task = t; in rcu_spawn_one_boost_kthread()
1330 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); in rcu_spawn_one_boost_kthread()
1331 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ in rcu_spawn_one_boost_kthread()
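
rcu_spawn_one_boost_kthread() above follows the standard kthread bring-up sequence: kthread_create(), error check with IS_ERR()/PTR_ERR(), publication of the task pointer, promotion to SCHED_FIFO via sched_setscheduler_nocheck(), and a wake_up_process() so the new thread reaches its TASK_INTERRUPTIBLE sleep quickly. A hedged kernel-style sketch of the same pattern; my_worker_fn, my_spawn_worker, and the priority value are made-up, and this fragment is illustrative rather than the RCU code itself:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *my_worker;           /* illustrative */

static int my_worker_fn(void *arg)
{
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();                     /* sleep until woken */
                /* ... do the actual work here ... */
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int my_spawn_worker(void *arg)
{
        struct sched_param sp = { .sched_priority = 1 };  /* arbitrary RT prio */
        struct task_struct *t;

        t = kthread_create(my_worker_fn, arg, "my_worker");
        if (IS_ERR(t))
                return PTR_ERR(t);              /* creation failed */
        my_worker = t;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        wake_up_process(t);                     /* let it park in its sleep loop */
        return 0;
}
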
1407 struct task_struct *t = rnp->boost_kthread_task; in rcu_boost_kthread_setaffinity() local
1412 if (!t) in rcu_boost_kthread_setaffinity()
1425 set_cpus_allowed_ptr(t, cm); in rcu_boost_kthread_setaffinity()
2095 struct task_struct *t; in __call_rcu_nocb_enqueue() local
2105 t = ACCESS_ONCE(rdp->nocb_kthread); in __call_rcu_nocb_enqueue()
2106 if (rcu_nocb_poll || !t) { in __call_rcu_nocb_enqueue()
2558 struct task_struct *t; in rcu_spawn_one_nocb_kthread() local
2584 t = kthread_run(rcu_nocb_kthread, rdp_spawn, in rcu_spawn_one_nocb_kthread()
2586 BUG_ON(IS_ERR(t)); in rcu_spawn_one_nocb_kthread()
2587 ACCESS_ONCE(rdp_spawn->nocb_kthread) = t; in rcu_spawn_one_nocb_kthread()
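
The last two groups work as a pair: rcu_spawn_one_nocb_kthread() publishes the freshly created kthread pointer with an ACCESS_ONCE() store, and __call_rcu_nocb_enqueue() (file lines 2095-2106) reads it back the same way without holding a lock, treating NULL as "not spawned yet, defer the wakeup". ACCESS_ONCE() is the pre-READ_ONCE()/WRITE_ONCE() idiom for forcing the compiler to emit exactly one load or store rather than caching or re-reading the value. A tiny user-space stand-in; the worker type and function names are illustrative:

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct worker;                          /* opaque stand-in for task_struct */
static struct worker *nocb_kthread;     /* illustrative shared pointer */

void publish_kthread(struct worker *w)
{
        ACCESS_ONCE(nocb_kthread) = w;  /* single store the compiler cannot elide */
}

struct worker *lookup_kthread(void)
{
        struct worker *t = ACCESS_ONCE(nocb_kthread);  /* single load */

        return t;   /* NULL: not spawned yet, so the caller defers the wakeup */
}
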