Lines matching references to the identifier t (cross-reference listing over the RCU Tasks code in kernel/rcu/tasks.h; each entry gives the source line number, the matching code, the enclosing function, and whether the reference is a local or an argument)
18 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
587 struct task_struct *t; in rcu_spawn_tasks_kthread_generic() local
589 t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname); in rcu_spawn_tasks_kthread_generic()
590 …if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavio… in rcu_spawn_tasks_kthread_generic()
653 static void exit_tasks_rcu_finish_trace(struct task_struct *t);
672 struct task_struct *t; in rcu_tasks_wait_gp() local
686 for_each_process_thread(g, t) in rcu_tasks_wait_gp()
687 rtp->pertask_func(t, &holdouts); in rcu_tasks_wait_gp()
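
Taken together, the pertask_func_t typedef (line 18) and the rcu_tasks_wait_gp() loop above show the flavor-independent dispatch: each RCU Tasks flavor supplies a per-task callback, and the generic grace-period code applies it to every thread while the caller owns the holdout list. A minimal userspace model of that callback shape (every name below is a hypothetical stand-in, not kernel API):

    #include <stdio.h>

    struct task;
    struct tlist { struct task *first; };

    /* Shape of pertask_func_t: per-flavor hook, holdout list from caller. */
    typedef void (*pertask_fn)(struct task *t, struct tlist *hop);

    struct task {
        int pid;
        struct task *next_thread;  /* models for_each_process_thread() order */
        struct task *next_holdout;
    };

    struct flavor {
        const char *name;
        pertask_fn pertask;        /* models rtp->pertask_func */
    };

    /* One flavor's hook: queue every task it is handed. */
    static void queue_all(struct task *t, struct tlist *hop)
    {
        t->next_holdout = hop->first;
        hop->first = t;
    }

    /* Model of the rcu_tasks_wait_gp() scan: walk all threads and
     * let the flavor decide who becomes a holdout. */
    static void wait_gp_scan(struct flavor *rtp, struct task *threads)
    {
        struct tlist holdouts = { NULL };

        for (struct task *t = threads; t; t = t->next_thread)
            rtp->pertask(t, &holdouts);

        for (struct task *t = holdouts.first; t; t = t->next_holdout)
            printf("%s holdout: pid %d\n", rtp->name, t->pid);
    }

    int main(void)
    {
        struct task t2 = { .pid = 2 };
        struct task t1 = { .pid = 1, .next_thread = &t2 };
        struct flavor f = { .name = "model", .pertask = queue_all };

        wait_gp_scan(&f, &t1);
        return 0;
    }
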
826 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop) in rcu_tasks_pertask() argument
828 if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) { in rcu_tasks_pertask()
829 get_task_struct(t); in rcu_tasks_pertask()
830 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); in rcu_tasks_pertask()
831 WRITE_ONCE(t->rcu_tasks_holdout, true); in rcu_tasks_pertask()
832 list_add(&t->rcu_tasks_holdout_list, hop); in rcu_tasks_pertask()
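
The rcu_tasks_pertask() lines above capture the holdout pattern: a runnable task other than the current or an idle task gets a reference taken, its voluntary-context-switch count snapshotted, its holdout flag set, and a link onto the caller's list. A userspace model of the same pattern (a hand-rolled singly-linked list and plain fields stand in for task_struct, list_head, and the READ_ONCE()/WRITE_ONCE() annotations; all names are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace stand-in for the task_struct fields used above. */
    struct task {
        int pid;
        bool on_rq;               /* runnable? */
        unsigned long nvcsw;      /* voluntary context switches */
        unsigned long snap_nvcsw; /* snapshot at grace-period start */
        bool holdout;             /* still blocking the grace period? */
        int refcount;             /* models get/put_task_struct() */
        struct task *next;        /* models rcu_tasks_holdout_list */
    };

    /* Model of rcu_tasks_pertask(): queue a task on the holdout list
     * only if it is runnable; take a reference and snapshot nvcsw.
     * (The kernel version also excludes current and idle tasks.) */
    static void pertask(struct task *t, struct task **hop)
    {
        if (t->on_rq) {
            t->refcount++;            /* get_task_struct(t) */
            t->snap_nvcsw = t->nvcsw; /* snapshot of t->nvcsw */
            t->holdout = true;
            t->next = *hop;           /* list_add(..., hop) */
            *hop = t;
        }
    }

    int main(void)
    {
        struct task a = { .pid = 1, .on_rq = true,  .nvcsw = 10, .refcount = 1 };
        struct task b = { .pid = 2, .on_rq = false, .nvcsw = 3,  .refcount = 1 };
        struct task *holdouts = NULL;

        pertask(&a, &holdouts);
        pertask(&b, &holdouts);   /* not runnable: skipped */

        for (struct task *t = holdouts; t; t = t->next)
            printf("holdout: pid %d (nvcsw snapshot %lu)\n", t->pid, t->snap_nvcsw);
        return 0;
    }
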
860 static void check_holdout_task(struct task_struct *t, in check_holdout_task() argument
865 if (!READ_ONCE(t->rcu_tasks_holdout) || in check_holdout_task()
866 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || in check_holdout_task()
867 !READ_ONCE(t->on_rq) || in check_holdout_task()
869 !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) { in check_holdout_task()
870 WRITE_ONCE(t->rcu_tasks_holdout, false); in check_holdout_task()
871 list_del_init(&t->rcu_tasks_holdout_list); in check_holdout_task()
872 put_task_struct(t); in check_holdout_task()
875 rcu_request_urgent_qs_task(t); in check_holdout_task()
882 cpu = task_cpu(t); in check_holdout_task()
884 t, ".I"[is_idle_task(t)], in check_holdout_task()
886 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout, in check_holdout_task()
887 t->rcu_tasks_idle_cpu, cpu); in check_holdout_task()
888 sched_show_task(t); in check_holdout_task()
895 struct task_struct *t, *t1; in check_all_holdout_tasks() local
897 list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) { in check_all_holdout_tasks()
898 check_holdout_task(t, needreport, firstreport); in check_all_holdout_tasks()
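
check_holdout_task() and check_all_holdout_tasks() above are the release half: on each rescan, a holdout is dropped once its nvcsw count has moved (a voluntary context switch is a quiescent state for this flavor), it has left the runqueue, or its holdout flag was cleared. The walk uses the _safe iterator so entries can be unlinked mid-scan. A sketch continuing the struct task model from the previous block (the CONFIG_NO_HZ_FULL idle-CPU test on line 869 and the stall reporting are omitted):

    /* Model of check_holdout_task(): release a task once its voluntary
     * context-switch count has advanced past the snapshot, it is no
     * longer runnable, or its holdout flag was already cleared. */
    static bool check_holdout(struct task *t)
    {
        if (!t->holdout || t->snap_nvcsw != t->nvcsw || !t->on_rq) {
            t->holdout = false;
            t->refcount--;            /* put_task_struct(t) */
            return true;              /* list_del_init() in the kernel */
        }
        return false;                 /* still blocking the grace period */
    }

    /* Model of check_all_holdout_tasks(): the "safe" walk that unlinks
     * released entries as it goes, like list_for_each_entry_safe(). */
    static void check_all_holdouts(struct task **hop)
    {
        struct task **pp = hop;

        while (*pp) {
            if (check_holdout(*pp))
                *pp = (*pp)->next;    /* unlink, keep scanning */
            else
                pp = &(*pp)->next;
        }
    }
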
1031 struct task_struct *t = current; in exit_tasks_rcu_stop() local
1033 __srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx); in exit_tasks_rcu_stop()
1243 static u8 rcu_ld_need_qs(struct task_struct *t) in rcu_ld_need_qs() argument
1246 return smp_load_acquire(&t->trc_reader_special.b.need_qs); in rcu_ld_need_qs()
1250 static void rcu_st_need_qs(struct task_struct *t, u8 v) in rcu_st_need_qs() argument
1252 smp_store_release(&t->trc_reader_special.b.need_qs, v); in rcu_st_need_qs()
1261 u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new) in rcu_trc_cmpxchg_need_qs() argument
1264 union rcu_special trs_old = READ_ONCE(t->trc_reader_special); in rcu_trc_cmpxchg_need_qs()
1270 ret.s = cmpxchg(&t->trc_reader_special.s, trs_old.s, trs_new.s); in rcu_trc_cmpxchg_need_qs()
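
rcu_trc_cmpxchg_need_qs() above compare-exchanges a single byte (need_qs) by operating on the whole containing union rcu_special word, since that byte shares its word with other reader-state flags. A userspace model of the idiom using C11 atomics (the union layout and every name here are simplified stand-ins):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for union rcu_special: the byte of interest
     * packed with other flags in one 32-bit word. */
    union special {
        struct {
            uint8_t need_qs;
            uint8_t blocked;
            uint8_t need_mb;
            uint8_t pad;
        } b;
        uint32_t s;
    };

    static _Atomic uint32_t reader_special;

    /* Compare-exchange only the need_qs byte by operating on the whole
     * word: returns the need_qs value observed (== old on success). */
    static uint8_t cmpxchg_need_qs(uint8_t old, uint8_t new)
    {
        union special trs_old, trs_new;

        trs_old.s = atomic_load(&reader_special);
        if (trs_old.b.need_qs != old)
            return trs_old.b.need_qs;     /* fast-path mismatch */
        trs_new = trs_old;
        trs_new.b.need_qs = new;
        if (atomic_compare_exchange_strong(&reader_special, &trs_old.s, trs_new.s))
            return old;                   /* success */
        return trs_old.b.need_qs;         /* CAS stored the observed word */
    }

    int main(void)
    {
        printf("cmpxchg 0 -> 1 observed %u\n", cmpxchg_need_qs(0, 1));
        printf("cmpxchg 0 -> 2 observed %u\n", cmpxchg_need_qs(0, 2));
        return 0;
    }

A plausible motive for the whole-word CAS, visible in the union layout, is that byte-wide cmpxchg is not available on all architectures and a full-word CAS also fails cleanly if a sibling flag changes concurrently.
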
1279 void rcu_read_unlock_trace_special(struct task_struct *t) in rcu_read_unlock_trace_special() argument
1287 trs = smp_load_acquire(&t->trc_reader_special); in rcu_read_unlock_trace_special()
1289 if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && t->trc_reader_special.b.need_mb) in rcu_read_unlock_trace_special()
1293 u8 result = rcu_trc_cmpxchg_need_qs(t, TRC_NEED_QS_CHECKED | TRC_NEED_QS, in rcu_read_unlock_trace_special()
1299 rtpcp = per_cpu_ptr(rcu_tasks_trace.rtpcpu, t->trc_blkd_cpu); in rcu_read_unlock_trace_special()
1301 list_del_init(&t->trc_blkd_node); in rcu_read_unlock_trace_special()
1302 WRITE_ONCE(t->trc_reader_special.b.blocked, false); in rcu_read_unlock_trace_special()
1305 WRITE_ONCE(t->trc_reader_nesting, 0); in rcu_read_unlock_trace_special()
1310 void rcu_tasks_trace_qs_blkd(struct task_struct *t) in rcu_tasks_trace_qs_blkd() argument
1318 t->trc_blkd_cpu = smp_processor_id(); in rcu_tasks_trace_qs_blkd()
1321 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); in rcu_tasks_trace_qs_blkd()
1322 WRITE_ONCE(t->trc_reader_special.b.blocked, true); in rcu_tasks_trace_qs_blkd()
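
rcu_tasks_trace_qs_blkd() is the enqueue half of the blocked-reader bookkeeping: a reader preempted inside its critical section parks itself on a per-CPU list and records which CPU that was, and the rcu_read_unlock_trace_special() lines in the previous group unlink it again at the outermost unlock. A sketch of both ends of that protocol (userspace, single-threaded, no locking; all names are stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 2

    struct reader {
        int id;
        int blkd_cpu;          /* models t->trc_blkd_cpu */
        bool blocked;          /* models trc_reader_special.b.blocked */
        struct reader *next;   /* models trc_blkd_node */
    };

    /* Per-CPU lists of readers preempted in their critical sections. */
    static struct reader *blkd_tasks[NR_CPUS];

    /* Model of rcu_tasks_trace_qs_blkd(): a preempted reader parks
     * itself on the list of the CPU it is being preempted on. */
    static void qs_blkd(struct reader *t, int this_cpu)
    {
        t->blkd_cpu = this_cpu;
        t->next = blkd_tasks[this_cpu];
        blkd_tasks[this_cpu] = t;
        t->blocked = true;
    }

    /* Model of the dequeue in rcu_read_unlock_trace_special(): at the
     * outermost unlock, unlink from the CPU recorded at block time. */
    static void unlock_special(struct reader *t)
    {
        struct reader **pp = &blkd_tasks[t->blkd_cpu];

        while (*pp && *pp != t)
            pp = &(*pp)->next;
        if (*pp)
            *pp = t->next;
        t->blocked = false;
    }

    int main(void)
    {
        struct reader r = { .id = 1 };

        qs_blkd(&r, 0);          /* preempted on cpu 0 */
        printf("blocked=%d\n", r.blocked);
        unlock_special(&r);      /* outermost rcu_read_unlock_trace() */
        printf("blocked=%d\n", r.blocked);
        return 0;
    }
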
1328 static void trc_add_holdout(struct task_struct *t, struct list_head *bhp) in trc_add_holdout() argument
1330 if (list_empty(&t->trc_holdout_list)) { in trc_add_holdout()
1331 get_task_struct(t); in trc_add_holdout()
1332 list_add(&t->trc_holdout_list, bhp); in trc_add_holdout()
1338 static void trc_del_holdout(struct task_struct *t) in trc_del_holdout() argument
1340 if (!list_empty(&t->trc_holdout_list)) { in trc_del_holdout()
1341 list_del_init(&t->trc_holdout_list); in trc_del_holdout()
1342 put_task_struct(t); in trc_del_holdout()
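
trc_add_holdout() and trc_del_holdout() above tie the task's reference count to holdout-list membership: the reference is taken exactly when the entry is linked and dropped exactly when it is unlinked, with the list_empty()/list_del_init() pairing making both operations idempotent. A minimal model of that idiom (a self-linked node plays the role of an empty list_head; names are hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    /* Doubly-linked node that is "empty" when self-linked, like list_head. */
    struct node {
        struct node *prev, *next;
    };

    struct obj {
        int refcount;
        struct node link;       /* holdout-list linkage */
    };

    static bool node_empty(struct node *n) { return n->next == n; }
    static void node_init(struct node *n)  { n->prev = n->next = n; }

    static void node_add(struct node *n, struct node *head)
    {
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
    }

    static void node_del_init(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
        node_init(n);
    }

    /* Model of trc_add_holdout(): take a reference iff newly linked. */
    static void add_holdout(struct obj *o, struct node *head)
    {
        if (node_empty(&o->link)) {
            o->refcount++;              /* get_task_struct() */
            node_add(&o->link, head);
        }
    }

    /* Model of trc_del_holdout(): drop the reference iff still linked. */
    static void del_holdout(struct obj *o)
    {
        if (!node_empty(&o->link)) {
            node_del_init(&o->link);
            o->refcount--;              /* put_task_struct() */
        }
    }

    int main(void)
    {
        struct node head;
        struct obj o = { .refcount = 1 };

        node_init(&head);
        node_init(&o.link);
        add_holdout(&o, &head);
        add_holdout(&o, &head);  /* idempotent: no second reference */
        del_holdout(&o);
        del_holdout(&o);         /* idempotent: no double put */
        printf("refcount back to %d\n", o.refcount);
        return 0;
    }

Binding the reference to list membership lets several code paths release the same holdout without risking a double put.
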
1351 struct task_struct *t = current; in trc_read_check_handler() local
1355 if (unlikely(texp != t)) in trc_read_check_handler()
1360 nesting = READ_ONCE(t->trc_reader_nesting); in trc_read_check_handler()
1362 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); in trc_read_check_handler()
1372 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED); in trc_read_check_handler()
1383 static int trc_inspect_reader(struct task_struct *t, void *bhp_in) in trc_inspect_reader() argument
1386 int cpu = task_cpu(t); in trc_inspect_reader()
1390 if (task_curr(t) && !ofl) { in trc_inspect_reader()
1400 if (!rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting)) in trc_inspect_reader()
1406 nesting = t->trc_reader_nesting; in trc_inspect_reader()
1407 WARN_ON_ONCE(ofl && task_curr(t) && !is_idle_task(t)); in trc_inspect_reader()
1416 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); in trc_inspect_reader()
1425 if (!rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS | TRC_NEED_QS_CHECKED)) in trc_inspect_reader()
1426 trc_add_holdout(t, bhp); in trc_inspect_reader()
1431 static void trc_wait_for_one_reader(struct task_struct *t, in trc_wait_for_one_reader() argument
1437 if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI in trc_wait_for_one_reader()
1441 if (t == current) { in trc_wait_for_one_reader()
1442 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); in trc_wait_for_one_reader()
1443 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); in trc_wait_for_one_reader()
1448 get_task_struct(t); in trc_wait_for_one_reader()
1449 if (!task_call_func(t, trc_inspect_reader, bhp)) { in trc_wait_for_one_reader()
1450 put_task_struct(t); in trc_wait_for_one_reader()
1453 put_task_struct(t); in trc_wait_for_one_reader()
1462 trc_add_holdout(t, bhp); in trc_wait_for_one_reader()
1463 if (task_curr(t) && in trc_wait_for_one_reader()
1466 cpu = task_cpu(t); in trc_wait_for_one_reader()
1469 if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0) in trc_wait_for_one_reader()
1473 t->trc_ipi_to_cpu = cpu; in trc_wait_for_one_reader()
1475 if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) { in trc_wait_for_one_reader()
1482 t->trc_ipi_to_cpu = -1; in trc_wait_for_one_reader()
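
trc_wait_for_one_reader() above escalates through three ways of getting a task's reader state checked: the task is current and can check itself; the task can be inspected while locked out of running (task_call_func() with trc_inspect_reader()); or, if inspection could not run because the task was running, it is queued as a holdout and an IPI is sent to its CPU, with trc_ipi_to_cpu recording the in-flight IPI so a second one is never sent. A compressed control-flow model of that escalation (userspace; every function and field below is a stand-in):

    #include <stdbool.h>
    #include <stdio.h>

    struct rtask {
        int id;
        bool is_current;     /* models t == current */
        bool inspectable;    /* models task_call_func() succeeding */
        bool running_on_cpu; /* models task_curr(t) */
        int ipi_to_cpu;      /* -1 = no IPI in flight */
    };

    /* Tier 1: a task can always report on itself. */
    static bool check_self(struct rtask *t)
    {
        return t->is_current;
    }

    /* Tier 2: inspect the task while it is locked out of running. */
    static bool inspect_directly(struct rtask *t)
    {
        return t->inspectable;
    }

    /* Tier 3: ask the CPU the task is running on, unless an IPI for
     * this task is already outstanding. */
    static bool send_ipi(struct rtask *t, int cpu)
    {
        if (!t->running_on_cpu || t->ipi_to_cpu >= 0)
            return false;
        t->ipi_to_cpu = cpu;   /* handler resets this to -1 when done */
        return true;
    }

    static void wait_for_one_reader(struct rtask *t)
    {
        if (check_self(t))
            printf("task %d: checked as current\n", t->id);
        else if (inspect_directly(t))
            printf("task %d: inspected remotely\n", t->id);
        else if (send_ipi(t, 0))
            printf("task %d: IPI queued to cpu %d\n", t->id, t->ipi_to_cpu);
        else
            printf("task %d: left on holdout list for rescan\n", t->id);
    }

    int main(void)
    {
        struct rtask a = { .id = 1, .is_current = true,  .ipi_to_cpu = -1 };
        struct rtask b = { .id = 2, .inspectable = true, .ipi_to_cpu = -1 };
        struct rtask c = { .id = 3, .running_on_cpu = true, .ipi_to_cpu = -1 };

        wait_for_one_reader(&a);
        wait_for_one_reader(&b);
        wait_for_one_reader(&c);
        return 0;
    }

The acquire load of trc_ipi_to_cpu on entry (the "Order IPI" line 1437) suggests a pairing with a release store when the handler clears the field, so a completed IPI's updates are visible before the next escalation decision.
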
1491 static bool rcu_tasks_trace_pertask_prep(struct task_struct *t, bool notself) in rcu_tasks_trace_pertask_prep() argument
1497 if (unlikely(t == NULL) || (t == current && notself) || !list_empty(&t->trc_holdout_list)) in rcu_tasks_trace_pertask_prep()
1500 rcu_st_need_qs(t, 0); in rcu_tasks_trace_pertask_prep()
1501 t->trc_ipi_to_cpu = -1; in rcu_tasks_trace_pertask_prep()
1506 static void rcu_tasks_trace_pertask(struct task_struct *t, struct list_head *hop) in rcu_tasks_trace_pertask() argument
1508 if (rcu_tasks_trace_pertask_prep(t, true)) in rcu_tasks_trace_pertask()
1509 trc_wait_for_one_reader(t, hop); in rcu_tasks_trace_pertask()
1519 struct task_struct *t; in rcu_tasks_trace_pregp_step() local
1534 t = cpu_curr_snapshot(cpu); in rcu_tasks_trace_pregp_step()
1535 if (rcu_tasks_trace_pertask_prep(t, true)) in rcu_tasks_trace_pregp_step()
1536 trc_add_holdout(t, hop); in rcu_tasks_trace_pregp_step()
1550 t = list_first_entry(&blkd_tasks, struct task_struct, trc_blkd_node); in rcu_tasks_trace_pregp_step()
1551 list_del_init(&t->trc_blkd_node); in rcu_tasks_trace_pregp_step()
1552 list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks); in rcu_tasks_trace_pregp_step()
1554 rcu_tasks_trace_pertask(t, hop); in rcu_tasks_trace_pregp_step()
1587 static int trc_check_slow_task(struct task_struct *t, void *arg) in trc_check_slow_task() argument
1591 if (task_curr(t) && cpu_online(task_cpu(t))) in trc_check_slow_task()
1593 trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting); in trc_check_slow_task()
1594 trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu); in trc_check_slow_task()
1595 trc_rdrp->needqs = rcu_ld_need_qs(t); in trc_check_slow_task()
1600 static void show_stalled_task_trace(struct task_struct *t, bool *firstreport) in show_stalled_task_trace() argument
1604 bool is_idle_tsk = is_idle_task(t); in show_stalled_task_trace()
1610 cpu = task_cpu(t); in show_stalled_task_trace()
1611 if (!task_call_func(t, trc_check_slow_task, &trc_rdr)) in show_stalled_task_trace()
1613 t->pid, in show_stalled_task_trace()
1614 ".I"[t->trc_ipi_to_cpu >= 0], in show_stalled_task_trace()
1618 t->pid, in show_stalled_task_trace()
1622 ".B"[!!data_race(t->trc_reader_special.b.blocked)], in show_stalled_task_trace()
1627 sched_show_task(t); in show_stalled_task_trace()
1644 struct task_struct *g, *t; in check_all_holdout_tasks_trace() local
1649 list_for_each_entry_safe(t, g, hop, trc_holdout_list) { in check_all_holdout_tasks_trace()
1651 if (READ_ONCE(t->trc_ipi_to_cpu) == -1 && in check_all_holdout_tasks_trace()
1652 !(rcu_ld_need_qs(t) & TRC_NEED_QS_CHECKED)) in check_all_holdout_tasks_trace()
1653 trc_wait_for_one_reader(t, hop); in check_all_holdout_tasks_trace()
1656 if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 && in check_all_holdout_tasks_trace()
1657 rcu_ld_need_qs(t) == TRC_NEED_QS_CHECKED) in check_all_holdout_tasks_trace()
1658 trc_del_holdout(t); in check_all_holdout_tasks_trace()
1660 show_stalled_task_trace(t, firstreport); in check_all_holdout_tasks_trace()
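
The check_all_holdout_tasks_trace() lines above are the trace flavor's rescan loop: a holdout with no IPI in flight and no completed check gets another trc_wait_for_one_reader() attempt, one that has been checked and needs nothing further is released, and anything else is reported once stall reporting is due. A loose userspace model of that decision (the flag names and values below are invented stand-ins, not the kernel's TRC_* encoding):

    #include <stdbool.h>
    #include <stdio.h>

    #define QS_CHECKED 0x1   /* models TRC_NEED_QS_CHECKED */
    #define QS_NEEDED  0x2   /* models TRC_NEED_QS */

    struct holdout {
        int id;
        int ipi_to_cpu;      /* -1 = no IPI in flight */
        unsigned need_qs;    /* models rcu_ld_need_qs(t) */
        bool on_list;
        struct holdout *next;
    };

    /* Model of the check_all_holdout_tasks_trace() rescan: retry tasks
     * with no IPI in flight and no completed check, release tasks that
     * are checked and need nothing more, and report the rest. */
    static void rescan(struct holdout *hop, bool needreport)
    {
        for (struct holdout *t = hop; t; t = t->next) {
            if (t->ipi_to_cpu == -1 && !(t->need_qs & QS_CHECKED))
                printf("task %d: retrying reader check\n", t->id);
            else if (t->ipi_to_cpu == -1 && t->need_qs == QS_CHECKED)
                t->on_list = false;     /* trc_del_holdout() */
            else if (needreport)
                printf("task %d: still stalled\n", t->id);
        }
    }

    int main(void)
    {
        struct holdout c = { .id = 3, .ipi_to_cpu = 0,
                             .need_qs = QS_CHECKED | QS_NEEDED, .on_list = true };
        struct holdout b = { .id = 2, .ipi_to_cpu = -1,
                             .need_qs = QS_CHECKED, .on_list = true, .next = &c };
        struct holdout a = { .id = 1, .ipi_to_cpu = -1,
                             .need_qs = 0, .on_list = true, .next = &b };

        rescan(&a, true);
        printf("task 2 on list: %d\n", b.on_list);
        return 0;
    }
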
1697 static void exit_tasks_rcu_finish_trace(struct task_struct *t) in exit_tasks_rcu_finish_trace() argument
1699 union rcu_special trs = READ_ONCE(t->trc_reader_special); in exit_tasks_rcu_finish_trace()
1701 rcu_trc_cmpxchg_need_qs(t, 0, TRC_NEED_QS_CHECKED); in exit_tasks_rcu_finish_trace()
1702 WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting)); in exit_tasks_rcu_finish_trace()
1703 if (WARN_ON_ONCE(rcu_ld_need_qs(t) & TRC_NEED_QS || trs.b.blocked)) in exit_tasks_rcu_finish_trace()
1704 rcu_read_unlock_trace_special(t); in exit_tasks_rcu_finish_trace()
1706 WRITE_ONCE(t->trc_reader_nesting, 0); in exit_tasks_rcu_finish_trace()
1803 static void exit_tasks_rcu_finish_trace(struct task_struct *t) { } in exit_tasks_rcu_finish_trace() argument