/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(void);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_head: Head of callback list.
 * @cbs_tail: Tail pointer for callback list.
 * @cbs_wq: Wait queue allowing a new callback to get the kthread's attention.
 * @cbs_lock: Lock protecting callback list.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @n_gps: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct rcu_head *cbs_head;
	struct rcu_head **cbs_tail;
	struct wait_queue_head cbs_wq;
	raw_spinlock_t cbs_lock;
	int gp_state;
	int gp_sleep;
	int init_fract;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long n_gps;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	char *name;
	char *kname;
};

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)				\
static struct rcu_tasks rt_name =					\
{									\
	.cbs_tail = &rt_name.cbs_head,					\
	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq),	\
	.cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock),		\
	.gp_func = gp,							\
	.call_func = call,						\
	.name = n,							\
	.kname = #rt_name,						\
}
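
/*
 * Illustrative sketch (all names below are hypothetical, for
 * illustration only): a flavor supplies its grace-period-wait function
 * and its call_rcu()-style enqueue function to DEFINE_RCU_TASKS(), as
 * the real flavors do further down in this file:
 *
 *	static void my_flavor_wait_gp(struct rcu_tasks *rtp);
 *	void call_my_flavor(struct rcu_head *rhp, rcu_callback_t func);
 *	DEFINE_RCU_TASKS(rcu_my_flavor, my_flavor_wait_gp, call_my_flavor,
 *			 "RCU My Flavor");
 */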

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
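
/*
 * Tuning sketch: because this file is built into the "rcupdate."
 * module-parameter namespace, these knobs are normally set on the
 * kernel boot command line, for example
 * "rcupdate.rcu_task_stall_timeout=1500" (a value <= 0 disables the
 * stall warnings), or at runtime (assuming the usual sysfs layout for
 * 0644 module parameters):
 *
 *	echo 1500 > /sys/module/rcupdate/parameters/rcu_task_stall_timeout
 */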

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT		 0
#define RTGS_WAIT_WAIT_CBS	 1
#define RTGS_WAIT_GP		 2
#define RTGS_PRE_WAIT_GP	 3
#define RTGS_SCAN_TASKLIST	 4
#define RTGS_POST_SCAN_TASKLIST	 5
#define RTGS_WAIT_SCAN_HOLDOUTS	 6
#define RTGS_SCAN_HOLDOUTS	 7
#define RTGS_POST_GP		 8
#define RTGS_WAIT_READERS	 9
#define RTGS_INVOKE_CBS		10
#define RTGS_WAIT_CBS		11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	unsigned long flags;
	bool needwake;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
	needwake = !rtp->cbs_head;
	WRITE_ONCE(*rtp->cbs_tail, rhp);
	rtp->cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		wake_up(&rtp->cbs_wq);
}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started.  */
	if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_%s() called too soon", rtp->name))
		return;

	/* Wait for the grace period. */
	wait_rcu_gp(rtp->call_func);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct rcu_head *list;
	struct rcu_head *next;
	struct rcu_tasks *rtp = arg;

	/* Run on housekeeping CPUs by default.  Sysadmin can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);
	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {
		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
		smp_mb__after_spinlock(); // Order updates vs. GP.
		list = rtp->cbs_head;
		rtp->cbs_head = NULL;
		rtp->cbs_tail = &rtp->cbs_head;
		raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rtp->cbs_wq,
						 READ_ONCE(rtp->cbs_head));
			if (!rtp->cbs_head) {
				WARN_ON(signal_pending(current));
				set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
				schedule_timeout_idle(HZ/10);
			}
			continue;
		}

		// Wait for one grace period.
		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
		rtp->gp_start = jiffies;
		rtp->gp_func(rtp);
		rtp->n_gps++;

		/* Invoke the callbacks. */
		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		/* Paranoid sleep to keep this from entering a tight loop */
		schedule_timeout_idle(rtp->gp_sleep);
	}
}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rtp->n_gps),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[!!data_race(rtp->cbs_head)],
		s);
}
#endif /* #ifndef CONFIG_TINY_RCU */

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g, *t;
	unsigned long lastreport;
	LIST_HEAD(holdouts);
	int fract;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func();

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period.  Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked.  Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	rcu_read_lock();
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);
	rcu_read_unlock();

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts.  When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;

	// Start off with initial wait and slowly back off to 1 HZ wait.
	fract = rtp->init_fract;
	if (fract > HZ)
		fract = HZ;

	for (;;) {
		bool firstreport;
		bool needreport;
		int rtst;

		if (list_empty(&holdouts))
			break;

		/* Slowly back off waiting for holdouts */
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		schedule_timeout_idle(HZ/fract);

		if (fract > 1)
			fract--;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport)
			lastreport = jiffies;
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// will be needed.

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(void)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete.  Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled.  Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

/* Processing between scanning the tasklist and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	/*
	 * Exiting tasks may escape the tasklist scan. Those are vulnerable
	 * until their final schedule() with TASK_DEAD state. To cope with
	 * this, divide the fragile part of the exit path into two
	 * overlapping read-side critical sections:
	 *
	 * 1) An _SRCU_ read side starting before calling exit_notify(),
	 *    which may remove the task from the tasklist, and ending after
	 *    the final preempt_disable() call in do_exit().
	 *
	 * 2) An _RCU_ read side starting with the final preempt_disable()
	 *    call in do_exit() and ending with the final call to schedule()
	 *    with TASK_DEAD state.
	 *
	 * This handles part 1).  The postgp function will handle part 2)
	 * with a call to synchronize_rcu().
	 */
	synchronize_srcu(&tasks_rcu_exit_srcu);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu) and
	 * ensuring that the whole region from tasklist removal until the
	 * final schedule() with TASK_DEAD state acts as an RCU Tasks
	 * read-side critical section.
	 */
	synchronize_rcu();
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
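
/*
 * Example usage (a minimal sketch; my_tramp, my_tramp_free, and old_tp
 * are hypothetical): a tracer that is done with a dynamically allocated
 * trampoline can defer freeing it until all tasks have passed through
 * a quiescent state:
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		void *insns;
 *	};
 *
 *	static void my_tramp_free(struct rcu_head *rhp)
 *	{
 *		struct my_tramp *tp = container_of(rhp, struct my_tramp, rh);
 *
 *		kfree(tp->insns);
 *		kfree(tp);
 *	}
 *
 *	// After unhooking old_tp so that no task can newly enter it:
 *	call_rcu_tasks(&old_tp->rh, my_tramp_free);
 */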

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
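
/*
 * Example usage (a minimal sketch with hypothetical helpers): the
 * synchronous form suits unregistration paths that may sleep:
 *
 *	my_unhook_trampoline(old_tp);	// No new entries after this.
 *	synchronize_rcu_tasks();	// Wait for tasks already inside.
 *	kfree(old_tp);			// Now safe to free.
 */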

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

static int __init rcu_spawn_tasks_kthread(void)
{
	rcu_tasks.gp_sleep = HZ / 10;
	rcu_tasks.init_fract = 10;
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}

#ifndef CONFIG_TINY_RCU
static void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Contribute to protect against tasklist scan blind spot while the
 * task is exiting and may be removed from the tasklist. See
 * corresponding synchronize_srcu() for further details.
 */
void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
{
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
}

/*
 * Contribute to protect against tasklist scan blind spot while the
 * task is exiting and may be removed from the tasklist. See
 * corresponding synchronize_srcu() for further details.
 */
void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu)
{
	struct task_struct *t = current;

	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
}

/*
 * Contribute to protect against tasklist scan blind spot while the
 * task is exiting and may be removed from the tasklist. See
 * corresponding synchronize_srcu() for further details.
 */
void exit_tasks_rcu_finish(void)
{
	exit_tasks_rcu_stop();
	exit_tasks_rcu_finish_trace(current);
}

#else /* #ifdef CONFIG_TASKS_RCU */
static inline void show_rcu_tasks_classic_gp_kthread(void) { }
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_stop(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
// passing an empty function to schedule_on_each_cpu().  This approach
// provides an asynchronous call_rcu_tasks_rude() API and batching of
// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
// This sends IPIs far and wide and induces otherwise unnecessary context
// switches on all online CPUs, whether idle or not.
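
/*
 * Usage sketch (hypothetical names): updaters use the same pattern as
 * for the other flavors, but rely on the forced context switches to
 * also wait out code that runs with preemption disabled:
 *
 *	my_unhook_patch_site(old);	// No new entries after this.
 *	synchronize_rcu_tasks_rude();	// Context switch on every CPU.
 *	kfree(old);
 */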

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}

void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
		 "RCU Tasks Rude");

/**
 * call_rcu_tasks_rude() - Queue a callback for invocation after a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_rcu_qs(), or transition to usermode execution.  As such,
 * there are no read-side primitives analogous to rcu_read_lock() and
 * rcu_read_unlock() because this primitive is intended to determine
 * that all tasks have passed through a safe state, not so much for
 * data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory,
 * anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);

/**
 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_rude(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks_rude();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);

static int __init rcu_spawn_tasks_rude_kthread(void)
{
	rcu_tasks_rude.gp_sleep = HZ / 10;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
	return 0;
}

#ifndef CONFIG_TINY_RCU
static void show_rcu_tasks_rude_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
#endif /* #ifndef CONFIG_TINY_RCU */

#else /* #ifdef CONFIG_TASKS_RUDE_RCU */
static void show_rcu_tasks_rude_gp_kthread(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU.  This variant is designed to be used
// to protect tracing hooks, including those of BPF.  This variant
// therefore:
//
// 1.	Has explicit read-side markers to allow finite grace periods
//	in the face of in-kernel loops for PREEMPT=n builds.
//
// 2.	Protects code in the idle loop, exception entry/exit, and
//	CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3.	Avoids expensive read-side instructions, having overhead similar
//	to that of Preemptible RCU.
//
// There are of course downsides.  The grace-period code can send IPIs to
// CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
// It is necessary to scan the full tasklist, much as for Tasks RCU.  There
// is a single callback queue guarded by a single lock, again, much as for
// Tasks RCU.  If needed, these downsides can be at least partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.

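// Reader-side sketch (my_hook is hypothetical): hooks protected by this
// flavor bracket their execution with the explicit markers declared in
// include/linux/rcupdate_trace.h:
//
//	rcu_read_lock_trace();
//	my_hook(args);			// Protected by Tasks Trace RCU.
//	rcu_read_unlock_trace();
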
// The lockdep state must be outside of #ifdef to be useful.
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

static atomic_t trc_n_readers_need_end;		// Number of waited-for readers.
static DECLARE_WAIT_QUEUE_HEAD(trc_wait);	// List of holdout tasks.

// Record outstanding IPIs to each CPU.  No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
static unsigned long n_heavy_reader_attempts;
static unsigned long n_heavy_reader_updates;
static unsigned long n_heavy_reader_ofl_updates;

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");

/*
 * This irq_work handler allows rcu_read_unlock_trace() to be invoked
 * while the scheduler locks are held.
 */
static void rcu_read_unlock_iw(struct irq_work *iwp)
{
	wake_up(&trc_wait);
}
static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);

/* If we are the last reader, wake up the grace-period kthread. */
void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
{
	int nq = READ_ONCE(t->trc_reader_special.b.need_qs);

	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers.
	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
	if (nq)
		WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_nesting, nesting);
	if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
		irq_work_queue(&rcu_tasks_trace_iw);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);

/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
	if (list_empty(&t->trc_holdout_list)) {
		get_task_struct(t);
		list_add(&t->trc_holdout_list, bhp);
	}
}

/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{
	if (!list_empty(&t->trc_holdout_list)) {
		list_del_init(&t->trc_holdout_list);
		put_task_struct(t);
	}
}

/* IPI handler to check task state. */
static void trc_read_check_handler(void *t_in)
{
	struct task_struct *t = current;
	struct task_struct *texp = t_in;

	// If the task is no longer running on this CPU, leave.
	if (unlikely(texp != t)) {
		goto reset_ipi; // Already on holdout list, so will check later.
	}

	// If the task is not in a read-side critical section, and
	// if this is the last reader, awaken the grace-period kthread.
	if (likely(!READ_ONCE(t->trc_reader_nesting))) {
		WRITE_ONCE(t->trc_reader_checked, true);
		goto reset_ipi;
	}
	// If we are racing with an rcu_read_unlock_trace(), try again later.
	if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
		goto reset_ipi;
	WRITE_ONCE(t->trc_reader_checked, true);

	// Get here if the task is in a read-side critical section.  Set
	// its state so that it will awaken the grace-period kthread upon
	// exit from that critical section.
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);

reset_ipi:
	// Allow future IPIs to be sent on CPU and for task.
	// Also order this IPI handler against any later manipulations of
	// the intended task.
	smp_store_release(&per_cpu(trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
}

/* Callback function for the scheduler to check a locked-down task.  */
static bool trc_inspect_reader(struct task_struct *t, void *arg)
{
	int cpu = task_cpu(t);
	int nesting;
	bool ofl = cpu_is_offline(cpu);

	if (task_curr(t)) {
		WARN_ON_ONCE(ofl && !is_idle_task(t));

		// If no chance of heavyweight readers, do it the hard way.
		if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
			return false;

		// If heavyweight readers are enabled on the remote task,
		// we can inspect its state even though it is currently
		// running.  However, we cannot safely change its state.
		n_heavy_reader_attempts++;
		if (!ofl && // Check for "running" idle tasks on offline CPUs.
		    !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
			return false; // No quiescent state, do it the hard way.
		n_heavy_reader_updates++;
		if (ofl)
			n_heavy_reader_ofl_updates++;
		nesting = 0;
	} else {
		// The task is not running, so C-language access is safe.
		nesting = t->trc_reader_nesting;
	}

	// If not exiting a read-side critical section, mark as checked
	// so that the grace-period kthread will remove it from the
	// holdout list.
	t->trc_reader_checked = nesting >= 0;
	if (nesting <= 0)
		return !nesting;  // If in QS, done, otherwise try again later.

	// The task is in a read-side critical section, so set up its
	// state so that it will awaken the grace-period kthread upon exit
	// from that critical section.
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
	return true;
}

/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{
	int cpu;

	// If a previous IPI is still in flight, let it complete.
	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
		return;

	// The current task had better be in a quiescent state.
	if (t == current) {
		t->trc_reader_checked = true;
		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
		return;
	}

	// Attempt to nail down the task for inspection.
	get_task_struct(t);
	if (try_invoke_on_locked_down_task(t, trc_inspect_reader, NULL)) {
		put_task_struct(t);
		return;
	}
	put_task_struct(t);

	// If currently running, send an IPI; either way, add to the list.
	trc_add_holdout(t, bhp);
	if (task_curr(t) &&
	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
		// The task is currently running, so try IPIing it.
		cpu = task_cpu(t);

		// If there is already an IPI outstanding, let it happen.
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		rcu_tasks_trace.n_ipis++;
		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			WARN_ONCE(1, "%s():  smp_call_function_single() failed for CPU: %d\n",
				  __func__, cpu);
			rcu_tasks_trace.n_ipis_fails++;
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = -1;
		}
	}
}

/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(void)
{
	int cpu;

	// Allow for fast-acting IPIs.
	atomic_set(&trc_n_readers_need_end, 1);

	// There shouldn't be any old IPIs, but...
	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));

	// Disable CPU hotplug across the tasklist scan.
	// This also waits for all readers in CPU-hotplug code paths.
	cpus_read_lock();
}

/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t,
				    struct list_head *hop)
{
	// During early boot when there is only the one boot CPU, there
	// is no idle task for the other CPUs. Just return.
	if (unlikely(t == NULL))
		return;

	WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_checked, false);
	t->trc_ipi_to_cpu = -1;
	trc_wait_for_one_reader(t, hop);
}

/*
 * Do intermediate processing between task and holdout scans and
 * pick up the idle tasks.
 */
static void rcu_tasks_trace_postscan(struct list_head *hop)
{
	int cpu;

	for_each_possible_cpu(cpu)
		rcu_tasks_trace_pertask(idle_task(cpu), hop);

	// Re-enable CPU hotplug now that the tasklist scan has completed.
	cpus_read_unlock();

	// Wait for late-stage exiting tasks to finish exiting.
	// These might have passed the call to exit_tasks_rcu_finish().
	synchronize_rcu();
	// Any tasks that exit after this point will set ->trc_reader_checked.
}

/* Show the state of a task stalling the current RCU tasks trace GP. */
static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
{
	int cpu;

	if (*firstreport) {
		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
		*firstreport = false;
	}
	// FIXME: This should attempt to use try_invoke_on_nonrunning_task().
	cpu = task_cpu(t);
	pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
		 t->pid,
		 ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0],
		 ".i"[is_idle_task(t)],
		 ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
		 READ_ONCE(t->trc_reader_nesting),
		 " N"[!!READ_ONCE(t->trc_reader_special.b.need_qs)],
		 cpu);
	sched_show_task(t);
}

/* List stalled IPIs for RCU tasks trace. */
static void show_stalled_ipi_trace(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (per_cpu(trc_ipi_to_cpu, cpu))
			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
}

/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool needreport, bool *firstreport)
{
	struct task_struct *g, *t;

	// Disable CPU hotplug across the holdout list scan.
	cpus_read_lock();

	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
		// If safe and needed, try to check the current task.
		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
		    !READ_ONCE(t->trc_reader_checked))
			trc_wait_for_one_reader(t, hop);

		// If check succeeded, remove this task from the list.
		if (READ_ONCE(t->trc_reader_checked))
			trc_del_holdout(t);
		else if (needreport)
			show_stalled_task_trace(t, firstreport);
	}

	// Re-enable CPU hotplug now that the holdout list scan has completed.
	cpus_read_unlock();

	if (needreport) {
		if (*firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
		show_stalled_ipi_trace();
	}
}

static void rcu_tasks_trace_empty_fn(void *unused)
{
}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
{
	int cpu;
	bool firstreport;
	struct task_struct *g, *t;
	LIST_HEAD(holdouts);
	long ret;

	// Wait for any lingering IPI handlers to complete.  Note that
	// if a CPU has gone offline or transitioned to userspace in the
	// meantime, all IPI handlers should have been drained beforehand.
	// Yes, this assumes that CPUs process IPIs in order.  If that ever
	// changes, there will need to be a recheck and/or timed wait.
	for_each_online_cpu(cpu)
		if (smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))
			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);

	// Remove the safety count.
	smp_mb__before_atomic();  // Order vs. earlier atomics
	atomic_dec(&trc_n_readers_need_end);
	smp_mb__after_atomic();  // Order vs. later atomics

	// Wait for readers.
	set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
	for (;;) {
		ret = wait_event_idle_exclusive_timeout(
				trc_wait,
				atomic_read(&trc_n_readers_need_end) == 0,
				READ_ONCE(rcu_task_stall_timeout));
		if (ret)
			break;  // Count reached zero.
		// Stall warning time, so make a list of the offenders.
		rcu_read_lock();
		for_each_process_thread(g, t)
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				trc_add_holdout(t, &holdouts);
		rcu_read_unlock();
		firstreport = true;
		list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				show_stalled_task_trace(t, &firstreport);
			trc_del_holdout(t); // Release task_struct reference.
		}
		if (firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/tasklist mismatch?)\n");
		show_stalled_ipi_trace();
		pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
	}
	smp_mb(); // Caller's code must be ordered after wakeup.
		  // Pairs with pretty much every ordering primitive.
}

/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
	WRITE_ONCE(t->trc_reader_checked, true);
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
		rcu_read_unlock_trace_special(t, 0);
}

/**
 * call_rcu_tasks_trace() - Queue a callback for invocation after a trace task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to
 * rcu_read_lock_trace() and rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
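
/*
 * Updater-side sketch (my_hook, my_hook_free, and the rh field are
 * hypothetical), pairing with the rcu_read_lock_trace() readers
 * sketched above:
 *
 *	rcu_assign_pointer(my_hook, new_hook);	// Publish the replacement.
 *	call_rcu_tasks_trace(&old_hook->rh, my_hook_free);
 */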

/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);

/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_trace(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks_trace();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);

static int __init rcu_spawn_tasks_trace_kthread(void)
{
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
		rcu_tasks_trace.gp_sleep = HZ / 10;
		rcu_tasks_trace.init_fract = 10;
	} else {
		rcu_tasks_trace.gp_sleep = HZ / 200;
		if (rcu_tasks_trace.gp_sleep <= 0)
			rcu_tasks_trace.gp_sleep = 1;
		rcu_tasks_trace.init_fract = HZ / 5;
		if (rcu_tasks_trace.init_fract <= 0)
			rcu_tasks_trace.init_fract = 1;
	}
	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
	rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
	return 0;
}

#ifndef CONFIG_TINY_RCU
static void show_rcu_tasks_trace_gp_kthread(void)
{
	char buf[64];

	sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
		data_race(n_heavy_reader_ofl_updates),
		data_race(n_heavy_reader_updates),
		data_race(n_heavy_reader_attempts));
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
}
#endif /* #ifndef CONFIG_TINY_RCU */

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */

#ifndef CONFIG_TINY_RCU
void show_rcu_tasks_gp_kthreads(void)
{
	show_rcu_tasks_classic_gp_kthread();
	show_rcu_tasks_rude_gp_kthread();
	show_rcu_tasks_trace_gp_kthread();
}
#endif /* #ifndef CONFIG_TINY_RCU */

void __init rcu_init_tasks_generic(void)
{
#ifdef CONFIG_TASKS_RCU
	rcu_spawn_tasks_kthread();
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	rcu_spawn_tasks_rude_kthread();
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	rcu_spawn_tasks_trace_kthread();
#endif
}

#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void rcu_tasks_bootup_oddness(void) {}
void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */