1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3  * Task-based RCU implementations.
4  *
5  * Copyright (C) 2020 Paul E. McKenney
6  */
7 
8 #ifdef CONFIG_TASKS_RCU_GENERIC
9 
10 ////////////////////////////////////////////////////////////////////////
11 //
12 // Generic data structures.
13 
14 struct rcu_tasks;
15 typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
16 typedef void (*pregp_func_t)(void);
17 typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
18 typedef void (*postscan_func_t)(struct list_head *hop);
19 typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
20 typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
21 
22 /**
23  * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
24  * @cbs_head: Head of callback list.
25  * @cbs_tail: Tail pointer for callback list.
26  * @cbs_wq: Wait queue allowing new callback to get kthread's attention.
27  * @cbs_lock: Lock protecting callback list.
28  * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
29  * @gp_func: This flavor's grace-period-wait function.
30  * @gp_state: Grace period's most recent state transition (debugging).
31  * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
32  * @init_fract: Initial backoff sleep interval.
33  * @gp_jiffies: Time of last @gp_state transition.
34  * @gp_start: Most recent grace-period start in jiffies.
35  * @n_gps: Number of grace periods completed since boot.
36  * @n_ipis: Number of IPIs sent to encourage grace periods to end.
37  * @n_ipis_fails: Number of IPI-send failures.
38  * @pregp_func: This flavor's pre-grace-period function (optional).
39  * @pertask_func: This flavor's per-task scan function (optional).
40  * @postscan_func: This flavor's post-task scan function (optional).
41  * @holdouts_func: This flavor's holdout-list scan function (optional).
42  * @postgp_func: This flavor's post-grace-period function (optional).
43  * @call_func: This flavor's call_rcu()-equivalent function.
44  * @name: This flavor's textual name.
45  * @kname: This flavor's kthread name.
46  */
47 struct rcu_tasks {
48 	struct rcu_head *cbs_head;
49 	struct rcu_head **cbs_tail;
50 	struct wait_queue_head cbs_wq;
51 	raw_spinlock_t cbs_lock;
52 	int gp_state;
53 	int gp_sleep;
54 	int init_fract;
55 	unsigned long gp_jiffies;
56 	unsigned long gp_start;
57 	unsigned long n_gps;
58 	unsigned long n_ipis;
59 	unsigned long n_ipis_fails;
60 	struct task_struct *kthread_ptr;
61 	rcu_tasks_gp_func_t gp_func;
62 	pregp_func_t pregp_func;
63 	pertask_func_t pertask_func;
64 	postscan_func_t postscan_func;
65 	holdouts_func_t holdouts_func;
66 	postgp_func_t postgp_func;
67 	call_rcu_func_t call_func;
68 	char *name;
69 	char *kname;
70 };
71 
72 #define DEFINE_RCU_TASKS(rt_name, gp, call, n)				\
73 static struct rcu_tasks rt_name =					\
74 {									\
75 	.cbs_tail = &rt_name.cbs_head,					\
76 	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq),	\
77 	.cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock),		\
78 	.gp_func = gp,							\
79 	.call_func = call,						\
80 	.name = n,							\
81 	.kname = #rt_name,						\
82 }
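
/*
 * Editorial sketch (not part of the original file): DEFINE_RCU_TASKS()
 * is how each flavor below is instantiated.  A hypothetical new flavor
 * would be wired up the same way, assuming its grace-period-wait and
 * call_rcu()-style functions already exist:
 *
 *	void call_rcu_tasks_example(struct rcu_head *rhp, rcu_callback_t func);
 *	static void rcu_tasks_example_wait_gp(struct rcu_tasks *rtp);
 *	DEFINE_RCU_TASKS(rcu_tasks_example, rcu_tasks_example_wait_gp,
 *			 call_rcu_tasks_example, "RCU Tasks Example");
 */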
83 
84 /* Track exiting tasks in order to allow them to be waited for. */
85 DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
86 
87 /* Avoid IPIing CPUs early in the grace period. */
88 #define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
89 static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
90 module_param(rcu_task_ipi_delay, int, 0644);
91 
92 /* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
93 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
94 static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
95 module_param(rcu_task_stall_timeout, int, 0644);
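
/*
 * Editorial note (hedged): because this file is built into the rcupdate
 * module, these knobs are normally set as boot parameters, for example
 * "rcupdate.rcu_task_stall_timeout=3000" (value in jiffies here), or at
 * runtime via /sys/module/rcupdate/parameters/rcu_task_stall_timeout.
 * Confirm the exact prefix against kernel-parameters.txt for your tree.
 */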
96 
97 /* RCU tasks grace-period state for debugging. */
98 #define RTGS_INIT		 0
99 #define RTGS_WAIT_WAIT_CBS	 1
100 #define RTGS_WAIT_GP		 2
101 #define RTGS_PRE_WAIT_GP	 3
102 #define RTGS_SCAN_TASKLIST	 4
103 #define RTGS_POST_SCAN_TASKLIST	 5
104 #define RTGS_WAIT_SCAN_HOLDOUTS	 6
105 #define RTGS_SCAN_HOLDOUTS	 7
106 #define RTGS_POST_GP		 8
107 #define RTGS_WAIT_READERS	 9
108 #define RTGS_INVOKE_CBS		10
109 #define RTGS_WAIT_CBS		11
110 #ifndef CONFIG_TINY_RCU
111 static const char * const rcu_tasks_gp_state_names[] = {
112 	"RTGS_INIT",
113 	"RTGS_WAIT_WAIT_CBS",
114 	"RTGS_WAIT_GP",
115 	"RTGS_PRE_WAIT_GP",
116 	"RTGS_SCAN_TASKLIST",
117 	"RTGS_POST_SCAN_TASKLIST",
118 	"RTGS_WAIT_SCAN_HOLDOUTS",
119 	"RTGS_SCAN_HOLDOUTS",
120 	"RTGS_POST_GP",
121 	"RTGS_WAIT_READERS",
122 	"RTGS_INVOKE_CBS",
123 	"RTGS_WAIT_CBS",
124 };
125 #endif /* #ifndef CONFIG_TINY_RCU */
126 
127 ////////////////////////////////////////////////////////////////////////
128 //
129 // Generic code.
130 
131 /* Record grace-period phase and time. */
132 static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
133 {
134 	rtp->gp_state = newstate;
135 	rtp->gp_jiffies = jiffies;
136 }
137 
138 #ifndef CONFIG_TINY_RCU
139 /* Return state name. */
140 static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
141 {
142 	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
143 	int j = READ_ONCE(i); // Prevent the compiler from reading twice
144 
145 	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
146 		return "???";
147 	return rcu_tasks_gp_state_names[j];
148 }
149 #endif /* #ifndef CONFIG_TINY_RCU */
150 
151 // Enqueue a callback for the specified flavor of Tasks RCU.
152 static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
153 				   struct rcu_tasks *rtp)
154 {
155 	unsigned long flags;
156 	bool needwake;
157 
158 	rhp->next = NULL;
159 	rhp->func = func;
160 	raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
161 	needwake = !rtp->cbs_head;
162 	WRITE_ONCE(*rtp->cbs_tail, rhp);
163 	rtp->cbs_tail = &rhp->next;
164 	raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
165 	/* We can't create the thread unless interrupts are enabled. */
166 	if (needwake && READ_ONCE(rtp->kthread_ptr))
167 		wake_up(&rtp->cbs_wq);
168 }
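
/*
 * Editorial sketch (illustrative only, hypothetical names): the
 * ->cbs_head/->cbs_tail pair used above is the usual singly linked
 * queue with a pointer-to-pointer tail, which keeps enqueue O(1) and
 * makes the empty and non-empty cases identical:
 *
 *	struct cb_queue {
 *		struct rcu_head *head;	// NULL when empty.
 *		struct rcu_head **tail;	// Points at head when empty.
 *	};
 *
 *	static void cb_enqueue(struct cb_queue *q, struct rcu_head *rhp)
 *	{
 *		rhp->next = NULL;
 *		*q->tail = rhp;		// Link onto the end...
 *		q->tail = &rhp->next;	// ...and advance the tail.
 *	}
 */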
169 
170 // Wait for a grace period for the specified flavor of Tasks RCU.
171 static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
172 {
173 	/* Complain if the scheduler has not started.  */
174 	if (WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
175 			 "synchronize_%s() called too soon", rtp->name))
176 		return;
177 
178 	/* Wait for the grace period. */
179 	wait_rcu_gp(rtp->call_func);
180 }
181 
182 /* RCU-tasks kthread that detects grace periods and invokes callbacks. */
183 static int __noreturn rcu_tasks_kthread(void *arg)
184 {
185 	unsigned long flags;
186 	struct rcu_head *list;
187 	struct rcu_head *next;
188 	struct rcu_tasks *rtp = arg;
189 
190 	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
191 	housekeeping_affine(current, HK_FLAG_RCU);
192 	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!
193 
194 	/*
195 	 * Each pass through the following loop makes one check for
196 	 * newly arrived callbacks, and, if there are some, waits for
197 	 * one RCU-tasks grace period and then invokes the callbacks.
198 	 * This loop is terminated by the system going down.  ;-)
199 	 */
200 	for (;;) {
201 		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
202 
203 		/* Pick up any new callbacks. */
204 		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
205 		smp_mb__after_spinlock(); // Order updates vs. GP.
206 		list = rtp->cbs_head;
207 		rtp->cbs_head = NULL;
208 		rtp->cbs_tail = &rtp->cbs_head;
209 		raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
210 
211 		/* If there were none, wait a bit and start over. */
212 		if (!list) {
213 			wait_event_interruptible(rtp->cbs_wq,
214 						 READ_ONCE(rtp->cbs_head));
215 			if (!rtp->cbs_head) {
216 				WARN_ON(signal_pending(current));
217 				set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
218 				schedule_timeout_idle(HZ/10);
219 			}
220 			continue;
221 		}
222 
223 		// Wait for one grace period.
224 		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
225 		rtp->gp_start = jiffies;
226 		rtp->gp_func(rtp);
227 		rtp->n_gps++;
228 
229 		/* Invoke the callbacks. */
230 		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
231 		while (list) {
232 			next = list->next;
233 			local_bh_disable();
234 			list->func(list);
235 			local_bh_enable();
236 			list = next;
237 			cond_resched();
238 		}
239 		/* Paranoid sleep to keep this from entering a tight loop */
240 		schedule_timeout_idle(rtp->gp_sleep);
241 	}
242 }
243 
244 /* Spawn RCU-tasks grace-period kthread. */
245 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
246 {
247 	struct task_struct *t;
248 
249 	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
250 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
251 		return;
252 	smp_mb(); /* Ensure others see full kthread. */
253 }
254 
255 #ifndef CONFIG_TINY_RCU
256 
257 /*
258  * Print any non-default Tasks RCU settings.
259  */
260 static void __init rcu_tasks_bootup_oddness(void)
261 {
262 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
263 	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
264 		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
265 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
266 #ifdef CONFIG_TASKS_RCU
267 	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
268 #endif /* #ifdef CONFIG_TASKS_RCU */
269 #ifdef CONFIG_TASKS_RUDE_RCU
270 	pr_info("\tRude variant of Tasks RCU enabled.\n");
271 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
272 #ifdef CONFIG_TASKS_TRACE_RCU
273 	pr_info("\tTracing variant of Tasks RCU enabled.\n");
274 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
275 }
276 
277 #endif /* #ifndef CONFIG_TINY_RCU */
278 
279 #ifndef CONFIG_TINY_RCU
280 /* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
281 static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
282 {
283 	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
284 		rtp->kname,
285 		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
286 		jiffies - data_race(rtp->gp_jiffies),
287 		data_race(rtp->n_gps),
288 		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
289 		".k"[!!data_race(rtp->kthread_ptr)],
290 		".C"[!!data_race(rtp->cbs_head)],
291 		s);
292 }
293 #endif // #ifndef CONFIG_TINY_RCU
294 
295 static void exit_tasks_rcu_finish_trace(struct task_struct *t);
296 
297 #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
298 
299 ////////////////////////////////////////////////////////////////////////
300 //
301 // Shared code between task-list-scanning variants of Tasks RCU.
302 
303 /* Wait for one RCU-tasks grace period. */
304 static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
305 {
306 	struct task_struct *g, *t;
307 	unsigned long lastreport;
308 	LIST_HEAD(holdouts);
309 	int fract;
310 
311 	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
312 	rtp->pregp_func();
313 
314 	/*
315 	 * There were callbacks, so we need to wait for an RCU-tasks
316 	 * grace period.  Start off by scanning the task list for tasks
317 	 * that are not already voluntarily blocked.  Mark these tasks
318 	 * and make a list of them in holdouts.
319 	 */
320 	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
321 	rcu_read_lock();
322 	for_each_process_thread(g, t)
323 		rtp->pertask_func(t, &holdouts);
324 	rcu_read_unlock();
325 
326 	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
327 	rtp->postscan_func(&holdouts);
328 
329 	/*
330 	 * Each pass through the following loop scans the list of holdout
331 	 * tasks, removing any that are no longer holdouts.  When the list
332 	 * is empty, we are done.
333 	 */
334 	lastreport = jiffies;
335 
336 	// Start off with initial wait and slowly back off to 1 HZ wait.
337 	fract = rtp->init_fract;
338 
339 	while (!list_empty(&holdouts)) {
340 		bool firstreport;
341 		bool needreport;
342 		int rtst;
343 
344 		/* Slowly back off waiting for holdouts */
345 		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
346 		schedule_timeout_idle(fract);
347 
348 		if (fract < HZ)
349 			fract++;
350 
351 		rtst = READ_ONCE(rcu_task_stall_timeout);
352 		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
353 		if (needreport)
354 			lastreport = jiffies;
355 		firstreport = true;
356 		WARN_ON(signal_pending(current));
357 		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
358 		rtp->holdouts_func(&holdouts, needreport, &firstreport);
359 	}
360 
361 	set_tasks_gp_state(rtp, RTGS_POST_GP);
362 	rtp->postgp_func(rtp);
363 }
364 
365 #endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
366 
367 #ifdef CONFIG_TASKS_RCU
368 
369 ////////////////////////////////////////////////////////////////////////
370 //
371 // Simple variant of RCU whose quiescent states are voluntary context
372 // switch, cond_resched_rcu_qs(), user-space execution, and idle.
373 // As such, grace periods can take one good long time.  There are no
374 // read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
375 // because this implementation is intended to get the system into a safe
376 // state for some of the manipulations involved in tracing and the like.
377 // Finally, this implementation does not support high call_rcu_tasks()
378 // rates from multiple CPUs.  If this is required, per-CPU callback lists
379 // will be needed.
380 //
381 // The implementation uses rcu_tasks_wait_gp(), which relies on function
382 // pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
383 // function sets these function pointers up so that rcu_tasks_wait_gp()
384 // invokes these functions in this order:
385 //
386 // rcu_tasks_pregp_step():
387 //	Invokes synchronize_rcu() in order to wait for all in-flight
388 //	t->on_rq and t->nvcsw transitions to complete.	This works because
389 //	all such transitions are carried out with interrupts disabled.
390 // rcu_tasks_pertask(), invoked on every non-idle task:
391 //	For every runnable non-idle task other than the current one, use
392 //	get_task_struct() to pin down that task, snapshot that task's
393 //	number of voluntary context switches, and add that task to the
394 //	holdout list.
395 // rcu_tasks_postscan():
396 //	Invoke synchronize_srcu() to ensure that all tasks that were
397 //	in the process of exiting (and which thus might not know to
398 //	synchronize with this RCU Tasks grace period) have completed
399 //	exiting.
400 // check_all_holdout_tasks(), repeatedly until holdout list is empty:
401 //	Scans the holdout list, attempting to identify a quiescent state
402 //	for each task on the list.  If there is a quiescent state, the
403 //	corresponding task is removed from the holdout list.
404 // rcu_tasks_postgp():
405 //	Invokes synchronize_rcu() in order to ensure that all prior
406 //	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
407 //	to have happened before the end of this RCU Tasks grace period.
408 //	Again, this works because all such transitions are carried out
409 //	with interrupts disabled.
410 //
411 // For each exiting task, the exit_tasks_rcu_start() and
412 // exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
413 // read-side critical sections waited for by rcu_tasks_postscan().
414 //
415 // Pre-grace-period update-side code is ordered before the grace period via the
416 // ->cbs_lock and the smp_mb__after_spinlock().  Pre-grace-period read-side
417 // code is ordered before the grace period via synchronize_rcu() call
418 // in rcu_tasks_pregp_step() and by the scheduler's locks and interrupt
419 // disabling.
420 
421 /* Pre-grace-period preparation. */
422 static void rcu_tasks_pregp_step(void)
423 {
424 	/*
425 	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
426 	 * to complete.  Invoking synchronize_rcu() suffices because all
427 	 * these transitions occur with interrupts disabled.  Without this
428 	 * synchronize_rcu(), a read-side critical section that started
429 	 * before the grace period might be incorrectly seen as having
430 	 * started after the grace period.
431 	 *
432 	 * This synchronize_rcu() also dispenses with the need for a
433 	 * memory barrier on the first store to t->rcu_tasks_holdout,
434 	 * as it forces the store to happen after the beginning of the
435 	 * grace period.
436 	 */
437 	synchronize_rcu();
438 }
439 
440 /* Per-task initial processing. */
441 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
442 {
443 	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
444 		get_task_struct(t);
445 		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
446 		WRITE_ONCE(t->rcu_tasks_holdout, true);
447 		list_add(&t->rcu_tasks_holdout_list, hop);
448 	}
449 }
450 
451 /* Processing between scanning taskslist and draining the holdout list. */
452 static void rcu_tasks_postscan(struct list_head *hop)
453 {
454 	/*
455 	 * Exiting tasks may escape the tasklist scan. Those are vulnerable
456 	 * until their final schedule() with TASK_DEAD state. To cope with
457 	 * this, divide the fragile part of the exit path into two
458 	 * intersecting read-side critical sections:
459 	 *
460 	 * 1) An _SRCU_ read side starting before calling exit_notify(),
461 	 *    which may remove the task from the tasklist, and ending after
462 	 *    the final preempt_disable() call in do_exit().
463 	 *
464 	 * 2) An _RCU_ read side starting with the final preempt_disable()
465 	 *    call in do_exit() and ending with the final call to schedule()
466 	 *    with TASK_DEAD state.
467 	 *
468 	 * This handles part 1).  The postgp step will handle part 2) with a
469 	 * call to synchronize_rcu().
470 	 */
471 	synchronize_srcu(&tasks_rcu_exit_srcu);
472 }
473 
474 /* See if tasks are still holding out, complain if so. */
475 static void check_holdout_task(struct task_struct *t,
476 			       bool needreport, bool *firstreport)
477 {
478 	int cpu;
479 
480 	if (!READ_ONCE(t->rcu_tasks_holdout) ||
481 	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
482 	    !READ_ONCE(t->on_rq) ||
483 	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
484 	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
485 		WRITE_ONCE(t->rcu_tasks_holdout, false);
486 		list_del_init(&t->rcu_tasks_holdout_list);
487 		put_task_struct(t);
488 		return;
489 	}
490 	rcu_request_urgent_qs_task(t);
491 	if (!needreport)
492 		return;
493 	if (*firstreport) {
494 		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
495 		*firstreport = false;
496 	}
497 	cpu = task_cpu(t);
498 	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
499 		 t, ".I"[is_idle_task(t)],
500 		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
501 		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
502 		 t->rcu_tasks_idle_cpu, cpu);
503 	sched_show_task(t);
504 }
505 
506 /* Scan the holdout lists for tasks no longer holding out. */
507 static void check_all_holdout_tasks(struct list_head *hop,
508 				    bool needreport, bool *firstreport)
509 {
510 	struct task_struct *t, *t1;
511 
512 	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
513 		check_holdout_task(t, needreport, firstreport);
514 		cond_resched();
515 	}
516 }
517 
518 /* Finish off the Tasks-RCU grace period. */
519 static void rcu_tasks_postgp(struct rcu_tasks *rtp)
520 {
521 	/*
522 	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
523 	 * memory barriers prior to them in the schedule() path, memory
524 	 * reordering on other CPUs could cause their RCU-tasks read-side
525 	 * critical sections to extend past the end of the grace period.
526 	 * However, because these ->nvcsw updates are carried out with
527 	 * interrupts disabled, we can use synchronize_rcu() to force the
528 	 * needed ordering on all such CPUs.
529 	 *
530 	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
531 	 * accesses to be within the grace period, avoiding the need for
532 	 * memory barriers for ->rcu_tasks_holdout accesses.
533 	 *
534 	 * In addition, this synchronize_rcu() waits for exiting tasks
535 	 * to complete their final preempt_disable() region of execution,
536 	 * cleaning up after synchronize_srcu(&tasks_rcu_exit_srcu) by
537 	 * enforcing that the whole region from tasklist removal until
538 	 * the final schedule() with TASK_DEAD state is treated as an
539 	 * RCU Tasks read-side critical section.
540 	 */
541 	synchronize_rcu();
542 }
543 
544 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
545 DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
546 
547 /**
548  * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
549  * @rhp: structure to be used for queueing the RCU updates.
550  * @func: actual callback function to be invoked after the grace period
551  *
552  * The callback function will be invoked some time after a full grace
553  * period elapses, in other words after all currently executing RCU
554  * read-side critical sections have completed. call_rcu_tasks() assumes
555  * that the read-side critical sections end at a voluntary context
556  * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
557  * or transition to usermode execution.  As such, there are no read-side
558  * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
559  * this primitive is intended to determine that all tasks have passed
560  * through a safe state, not so much for data-structure synchronization.
561  *
562  * See the description of call_rcu() for more detailed information on
563  * memory ordering guarantees.
564  */
565 void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
566 {
567 	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
568 }
569 EXPORT_SYMBOL_GPL(call_rcu_tasks);
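
/*
 * Editorial usage sketch (hypothetical names, not part of this file):
 * a tracer that has unhooked a trampoline might defer freeing it until
 * every task has passed through a Tasks-RCU quiescent state:
 *
 *	struct my_tramp {
 *		struct rcu_head rh;
 *		void *text;
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_tramp *tp = container_of(rhp, struct my_tramp, rh);
 *
 *		kfree(tp);	// No task can still be executing in it.
 *	}
 *
 *	// After removing all references to the trampoline:
 *	call_rcu_tasks(&tp->rh, my_tramp_free_cb);
 */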
570 
571 /**
572  * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
573  *
574  * Control will return to the caller some time after a full rcu-tasks
575  * grace period has elapsed, in other words after all currently
576  * executing rcu-tasks read-side critical sections have elapsed.  These
577  * read-side critical sections are delimited by calls to schedule(),
578  * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
579  * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
580  *
581  * This is a very specialized primitive, intended only for a few uses in
582  * tracing and other situations requiring manipulation of function
583  * preambles and profiling hooks.  The synchronize_rcu_tasks() function
584  * is not (yet) intended for heavy use from multiple CPUs.
585  *
586  * See the description of synchronize_rcu() for more detailed information
587  * on memory ordering guarantees.
588  */
589 void synchronize_rcu_tasks(void)
590 {
591 	synchronize_rcu_tasks_generic(&rcu_tasks);
592 }
593 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
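
/*
 * Editorial sketch (hypothetical names): code that may sleep can instead
 * wait in place rather than using a callback:
 *
 *	unregister_my_trampoline(tp);	// Remove all references.
 *	synchronize_rcu_tasks();	// Wait for tasks to pass through a QS.
 *	kfree(tp);
 */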
594 
595 /**
596  * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
597  *
598  * Although the current implementation is guaranteed to wait, it is not
599  * obligated to, for example, if there are no pending callbacks.
600  */
601 void rcu_barrier_tasks(void)
602 {
603 	/* There is only one callback queue, so this is easy.  ;-) */
604 	synchronize_rcu_tasks();
605 }
606 EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
607 
608 static int __init rcu_spawn_tasks_kthread(void)
609 {
610 	rcu_tasks.gp_sleep = HZ / 10;
611 	rcu_tasks.init_fract = HZ / 10;
612 	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
613 	rcu_tasks.pertask_func = rcu_tasks_pertask;
614 	rcu_tasks.postscan_func = rcu_tasks_postscan;
615 	rcu_tasks.holdouts_func = check_all_holdout_tasks;
616 	rcu_tasks.postgp_func = rcu_tasks_postgp;
617 	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
618 	return 0;
619 }
620 
621 #if !defined(CONFIG_TINY_RCU)
622 void show_rcu_tasks_classic_gp_kthread(void)
623 {
624 	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
625 }
626 EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
627 #endif // !defined(CONFIG_TINY_RCU)
628 
629 /*
630  * Contribute to protect against tasklist scan blind spot while the
631  * task is exiting and may be removed from the tasklist. See
632  * corresponding synchronize_srcu() for further details.
633  */
634 void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
635 {
636 	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
637 }
638 
639 /*
640  * Contribute to protect against tasklist scan blind spot while the
641  * task is exiting and may be removed from the tasklist. See
642  * corresponding synchronize_srcu() for further details.
643  */
644 void exit_tasks_rcu_stop(void) __releases(&tasks_rcu_exit_srcu)
645 {
646 	struct task_struct *t = current;
647 
648 	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
649 }
650 
651 /*
652  * Contribute to protect against tasklist scan blind spot while the
653  * task is exiting and may be removed from the tasklist. See
654  * corresponding synchronize_srcu() for further details.
655  */
656 void exit_tasks_rcu_finish(void)
657 {
658 	exit_tasks_rcu_stop();
659 	exit_tasks_rcu_finish_trace(current);
660 }
661 
662 #else /* #ifdef CONFIG_TASKS_RCU */
663 void exit_tasks_rcu_start(void) { }
664 void exit_tasks_rcu_stop(void) { }
665 void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
666 #endif /* #else #ifdef CONFIG_TASKS_RCU */
667 
668 #ifdef CONFIG_TASKS_RUDE_RCU
669 
670 ////////////////////////////////////////////////////////////////////////
671 //
672 // "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
673 // passing an empty function to schedule_on_each_cpu().  This approach
674 // provides an asynchronous call_rcu_tasks_rude() API and batching of
675 // concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
676 // This invokes schedule_on_each_cpu() in order to send IPIs far and wide
677 // and induces otherwise unnecessary context switches on all online CPUs,
678 // whether idle or not.
679 //
680 // Callback handling is provided by the rcu_tasks_kthread() function.
681 //
682 // Ordering is provided by the scheduler's context-switch code.
683 
684 // Empty function to allow workqueues to force a context switch.
685 static void rcu_tasks_be_rude(struct work_struct *work)
686 {
687 }
688 
689 // Wait for one rude RCU-tasks grace period.
690 static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
691 {
692 	rtp->n_ipis += cpumask_weight(cpu_online_mask);
693 	schedule_on_each_cpu(rcu_tasks_be_rude);
694 }
695 
696 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
697 DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
698 		 "RCU Tasks Rude");
699 
700 /**
701  * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
702  * @rhp: structure to be used for queueing the RCU updates.
703  * @func: actual callback function to be invoked after the grace period
704  *
705  * The callback function will be invoked some time after a full grace
706  * period elapses, in other words after all currently executing RCU
707  * read-side critical sections have completed. call_rcu_tasks_rude()
708  * assumes that the read-side critical sections end at context switch,
709  * cond_resched_rcu_qs(), or transition to usermode execution.  As such,
710  * there are no read-side primitives analogous to rcu_read_lock() and
711  * rcu_read_unlock() because this primitive is intended to determine
712  * that all tasks have passed through a safe state, not so much for
713  * data-structure synchronization.
714  *
715  * See the description of call_rcu() for more detailed information on
716  * memory ordering guarantees.
717  */
718 void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
719 {
720 	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
721 }
722 EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);
723 
724 /**
725  * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
726  *
727  * Control will return to the caller some time after a rude rcu-tasks
728  * grace period has elapsed, in other words after all currently
729  * executing rcu-tasks read-side critical sections have elapsed.  These
730  * read-side critical sections are delimited by calls to schedule(),
731  * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory,
732  * anyway) cond_resched().
733  *
734  * This is a very specialized primitive, intended only for a few uses in
735  * tracing and other situations requiring manipulation of function preambles
736  * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
737  * (yet) intended for heavy use from multiple CPUs.
738  *
739  * See the description of synchronize_rcu() for more detailed information
740  * on memory ordering guarantees.
741  */
742 void synchronize_rcu_tasks_rude(void)
743 {
744 	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
745 }
746 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);
747 
748 /**
749  * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
750  *
751  * Although the current implementation is guaranteed to wait, it is not
752  * obligated to, for example, if there are no pending callbacks.
753  */
754 void rcu_barrier_tasks_rude(void)
755 {
756 	/* There is only one callback queue, so this is easy.  ;-) */
757 	synchronize_rcu_tasks_rude();
758 }
759 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
760 
761 static int __init rcu_spawn_tasks_rude_kthread(void)
762 {
763 	rcu_tasks_rude.gp_sleep = HZ / 10;
764 	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
765 	return 0;
766 }
767 
768 #if !defined(CONFIG_TINY_RCU)
769 void show_rcu_tasks_rude_gp_kthread(void)
770 {
771 	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
772 }
773 EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
774 #endif // !defined(CONFIG_TINY_RCU)
775 #endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
776 
777 ////////////////////////////////////////////////////////////////////////
778 //
779 // Tracing variant of Tasks RCU.  This variant is designed to be used
780 // to protect tracing hooks, including those of BPF.  This variant
781 // therefore:
782 //
783 // 1.	Has explicit read-side markers to allow finite grace periods
784 //	in the face of in-kernel loops for PREEMPT=n builds.
785 //
786 // 2.	Protects code in the idle loop, exception entry/exit, and
787 //	CPU-hotplug code paths, similar to the capabilities of SRCU.
788 //
789 // 3.	Avoids expensive read-side instruction, having overhead similar
790 //	to that of Preemptible RCU.
791 //
792 // There are of course downsides.  The grace-period code can send IPIs to
793 // CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
794 // It is necessary to scan the full tasklist, much as for Tasks RCU.  There
795 // is a single callback queue guarded by a single lock, again, much as for
796 // Tasks RCU.  If needed, these downsides can be at least partially remedied.
797 //
798 // Perhaps most important, this variant of RCU does not affect the vanilla
799 // flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
800 // readers can operate from idle, offline, and exception entry/exit in no
801 // way allows rcu_preempt and rcu_sched readers to also do so.
802 //
803 // The implementation uses rcu_tasks_wait_gp(), which relies on function
804 // pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
805 // function sets these function pointers up so that rcu_tasks_wait_gp()
806 // invokes these functions in this order:
807 //
808 // rcu_tasks_trace_pregp_step():
809 //	Initialize the count of readers and block CPU-hotplug operations.
810 // rcu_tasks_trace_pertask(), invoked on every non-idle task:
811 //	Initialize per-task state and attempt to identify an immediate
812 //	quiescent state for that task, or, failing that, attempt to
813 //	set that task's .need_qs flag so that task's next outermost
814 //	rcu_read_unlock_trace() will report the quiescent state (in which
815 //	case the count of readers is incremented).  If both attempts fail,
816 //	the task is added to a "holdout" list.  Note that IPIs are used
817 //	to invoke trc_read_check_handler() in the context of running tasks
818 //	in order to avoid ordering overhead on common-case shared-variable
819 //	accesses.
820 // rcu_tasks_trace_postscan():
821 //	Initialize state and attempt to identify an immediate quiescent
822 //	state as above (but only for idle tasks), unblock CPU-hotplug
823 //	operations, and wait for an RCU grace period to avoid races with
824 //	tasks that are in the process of exiting.
825 // check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
826 //	Scans the holdout list, attempting to identify a quiescent state
827 //	for each task on the list.  If there is a quiescent state, the
828 //	corresponding task is removed from the holdout list.
829 // rcu_tasks_trace_postgp():
830 //	Wait for the count of readers to drop to zero, reporting any stalls.
831 //	Also execute full memory barriers to maintain ordering with code
832 //	executing after the grace period.
833 //
834 // The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
835 //
836 // Pre-grace-period update-side code is ordered before the grace
837 // period via the ->cbs_lock and barriers in rcu_tasks_kthread().
838 // Pre-grace-period read-side code is ordered before the grace period by
839 // atomic_dec_and_test() of the count of readers (for IPIed readers) and by
840 // scheduler context-switch ordering (for locked-down non-running readers).
841 
842 // The lockdep state must be outside of #ifdef to be useful.
843 #ifdef CONFIG_DEBUG_LOCK_ALLOC
844 static struct lock_class_key rcu_lock_trace_key;
845 struct lockdep_map rcu_trace_lock_map =
846 	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
847 EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
848 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
849 
850 #ifdef CONFIG_TASKS_TRACE_RCU
851 
852 static atomic_t trc_n_readers_need_end;		// Number of waited-for readers.
853 static DECLARE_WAIT_QUEUE_HEAD(trc_wait);	// Where the GP kthread waits for readers.
854 
855 // Record outstanding IPIs to each CPU.  No point in sending two...
856 static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
857 
858 // The number of detections of task quiescent state relying on
859 // heavyweight readers executing explicit memory barriers.
860 static unsigned long n_heavy_reader_attempts;
861 static unsigned long n_heavy_reader_updates;
862 static unsigned long n_heavy_reader_ofl_updates;
863 
864 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
865 DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
866 		 "RCU Tasks Trace");
867 
868 /*
869  * This irq_work handler allows rcu_read_unlock_trace() to be invoked
870  * while the scheduler locks are held.
871  */
872 static void rcu_read_unlock_iw(struct irq_work *iwp)
873 {
874 	wake_up(&trc_wait);
875 }
876 static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
877 
878 /* If we are the last reader, wake up the grace-period kthread. */
879 void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
880 {
881 	int nq = READ_ONCE(t->trc_reader_special.b.need_qs);
882 
883 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
884 	    t->trc_reader_special.b.need_mb)
885 		smp_mb(); // Pairs with update-side barriers.
886 	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
887 	if (nq)
888 		WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
889 	WRITE_ONCE(t->trc_reader_nesting, nesting);
890 	if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
891 		irq_work_queue(&rcu_tasks_trace_iw);
892 }
893 EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);
894 
895 /* Add a task to the holdout list, if it is not already on the list. */
896 static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
897 {
898 	if (list_empty(&t->trc_holdout_list)) {
899 		get_task_struct(t);
900 		list_add(&t->trc_holdout_list, bhp);
901 	}
902 }
903 
904 /* Remove a task from the holdout list, if it is in fact present. */
905 static void trc_del_holdout(struct task_struct *t)
906 {
907 	if (!list_empty(&t->trc_holdout_list)) {
908 		list_del_init(&t->trc_holdout_list);
909 		put_task_struct(t);
910 	}
911 }
912 
913 /* IPI handler to check task state. */
914 static void trc_read_check_handler(void *t_in)
915 {
916 	struct task_struct *t = current;
917 	struct task_struct *texp = t_in;
918 
919 	// If the task is no longer running on this CPU, leave.
920 	if (unlikely(texp != t)) {
921 		goto reset_ipi; // Already on holdout list, so will check later.
922 	}
923 
924 	// If the task is not in a read-side critical section, and
925 	// if this is the last reader, awaken the grace-period kthread.
926 	if (likely(!READ_ONCE(t->trc_reader_nesting))) {
927 		WRITE_ONCE(t->trc_reader_checked, true);
928 		goto reset_ipi;
929 	}
930 	// If we are racing with an rcu_read_unlock_trace(), try again later.
931 	if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
932 		goto reset_ipi;
933 	WRITE_ONCE(t->trc_reader_checked, true);
934 
935 	// Get here if the task is in a read-side critical section.  Set
936 	// its state so that it will awaken the grace-period kthread upon
937 	// exit from that critical section.
938 	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
939 	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
940 	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
941 
942 reset_ipi:
943 	// Allow future IPIs to be sent on CPU and for task.
944 	// Also order this IPI handler against any later manipulations of
945 	// the intended task.
946 	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
947 	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
948 }
949 
950 /* Callback function for scheduler to check locked-down task.  */
951 static bool trc_inspect_reader(struct task_struct *t, void *arg)
952 {
953 	int cpu = task_cpu(t);
954 	int nesting;
955 	bool ofl = cpu_is_offline(cpu);
956 
957 	if (task_curr(t)) {
958 		WARN_ON_ONCE(ofl && !is_idle_task(t));
959 
960 		// If no chance of heavyweight readers, do it the hard way.
961 		if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
962 			return false;
963 
964 		// If heavyweight readers are enabled on the remote task,
965 		// we can inspect its state even though it is currently running.
966 		// However, we cannot safely change its state.
967 		n_heavy_reader_attempts++;
968 		if (!ofl && // Check for "running" idle tasks on offline CPUs.
969 		    !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
970 			return false; // No quiescent state, do it the hard way.
971 		n_heavy_reader_updates++;
972 		if (ofl)
973 			n_heavy_reader_ofl_updates++;
974 		nesting = 0;
975 	} else {
976 		// The task is not running, so C-language access is safe.
977 		nesting = t->trc_reader_nesting;
978 	}
979 
980 	// If not exiting a read-side critical section, mark as checked
981 	// so that the grace-period kthread will remove it from the
982 	// holdout list.
983 	t->trc_reader_checked = nesting >= 0;
984 	if (nesting <= 0)
985 		return !nesting;  // If in QS, done, otherwise try again later.
986 
987 	// The task is in a read-side critical section, so set up its
988 	// state so that it will awaken the grace-period kthread upon exit
989 	// from that critical section.
990 	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
991 	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
992 	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
993 	return true;
994 }
995 
996 /* Attempt to extract the state for the specified task. */
997 static void trc_wait_for_one_reader(struct task_struct *t,
998 				    struct list_head *bhp)
999 {
1000 	int cpu;
1001 
1002 	// If a previous IPI is still in flight, let it complete.
1003 	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
1004 		return;
1005 
1006 	// The current task had better be in a quiescent state.
1007 	if (t == current) {
1008 		t->trc_reader_checked = true;
1009 		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1010 		return;
1011 	}
1012 
1013 	// Attempt to nail down the task for inspection.
1014 	get_task_struct(t);
1015 	if (try_invoke_on_locked_down_task(t, trc_inspect_reader, NULL)) {
1016 		put_task_struct(t);
1017 		return;
1018 	}
1019 	put_task_struct(t);
1020 
1021 	// If this task is not yet on the holdout list, then we are in
1022 	// an RCU read-side critical section.  Otherwise, the invocation of
1023 	// rcu_add_holdout() that added it to the list did the necessary
1024 	// get_task_struct().  Either way, the task cannot be freed out
1025 	// from under this code.
1026 
1027 	// If currently running, send an IPI, either way, add to list.
1028 	trc_add_holdout(t, bhp);
1029 	if (task_curr(t) &&
1030 	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
1031 		// The task is currently running, so try IPIing it.
1032 		cpu = task_cpu(t);
1033 
1034 		// If there is already an IPI outstanding, let it happen.
1035 		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
1036 			return;
1037 
1038 		per_cpu(trc_ipi_to_cpu, cpu) = true;
1039 		t->trc_ipi_to_cpu = cpu;
1040 		rcu_tasks_trace.n_ipis++;
1041 		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
1042 			// Just in case there is some other reason for
1043 			// failure than the target CPU being offline.
1044 			WARN_ONCE(1, "%s():  smp_call_function_single() failed for CPU: %d\n",
1045 				  __func__, cpu);
1046 			rcu_tasks_trace.n_ipis_fails++;
1047 			per_cpu(trc_ipi_to_cpu, cpu) = false;
1048 			t->trc_ipi_to_cpu = -1;
1049 		}
1050 	}
1051 }
1052 
1053 /* Initialize for a new RCU-tasks-trace grace period. */
1054 static void rcu_tasks_trace_pregp_step(void)
1055 {
1056 	int cpu;
1057 
1058 	// Allow for fast-acting IPIs.
1059 	atomic_set(&trc_n_readers_need_end, 1);
1060 
1061 	// There shouldn't be any old IPIs, but...
1062 	for_each_possible_cpu(cpu)
1063 		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));
1064 
1065 	// Disable CPU hotplug across the tasklist scan.
1066 	// This also waits for all readers in CPU-hotplug code paths.
1067 	cpus_read_lock();
1068 }
1069 
1070 /* Do first-round processing for the specified task. */
1071 static void rcu_tasks_trace_pertask(struct task_struct *t,
1072 				    struct list_head *hop)
1073 {
1074 	// During early boot when there is only the one boot CPU, there
1075 	// is no idle task for the other CPUs. Just return.
1076 	if (unlikely(t == NULL))
1077 		return;
1078 
1079 	WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
1080 	WRITE_ONCE(t->trc_reader_checked, false);
1081 	t->trc_ipi_to_cpu = -1;
1082 	trc_wait_for_one_reader(t, hop);
1083 }
1084 
1085 /*
1086  * Do intermediate processing between task and holdout scans and
1087  * pick up the idle tasks.
1088  */
1089 static void rcu_tasks_trace_postscan(struct list_head *hop)
1090 {
1091 	int cpu;
1092 
1093 	for_each_possible_cpu(cpu)
1094 		rcu_tasks_trace_pertask(idle_task(cpu), hop);
1095 
1096 	// Re-enable CPU hotplug now that the tasklist scan has completed.
1097 	cpus_read_unlock();
1098 
1099 	// Wait for late-stage exiting tasks to finish exiting.
1100 	// These might have passed the call to exit_tasks_rcu_finish().
1101 	synchronize_rcu();
1102 	// Any tasks that exit after this point will set ->trc_reader_checked.
1103 }
1104 
1105 /* Show the state of a task stalling the current RCU tasks trace GP. */
1106 static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
1107 {
1108 	int cpu;
1109 
1110 	if (*firstreport) {
1111 		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
1112 		*firstreport = false;
1113 	}
1114 	// FIXME: This should attempt to use try_invoke_on_nonrunning_task().
1115 	cpu = task_cpu(t);
1116 	pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
1117 		 t->pid,
1118 		 ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0],
1119 		 ".i"[is_idle_task(t)],
1120 		 ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
1121 		 READ_ONCE(t->trc_reader_nesting),
1122 		 " N"[!!READ_ONCE(t->trc_reader_special.b.need_qs)],
1123 		 cpu);
1124 	sched_show_task(t);
1125 }
1126 
1127 /* List stalled IPIs for RCU tasks trace. */
1128 static void show_stalled_ipi_trace(void)
1129 {
1130 	int cpu;
1131 
1132 	for_each_possible_cpu(cpu)
1133 		if (per_cpu(trc_ipi_to_cpu, cpu))
1134 			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
1135 }
1136 
1137 /* Do one scan of the holdout list. */
1138 static void check_all_holdout_tasks_trace(struct list_head *hop,
1139 					  bool needreport, bool *firstreport)
1140 {
1141 	struct task_struct *g, *t;
1142 
1143 	// Disable CPU hotplug across the holdout list scan.
1144 	cpus_read_lock();
1145 
1146 	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
1147 		// If safe and needed, try to check the current task.
1148 		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
1149 		    !READ_ONCE(t->trc_reader_checked))
1150 			trc_wait_for_one_reader(t, hop);
1151 
1152 		// If check succeeded, remove this task from the list.
1153 		if (READ_ONCE(t->trc_reader_checked))
1154 			trc_del_holdout(t);
1155 		else if (needreport)
1156 			show_stalled_task_trace(t, firstreport);
1157 	}
1158 
1159 	// Re-enable CPU hotplug now that the holdout list scan has completed.
1160 	cpus_read_unlock();
1161 
1162 	if (needreport) {
1163 		if (*firstreport)
1164 			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
1165 		show_stalled_ipi_trace();
1166 	}
1167 }
1168 
1169 static void rcu_tasks_trace_empty_fn(void *unused)
1170 {
1171 }
1172 
1173 /* Wait for grace period to complete and provide ordering. */
1174 static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
1175 {
1176 	int cpu;
1177 	bool firstreport;
1178 	struct task_struct *g, *t;
1179 	LIST_HEAD(holdouts);
1180 	long ret;
1181 
1182 	// Wait for any lingering IPI handlers to complete.  Note that
1183 	// if a CPU has gone offline or transitioned to userspace in the
1184 	// meantime, all IPI handlers should have been drained beforehand.
1185 	// Yes, this assumes that CPUs process IPIs in order.  If that ever
1186 	// changes, there will need to be a recheck and/or timed wait.
1187 	for_each_online_cpu(cpu)
1188 		if (smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))
1189 			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
1190 
1191 	// Remove the safety count.
1192 	smp_mb__before_atomic();  // Order vs. earlier atomics
1193 	atomic_dec(&trc_n_readers_need_end);
1194 	smp_mb__after_atomic();  // Order vs. later atomics
1195 
1196 	// Wait for readers.
1197 	set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
1198 	for (;;) {
1199 		ret = wait_event_idle_exclusive_timeout(
1200 				trc_wait,
1201 				atomic_read(&trc_n_readers_need_end) == 0,
1202 				READ_ONCE(rcu_task_stall_timeout));
1203 		if (ret)
1204 			break;  // Count reached zero.
1205 		// Stall warning time, so make a list of the offenders.
1206 		rcu_read_lock();
1207 		for_each_process_thread(g, t)
1208 			if (READ_ONCE(t->trc_reader_special.b.need_qs))
1209 				trc_add_holdout(t, &holdouts);
1210 		rcu_read_unlock();
1211 		firstreport = true;
1212 		list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
1213 			if (READ_ONCE(t->trc_reader_special.b.need_qs))
1214 				show_stalled_task_trace(t, &firstreport);
1215 			trc_del_holdout(t); // Release task_struct reference.
1216 		}
1217 		if (firstreport)
1218 			pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
1219 		show_stalled_ipi_trace();
1220 		pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
1221 	}
1222 	smp_mb(); // Caller's code must be ordered after wakeup.
1223 		  // Pairs with pretty much every ordering primitive.
1224 }
1225 
1226 /* Report any needed quiescent state for this exiting task. */
1227 static void exit_tasks_rcu_finish_trace(struct task_struct *t)
1228 {
1229 	WRITE_ONCE(t->trc_reader_checked, true);
1230 	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
1231 	WRITE_ONCE(t->trc_reader_nesting, 0);
1232 	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
1233 		rcu_read_unlock_trace_special(t, 0);
1234 }
1235 
1236 /**
1237  * call_rcu_tasks_trace() - Queue an RCU callback for invocation after a trace task-based grace period
1238  * @rhp: structure to be used for queueing the RCU updates.
1239  * @func: actual callback function to be invoked after the grace period
1240  *
1241  * The callback function will be invoked some time after a full grace
1242  * period elapses, in other words after all currently executing RCU
1243  * read-side critical sections have completed. call_rcu_tasks_trace()
1244  * assumes that the read-side critical sections end at context switch,
1245  * cond_resched_rcu_qs(), or transition to usermode execution.  As such,
1246  * there are no read-side primitives analogous to rcu_read_lock() and
1247  * rcu_read_unlock() because this primitive is intended to determine
1248  * that all tasks have passed through a safe state, not so much for
1249  * data-structure synchronization.
1250  *
1251  * See the description of call_rcu() for more detailed information on
1252  * memory ordering guarantees.
1253  */
1254 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
1255 {
1256 	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
1257 }
1258 EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);
1259 
1260 /**
1261  * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
1262  *
1263  * Control will return to the caller some time after a trace rcu-tasks
1264  * grace period has elapsed, in other words after all currently executing
1265  * rcu-tasks read-side critical sections have elapsed.  These read-side
1266  * critical sections are delimited by calls to rcu_read_lock_trace()
1267  * and rcu_read_unlock_trace().
1268  *
1269  * This is a very specialized primitive, intended only for a few uses in
1270  * tracing and other situations requiring manipulation of function preambles
1271  * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
1272  * (yet) intended for heavy use from multiple CPUs.
1273  *
1274  * See the description of synchronize_rcu() for more detailed information
1275  * on memory ordering guarantees.
1276  */
1277 void synchronize_rcu_tasks_trace(void)
1278 {
1279 	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
1280 	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
1281 }
1282 EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);
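
/*
 * Editorial sketch (hypothetical names): RCU Tasks Trace is the one
 * flavor in this file with explicit read-side markers, declared in
 * include/linux/rcupdate_trace.h, so a reader/updater pairing might
 * look like:
 *
 *	// Reader, e.g. a tracing hook:
 *	rcu_read_lock_trace();
 *	hook = rcu_dereference_raw(my_hook_ptr);
 *	if (hook)
 *		hook(arg);
 *	rcu_read_unlock_trace();
 *
 *	// Updater:
 *	rcu_assign_pointer(my_hook_ptr, NULL);
 *	synchronize_rcu_tasks_trace();	// Wait for all such readers.
 *	free_my_hook_state();
 */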
1283 
1284 /**
1285  * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
1286  *
1287  * Although the current implementation is guaranteed to wait, it is not
1288  * obligated to, for example, if there are no pending callbacks.
1289  */
1290 void rcu_barrier_tasks_trace(void)
1291 {
1292 	/* There is only one callback queue, so this is easy.  ;-) */
1293 	synchronize_rcu_tasks_trace();
1294 }
1295 EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
1296 
1297 static int __init rcu_spawn_tasks_trace_kthread(void)
1298 {
1299 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
1300 		rcu_tasks_trace.gp_sleep = HZ / 10;
1301 		rcu_tasks_trace.init_fract = HZ / 10;
1302 	} else {
1303 		rcu_tasks_trace.gp_sleep = HZ / 200;
1304 		if (rcu_tasks_trace.gp_sleep <= 0)
1305 			rcu_tasks_trace.gp_sleep = 1;
1306 		rcu_tasks_trace.init_fract = HZ / 200;
1307 		if (rcu_tasks_trace.init_fract <= 0)
1308 			rcu_tasks_trace.init_fract = 1;
1309 	}
1310 	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
1311 	rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
1312 	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
1313 	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
1314 	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
1315 	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
1316 	return 0;
1317 }
1318 
1319 #if !defined(CONFIG_TINY_RCU)
1320 void show_rcu_tasks_trace_gp_kthread(void)
1321 {
1322 	char buf[64];
1323 
1324 	sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
1325 		data_race(n_heavy_reader_ofl_updates),
1326 		data_race(n_heavy_reader_updates),
1327 		data_race(n_heavy_reader_attempts));
1328 	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
1329 }
1330 EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
1331 #endif // !defined(CONFIG_TINY_RCU)
1332 
1333 #else /* #ifdef CONFIG_TASKS_TRACE_RCU */
1334 static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
1335 #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */
1336 
1337 #ifndef CONFIG_TINY_RCU
1338 void show_rcu_tasks_gp_kthreads(void)
1339 {
1340 	show_rcu_tasks_classic_gp_kthread();
1341 	show_rcu_tasks_rude_gp_kthread();
1342 	show_rcu_tasks_trace_gp_kthread();
1343 }
1344 #endif /* #ifndef CONFIG_TINY_RCU */
1345 
1346 #ifdef CONFIG_PROVE_RCU
1347 struct rcu_tasks_test_desc {
1348 	struct rcu_head rh;
1349 	const char *name;
1350 	bool notrun;
1351 };
1352 
1353 static struct rcu_tasks_test_desc tests[] = {
1354 	{
1355 		.name = "call_rcu_tasks()",
1356 		/* If not defined, the test is skipped. */
1357 		.notrun = !IS_ENABLED(CONFIG_TASKS_RCU),
1358 	},
1359 	{
1360 		.name = "call_rcu_tasks_rude()",
1361 		/* If not defined, the test is skipped. */
1362 		.notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
1363 	},
1364 	{
1365 		.name = "call_rcu_tasks_trace()",
1366 		/* If not defined, the test is skipped. */
1367 		.notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
1368 	}
1369 };
1370 
1371 static void test_rcu_tasks_callback(struct rcu_head *rhp)
1372 {
1373 	struct rcu_tasks_test_desc *rttd =
1374 		container_of(rhp, struct rcu_tasks_test_desc, rh);
1375 
1376 	pr_info("Callback from %s invoked.\n", rttd->name);
1377 
1378 	rttd->notrun = true;
1379 }
1380 
1381 static void rcu_tasks_initiate_self_tests(void)
1382 {
1383 	pr_info("Running RCU-tasks wait API self tests\n");
1384 #ifdef CONFIG_TASKS_RCU
1385 	synchronize_rcu_tasks();
1386 	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
1387 #endif
1388 
1389 #ifdef CONFIG_TASKS_RUDE_RCU
1390 	synchronize_rcu_tasks_rude();
1391 	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
1392 #endif
1393 
1394 #ifdef CONFIG_TASKS_TRACE_RCU
1395 	synchronize_rcu_tasks_trace();
1396 	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
1397 #endif
1398 }
1399 
1400 static int rcu_tasks_verify_self_tests(void)
1401 {
1402 	int ret = 0;
1403 	int i;
1404 
1405 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
1406 		if (!tests[i].notrun) {		// still hanging.
1407 			pr_err("%s has failed.\n", tests[i].name);
1408 			ret = -1;
1409 		}
1410 	}
1411 
1412 	if (ret)
1413 		WARN_ON(1);
1414 
1415 	return ret;
1416 }
1417 late_initcall(rcu_tasks_verify_self_tests);
1418 #else /* #ifdef CONFIG_PROVE_RCU */
1419 static void rcu_tasks_initiate_self_tests(void) { }
1420 #endif /* #else #ifdef CONFIG_PROVE_RCU */
1421 
1422 void __init rcu_init_tasks_generic(void)
1423 {
1424 #ifdef CONFIG_TASKS_RCU
1425 	rcu_spawn_tasks_kthread();
1426 #endif
1427 
1428 #ifdef CONFIG_TASKS_RUDE_RCU
1429 	rcu_spawn_tasks_rude_kthread();
1430 #endif
1431 
1432 #ifdef CONFIG_TASKS_TRACE_RCU
1433 	rcu_spawn_tasks_trace_kthread();
1434 #endif
1435 
1436 	// Run the self-tests.
1437 	rcu_tasks_initiate_self_tests();
1438 }
1439 
1440 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
1441 static inline void rcu_tasks_bootup_oddness(void) {}
1442 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
1443