1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4  * Internal non-public definitions that provide either classic
5  * or preemptible semantics.
6  *
7  * Copyright Red Hat, 2009
8  * Copyright IBM Corporation, 2009
9  *
10  * Author: Ingo Molnar <mingo@elte.hu>
11  *	   Paul E. McKenney <paulmck@linux.ibm.com>
12  */
13 
14 #include "../locking/rtmutex_common.h"
15 
16 #ifdef CONFIG_RCU_NOCB_CPU
17 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
18 static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
19 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
20 
21 /*
22  * Check the RCU kernel configuration parameters and print informative
23  * messages about anything out of the ordinary.
24  */
25 static void __init rcu_bootup_announce_oddness(void)
26 {
27 	if (IS_ENABLED(CONFIG_RCU_TRACE))
28 		pr_info("\tRCU event tracing is enabled.\n");
29 	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
30 	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
31 		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
32 			RCU_FANOUT);
33 	if (rcu_fanout_exact)
34 		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
35 	if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
36 		pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
37 	if (IS_ENABLED(CONFIG_PROVE_RCU))
38 		pr_info("\tRCU lockdep checking is enabled.\n");
39 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
40 		pr_info("\tRCU strict (and thus non-scalable) grace periods enabled.\n");
41 	if (RCU_NUM_LVLS >= 4)
42 		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
43 	if (RCU_FANOUT_LEAF != 16)
44 		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
45 			RCU_FANOUT_LEAF);
46 	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
47 		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
48 			rcu_fanout_leaf);
49 	if (nr_cpu_ids != NR_CPUS)
50 		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
51 #ifdef CONFIG_RCU_BOOST
52 	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
53 		kthread_prio, CONFIG_RCU_BOOST_DELAY);
54 #endif
55 	if (blimit != DEFAULT_RCU_BLIMIT)
56 		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
57 	if (qhimark != DEFAULT_RCU_QHIMARK)
58 		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
59 	if (qlowmark != DEFAULT_RCU_QLOMARK)
60 		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
61 	if (qovld != DEFAULT_RCU_QOVLD)
62 		pr_info("\tBoot-time adjustment of callback overload level to %ld.\n", qovld);
63 	if (jiffies_till_first_fqs != ULONG_MAX)
64 		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
65 	if (jiffies_till_next_fqs != ULONG_MAX)
66 		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
67 	if (jiffies_till_sched_qs != ULONG_MAX)
68 		pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
69 	if (rcu_kick_kthreads)
70 		pr_info("\tKick kthreads if too-long grace period.\n");
71 	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
72 		pr_info("\tRCU callback double-/use-after-free debug enabled.\n");
73 	if (gp_preinit_delay)
74 		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
75 	if (gp_init_delay)
76 		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
77 	if (gp_cleanup_delay)
78 		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
79 	if (!use_softirq)
80 		pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
81 	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
82 		pr_info("\tRCU debug extended QS entry/exit.\n");
83 	rcupdate_announce_bootup_oddness();
84 }
85 
86 #ifdef CONFIG_PREEMPT_RCU
87 
88 static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
89 static void rcu_read_unlock_special(struct task_struct *t);
90 
91 /*
92  * Tell them what RCU they are running.
93  */
94 static void __init rcu_bootup_announce(void)
95 {
96 	pr_info("Preemptible hierarchical RCU implementation.\n");
97 	rcu_bootup_announce_oddness();
98 }
99 
100 /* Flags for rcu_preempt_ctxt_queue() decision table. */
101 #define RCU_GP_TASKS	0x8
102 #define RCU_EXP_TASKS	0x4
103 #define RCU_GP_BLKD	0x2
104 #define RCU_EXP_BLKD	0x1
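/*
 * Illustrative note: the blkd_state value fed to the decision table in
 * rcu_preempt_ctxt_queue() below is simply the sum (equivalently, the
 * bitwise OR) of these flags.  For example, a value of 0x5, that is,
 * RCU_EXP_TASKS + RCU_EXP_BLKD, means that an expedited grace period is
 * already waiting on at least one queued task (->exp_tasks is non-NULL)
 * and that this CPU also blocks that expedited grace period (its bit is
 * set in ->expmask).
 */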
105 
106 /*
107  * Queues a task preempted within an RCU-preempt read-side critical
108  * section into the appropriate location within the ->blkd_tasks list,
109  * depending on the states of any ongoing normal and expedited grace
110  * periods.  The ->gp_tasks pointer indicates which element the normal
111  * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
112  * indicates which element the expedited grace period is waiting on (again,
113  * NULL if none).  If a grace period is waiting on a given element in the
114  * ->blkd_tasks list, it also waits on all subsequent elements.  Thus,
115  * adding a task to the tail of the list blocks any grace period that is
116  * already waiting on one of the elements.  In contrast, adding a task
117  * to the head of the list won't block any grace period that is already
118  * waiting on one of the elements.
119  *
120  * This queuing is imprecise, and can sometimes make an ongoing grace
121  * period wait for a task that is not strictly speaking blocking it.
122  * Given the choice, we needlessly block a normal grace period rather than
123  * blocking an expedited grace period.
124  *
125  * Note that an endless sequence of expedited grace periods still cannot
126  * indefinitely postpone a normal grace period.  Eventually, all of the
127  * fixed number of preempted tasks blocking the normal grace period that are
128  * not also blocking the expedited grace period will resume and complete
129  * their RCU read-side critical sections.  At that point, the ->gp_tasks
130  * pointer will equal the ->exp_tasks pointer, at which point the end of
131  * the corresponding expedited grace period will also be the end of the
132  * normal grace period.
133  */
134 static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
135 	__releases(rnp->lock) /* But leaves interrupts disabled. */
136 {
137 	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
138 			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
139 			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
140 			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
141 	struct task_struct *t = current;
142 
143 	raw_lockdep_assert_held_rcu_node(rnp);
144 	WARN_ON_ONCE(rdp->mynode != rnp);
145 	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
146 	/* RCU better not be waiting on newly onlined CPUs! */
147 	WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
148 		     rdp->grpmask);
149 
150 	/*
151 	 * Decide where to queue the newly blocked task.  In theory,
152 	 * this could be an if-statement.  In practice, when I tried
153 	 * that, it was quite messy.
154 	 */
155 	switch (blkd_state) {
156 	case 0:
157 	case                RCU_EXP_TASKS:
158 	case                RCU_EXP_TASKS + RCU_GP_BLKD:
159 	case RCU_GP_TASKS:
160 	case RCU_GP_TASKS + RCU_EXP_TASKS:
161 
162 		/*
163 		 * Blocking neither GP, or first task blocking the normal
164 		 * GP but not blocking the already-waiting expedited GP.
165 		 * Queue at the head of the list to avoid unnecessarily
166 		 * blocking the already-waiting GPs.
167 		 */
168 		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
169 		break;
170 
171 	case                                              RCU_EXP_BLKD:
172 	case                                RCU_GP_BLKD:
173 	case                                RCU_GP_BLKD + RCU_EXP_BLKD:
174 	case RCU_GP_TASKS +                               RCU_EXP_BLKD:
175 	case RCU_GP_TASKS +                 RCU_GP_BLKD + RCU_EXP_BLKD:
176 	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
177 
178 		/*
179 		 * First task arriving that blocks either GP, or first task
180 		 * arriving that blocks the expedited GP (with the normal
181 		 * GP already waiting), or a task arriving that blocks
182 		 * both GPs with both GPs already waiting.  Queue at the
183 		 * tail of the list to avoid any GP waiting on any of the
184 		 * already queued tasks that are not blocking it.
185 		 */
186 		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
187 		break;
188 
189 	case                RCU_EXP_TASKS +               RCU_EXP_BLKD:
190 	case                RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
191 	case RCU_GP_TASKS + RCU_EXP_TASKS +               RCU_EXP_BLKD:
192 
193 		/*
194 		 * Second or subsequent task blocking the expedited GP.
195 		 * The task either does not block the normal GP, or is the
196 		 * first task blocking the normal GP.  Queue just after
197 		 * the first task blocking the expedited GP.
198 		 */
199 		list_add(&t->rcu_node_entry, rnp->exp_tasks);
200 		break;
201 
202 	case RCU_GP_TASKS +                 RCU_GP_BLKD:
203 	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:
204 
205 		/*
206 		 * Second or subsequent task blocking the normal GP.
207 		 * The task does not block the expedited GP. Queue just
208 		 * after the first task blocking the normal GP.
209 		 */
210 		list_add(&t->rcu_node_entry, rnp->gp_tasks);
211 		break;
212 
213 	default:
214 
215 		/* Yet another exercise in excessive paranoia. */
216 		WARN_ON_ONCE(1);
217 		break;
218 	}
219 
220 	/*
221 	 * We have now queued the task.  If it was the first one to
222 	 * block either grace period, update the ->gp_tasks and/or
223 	 * ->exp_tasks pointers, respectively, to reference the newly
224 	 * blocked tasks.
225 	 */
226 	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
227 		WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
228 		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
229 	}
230 	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
231 		WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);
232 	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
233 		     !(rnp->qsmask & rdp->grpmask));
234 	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
235 		     !(rnp->expmask & rdp->grpmask));
236 	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
237 
238 	/*
239 	 * Report the quiescent state for the expedited GP.  This expedited
240 	 * GP should not be able to end until we report, so there should be
241 	 * no need to check for a subsequent expedited GP.  (Though we are
242 	 * still in a quiescent state in any case.)
243 	 */
244 	if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs)
245 		rcu_report_exp_rdp(rdp);
246 	else
247 		WARN_ON_ONCE(rdp->exp_deferred_qs);
248 }
249 
250 /*
251  * Record a preemptible-RCU quiescent state for the specified CPU.
252  * Note that this does not necessarily mean that the task currently running
253  * on the CPU is in a quiescent state:  Instead, it means that the current
254  * grace period need not wait on any RCU read-side critical section that
255  * starts later on this CPU.  It also means that if the current task is
256  * in an RCU read-side critical section, it has already added itself to
257  * some leaf rcu_node structure's ->blkd_tasks list.  In addition to the
258  * current task, there might be any number of other tasks blocked while
259  * in an RCU read-side critical section.
260  *
261  * Callers to this function must disable preemption.
262  */
263 static void rcu_qs(void)
264 {
265 	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n");
266 	if (__this_cpu_read(rcu_data.cpu_no_qs.s)) {
267 		trace_rcu_grace_period(TPS("rcu_preempt"),
268 				       __this_cpu_read(rcu_data.gp_seq),
269 				       TPS("cpuqs"));
270 		__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
271 		barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
272 		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
273 	}
274 }
275 
276 /*
277  * We have entered the scheduler, and the current task might soon be
278  * context-switched away from.  If this task is in an RCU read-side
279  * critical section, we will no longer be able to rely on the CPU to
280  * record that fact, so we enqueue the task on the blkd_tasks list.
281  * The task will dequeue itself when it exits the outermost enclosing
282  * RCU read-side critical section.  Therefore, the current grace period
283  * cannot be permitted to complete until the blkd_tasks list entries
284  * predating the current grace period drain, in other words, until
285  * rnp->gp_tasks becomes NULL.
286  *
287  * Caller must disable interrupts.
288  */
289 void rcu_note_context_switch(bool preempt)
290 {
291 	struct task_struct *t = current;
292 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
293 	struct rcu_node *rnp;
294 
295 	trace_rcu_utilization(TPS("Start context switch"));
296 	lockdep_assert_irqs_disabled();
297 	WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0);
298 	if (rcu_preempt_depth() > 0 &&
299 	    !t->rcu_read_unlock_special.b.blocked) {
300 
301 		/* Possibly blocking in an RCU read-side critical section. */
302 		rnp = rdp->mynode;
303 		raw_spin_lock_rcu_node(rnp);
304 		t->rcu_read_unlock_special.b.blocked = true;
305 		t->rcu_blocked_node = rnp;
306 
307 		/*
308 		 * Verify the CPU's sanity, trace the preemption, and
309 		 * then queue the task as required based on the states
310 		 * of any ongoing and expedited grace periods.
311 		 */
312 		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
313 		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
314 		trace_rcu_preempt_task(rcu_state.name,
315 				       t->pid,
316 				       (rnp->qsmask & rdp->grpmask)
317 				       ? rnp->gp_seq
318 				       : rcu_seq_snap(&rnp->gp_seq));
319 		rcu_preempt_ctxt_queue(rnp, rdp);
320 	} else {
321 		rcu_preempt_deferred_qs(t);
322 	}
323 
324 	/*
325 	 * Either we were not in an RCU read-side critical section to
326 	 * begin with, or we have now recorded that critical section
327 	 * globally.  Either way, we can now note a quiescent state
328 	 * for this CPU.  Again, if we were in an RCU read-side critical
329 	 * section, and if that critical section was blocking the current
330 	 * grace period, then the fact that the task has been enqueued
331 	 * means that we continue to block the current grace period.
332 	 */
333 	rcu_qs();
334 	if (rdp->exp_deferred_qs)
335 		rcu_report_exp_rdp(rdp);
336 	rcu_tasks_qs(current, preempt);
337 	trace_rcu_utilization(TPS("End context switch"));
338 }
339 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
340 
341 /*
342  * Check for preempted RCU readers blocking the current grace period
343  * for the specified rcu_node structure.  If the caller needs a reliable
344  * answer, it must hold the rcu_node's ->lock.
345  */
346 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
347 {
348 	return READ_ONCE(rnp->gp_tasks) != NULL;
349 }
350 
351 /* limit value for ->rcu_read_lock_nesting. */
352 #define RCU_NEST_PMAX (INT_MAX / 2)
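/*
 * Illustrative note: ->rcu_read_lock_nesting is a plain int, so capping
 * the sanity check at INT_MAX / 2 lets the CONFIG_PROVE_LOCKING warning
 * in __rcu_read_lock() fire well before the counter could actually
 * overflow.
 */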
353 
354 static void rcu_preempt_read_enter(void)
355 {
356 	current->rcu_read_lock_nesting++;
357 }
358 
359 static int rcu_preempt_read_exit(void)
360 {
361 	return --current->rcu_read_lock_nesting;
362 }
363 
364 static void rcu_preempt_depth_set(int val)
365 {
366 	current->rcu_read_lock_nesting = val;
367 }
368 
369 /*
370  * Preemptible RCU implementation for rcu_read_lock().
371  * Just increment ->rcu_read_lock_nesting, shared state will be updated
372  * if we block.
373  */
374 void __rcu_read_lock(void)
375 {
376 	rcu_preempt_read_enter();
377 	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
378 		WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
379 	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) && rcu_state.gp_kthread)
380 		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
381 	barrier();  /* critical section after entry code. */
382 }
383 EXPORT_SYMBOL_GPL(__rcu_read_lock);
384 
385 /*
386  * Preemptible RCU implementation for rcu_read_unlock().
387  * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
388  * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
389  * invoke rcu_read_unlock_special() to clean up after a context switch
390  * in an RCU read-side critical section and other special cases.
391  */
392 void __rcu_read_unlock(void)
393 {
394 	struct task_struct *t = current;
395 
396 	if (rcu_preempt_read_exit() == 0) {
397 		barrier();  /* critical section before exit code. */
398 		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
399 			rcu_read_unlock_special(t);
400 	}
401 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
402 		int rrln = rcu_preempt_depth();
403 
404 		WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX);
405 	}
406 }
407 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
408 
409 /*
410  * Advance a ->blkd_tasks-list pointer to the next entry, instead
411  * returning NULL if at the end of the list.
412  */
413 static struct list_head *rcu_next_node_entry(struct task_struct *t,
414 					     struct rcu_node *rnp)
415 {
416 	struct list_head *np;
417 
418 	np = t->rcu_node_entry.next;
419 	if (np == &rnp->blkd_tasks)
420 		np = NULL;
421 	return np;
422 }
423 
424 /*
425  * Return true if the specified rcu_node structure has tasks that were
426  * preempted within an RCU read-side critical section.
427  */
428 static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
429 {
430 	return !list_empty(&rnp->blkd_tasks);
431 }
432 
433 /*
434  * Report deferred quiescent states.  The deferral time can
435  * be quite short, for example, in the case of the call from
436  * rcu_read_unlock_special().
437  */
438 static void
439 rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
440 {
441 	bool empty_exp;
442 	bool empty_norm;
443 	bool empty_exp_now;
444 	struct list_head *np;
445 	bool drop_boost_mutex = false;
446 	struct rcu_data *rdp;
447 	struct rcu_node *rnp;
448 	union rcu_special special;
449 
450 	/*
451 	 * If RCU core is waiting for this CPU to exit its critical section,
452 	 * report the fact that it has exited.  Because irqs are disabled,
453 	 * t->rcu_read_unlock_special cannot change.
454 	 */
455 	special = t->rcu_read_unlock_special;
456 	rdp = this_cpu_ptr(&rcu_data);
457 	if (!special.s && !rdp->exp_deferred_qs) {
458 		local_irq_restore(flags);
459 		return;
460 	}
461 	t->rcu_read_unlock_special.s = 0;
462 	if (special.b.need_qs) {
463 		if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
464 			rcu_report_qs_rdp(rdp);
465 			udelay(rcu_unlock_delay);
466 		} else {
467 			rcu_qs();
468 		}
469 	}
470 
471 	/*
472 	 * Respond to a request by an expedited grace period for a
473 	 * quiescent state from this CPU.  Note that requests from
474 	 * tasks are handled when removing the task from the
475 	 * blocked-tasks list below.
476 	 */
477 	if (rdp->exp_deferred_qs)
478 		rcu_report_exp_rdp(rdp);
479 
480 	/* Clean up if blocked during RCU read-side critical section. */
481 	if (special.b.blocked) {
482 
483 		/*
484 		 * Remove this task from the list it blocked on.  The task
485 		 * now remains queued on the rcu_node corresponding to the
486 		 * CPU it first blocked on, so there is no longer any need
487 		 * to loop.  Retain a WARN_ON_ONCE() out of sheer paranoia.
488 		 */
489 		rnp = t->rcu_blocked_node;
490 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
491 		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
492 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
493 		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
494 		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
495 			     (!empty_norm || rnp->qsmask));
496 		empty_exp = sync_rcu_exp_done(rnp);
497 		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
498 		np = rcu_next_node_entry(t, rnp);
499 		list_del_init(&t->rcu_node_entry);
500 		t->rcu_blocked_node = NULL;
501 		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
502 						rnp->gp_seq, t->pid);
503 		if (&t->rcu_node_entry == rnp->gp_tasks)
504 			WRITE_ONCE(rnp->gp_tasks, np);
505 		if (&t->rcu_node_entry == rnp->exp_tasks)
506 			WRITE_ONCE(rnp->exp_tasks, np);
507 		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
508 			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
509 			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
510 			if (&t->rcu_node_entry == rnp->boost_tasks)
511 				WRITE_ONCE(rnp->boost_tasks, np);
512 		}
513 
514 		/*
515 		 * If this was the last task on the current list, and if
516 		 * we aren't waiting on any CPUs, report the quiescent state.
517 		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
518 		 * so we must take a snapshot of the expedited state.
519 		 */
520 		empty_exp_now = sync_rcu_exp_done(rnp);
521 		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
522 			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
523 							 rnp->gp_seq,
524 							 0, rnp->qsmask,
525 							 rnp->level,
526 							 rnp->grplo,
527 							 rnp->grphi,
528 							 !!rnp->gp_tasks);
529 			rcu_report_unblock_qs_rnp(rnp, flags);
530 		} else {
531 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
532 		}
533 
534 		/*
535 		 * If this was the last task on the expedited lists,
536 		 * then we need to report up the rcu_node hierarchy.
537 		 */
538 		if (!empty_exp && empty_exp_now)
539 			rcu_report_exp_rnp(rnp, true);
540 
541 		/* Unboost if we were boosted. */
542 		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
543 			rt_mutex_futex_unlock(&rnp->boost_mtx);
544 
545 	} else {
546 		local_irq_restore(flags);
547 	}
548 }
549 
550 /*
551  * Is a deferred quiescent-state pending, and are we also not in
552  * an RCU read-side critical section?  It is the caller's responsibility
553  * to ensure it is otherwise safe to report any deferred quiescent
554  * states.  The reason for this is that it is safe to report a
555  * quiescent state during context switch even though preemption
556  * is disabled.  This function cannot be expected to understand these
557  * nuances, so the caller must handle them.
558  */
559 static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
560 {
561 	return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
562 		READ_ONCE(t->rcu_read_unlock_special.s)) &&
563 	       rcu_preempt_depth() == 0;
564 }
565 
566 /*
567  * Report a deferred quiescent state if needed and safe to do so.
568  * As with rcu_preempt_need_deferred_qs(), "safe" involves only
569  * not being in an RCU read-side critical section.  The caller must
570  * evaluate safety in terms of interrupt, softirq, and preemption
571  * disabling.
572  */
573 static void rcu_preempt_deferred_qs(struct task_struct *t)
574 {
575 	unsigned long flags;
576 
577 	if (!rcu_preempt_need_deferred_qs(t))
578 		return;
579 	local_irq_save(flags);
580 	rcu_preempt_deferred_qs_irqrestore(t, flags);
581 }
582 
583 /*
584  * Minimal handler to give the scheduler a chance to re-evaluate.
585  */
586 static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
587 {
588 	struct rcu_data *rdp;
589 
590 	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
591 	rdp->defer_qs_iw_pending = false;
592 }
593 
594 /*
595  * Handle special cases during rcu_read_unlock(), such as needing to
596  * notify RCU core processing or task having blocked during the RCU
597  * read-side critical section.
598  */
599 static void rcu_read_unlock_special(struct task_struct *t)
600 {
601 	unsigned long flags;
602 	bool preempt_bh_were_disabled =
603 			!!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));
604 	bool irqs_were_disabled;
605 
606 	/* NMI handlers cannot block and cannot safely manipulate state. */
607 	if (in_nmi())
608 		return;
609 
610 	local_irq_save(flags);
611 	irqs_were_disabled = irqs_disabled_flags(flags);
612 	if (preempt_bh_were_disabled || irqs_were_disabled) {
613 		bool exp;
614 		struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
615 		struct rcu_node *rnp = rdp->mynode;
616 
617 		exp = (t->rcu_blocked_node &&
618 		       READ_ONCE(t->rcu_blocked_node->exp_tasks)) ||
619 		      (rdp->grpmask & READ_ONCE(rnp->expmask));
620 		// Need to defer quiescent state until everything is enabled.
621 		if (use_softirq && (in_irq() || (exp && !irqs_were_disabled))) {
622 			// Using softirq, safe to awaken, and either the
623 			// wakeup is free or there is an expedited GP.
624 			raise_softirq_irqoff(RCU_SOFTIRQ);
625 		} else {
626 			// Enabling BH or preempt does reschedule, so...
627 			// Also if no expediting, slow is OK.
628 			// Plus nohz_full CPUs eventually get tick enabled.
629 			set_tsk_need_resched(current);
630 			set_preempt_need_resched();
631 			if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
632 			    !rdp->defer_qs_iw_pending && exp && cpu_online(rdp->cpu)) {
633 				// Get scheduler to re-evaluate and call hooks.
634 				// If !IRQ_WORK, FQS scan will eventually IPI.
635 				init_irq_work(&rdp->defer_qs_iw,
636 					      rcu_preempt_deferred_qs_handler);
637 				rdp->defer_qs_iw_pending = true;
638 				irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
639 			}
640 		}
641 		local_irq_restore(flags);
642 		return;
643 	}
644 	rcu_preempt_deferred_qs_irqrestore(t, flags);
645 }
646 
647 /*
648  * Check that the list of blocked tasks for the newly completed grace
649  * period is in fact empty.  It is a serious bug to complete a grace
650  * period that still has RCU readers blocked!  This function must be
651  * invoked -before- updating this rnp's ->gp_seq.
652  *
653  * Also, if there are blocked tasks on the list, they automatically
654  * block the newly created grace period, so set up ->gp_tasks accordingly.
655  */
656 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
657 {
658 	struct task_struct *t;
659 
660 	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
661 	raw_lockdep_assert_held_rcu_node(rnp);
662 	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
663 		dump_blkd_tasks(rnp, 10);
664 	if (rcu_preempt_has_tasks(rnp) &&
665 	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
666 		WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
667 		t = container_of(rnp->gp_tasks, struct task_struct,
668 				 rcu_node_entry);
669 		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
670 						rnp->gp_seq, t->pid);
671 	}
672 	WARN_ON_ONCE(rnp->qsmask);
673 }
674 
675 /*
676  * Check for a quiescent state from the current CPU, including voluntary
677  * context switches for Tasks RCU.  When a task blocks, the task is
678  * recorded in the corresponding CPU's rcu_node structure, which is checked
679  * elsewhere, hence this function need only check for quiescent states
680  * related to the current CPU, not to those related to tasks.
681  */
682 static void rcu_flavor_sched_clock_irq(int user)
683 {
684 	struct task_struct *t = current;
685 
686 	lockdep_assert_irqs_disabled();
687 	if (user || rcu_is_cpu_rrupt_from_idle()) {
688 		rcu_note_voluntary_context_switch(current);
689 	}
690 	if (rcu_preempt_depth() > 0 ||
691 	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
692 		/* No QS, force context switch if deferred. */
693 		if (rcu_preempt_need_deferred_qs(t)) {
694 			set_tsk_need_resched(t);
695 			set_preempt_need_resched();
696 		}
697 	} else if (rcu_preempt_need_deferred_qs(t)) {
698 		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
699 		return;
700 	} else if (!WARN_ON_ONCE(rcu_preempt_depth())) {
701 		rcu_qs(); /* Report immediate QS. */
702 		return;
703 	}
704 
705 	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
706 	if (rcu_preempt_depth() > 0 &&
707 	    __this_cpu_read(rcu_data.core_needs_qs) &&
708 	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
709 	    !t->rcu_read_unlock_special.b.need_qs &&
710 	    time_after(jiffies, rcu_state.gp_start + HZ))
711 		t->rcu_read_unlock_special.b.need_qs = true;
712 }
713 
714 /*
715  * Check for a task exiting while in a preemptible-RCU read-side
716  * critical section, clean up if so.  No need to issue warnings, as
717  * debug_check_no_locks_held() already does this if lockdep is enabled.
718  * Besides, if this function does anything other than just immediately
719  * return, there was a bug of some sort.  Spewing warnings from this
720  * function is like as not to simply obscure important prior warnings.
721  */
722 void exit_rcu(void)
723 {
724 	struct task_struct *t = current;
725 
726 	if (unlikely(!list_empty(&current->rcu_node_entry))) {
727 		rcu_preempt_depth_set(1);
728 		barrier();
729 		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
730 	} else if (unlikely(rcu_preempt_depth())) {
731 		rcu_preempt_depth_set(1);
732 	} else {
733 		return;
734 	}
735 	__rcu_read_unlock();
736 	rcu_preempt_deferred_qs(current);
737 }
738 
739 /*
740  * Dump the blocked-tasks state, but limit the list dump to the
741  * specified number of elements.
742  */
743 static void
744 dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
745 {
746 	int cpu;
747 	int i;
748 	struct list_head *lhp;
749 	bool onl;
750 	struct rcu_data *rdp;
751 	struct rcu_node *rnp1;
752 
753 	raw_lockdep_assert_held_rcu_node(rnp);
754 	pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
755 		__func__, rnp->grplo, rnp->grphi, rnp->level,
756 		(long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs);
757 	for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
758 		pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
759 			__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
760 	pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
761 		__func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks),
762 		READ_ONCE(rnp->exp_tasks));
763 	pr_info("%s: ->blkd_tasks", __func__);
764 	i = 0;
765 	list_for_each(lhp, &rnp->blkd_tasks) {
766 		pr_cont(" %p", lhp);
767 		if (++i >= ncheck)
768 			break;
769 	}
770 	pr_cont("\n");
771 	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
772 		rdp = per_cpu_ptr(&rcu_data, cpu);
773 		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
774 		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
775 			cpu, ".o"[onl],
776 			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
777 			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
778 	}
779 }
780 
781 #else /* #ifdef CONFIG_PREEMPT_RCU */
782 
783 /*
784  * If strict grace periods are enabled, and if the calling
785  * __rcu_read_unlock() marks the beginning of a quiescent state, immediately
786  * report that quiescent state and, if requested, spin for a bit.
787  */
788 void rcu_read_unlock_strict(void)
789 {
790 	struct rcu_data *rdp;
791 
792 	if (!IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ||
793 	   irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
794 		return;
795 	rdp = this_cpu_ptr(&rcu_data);
796 	rcu_report_qs_rdp(rdp);
797 	udelay(rcu_unlock_delay);
798 }
799 EXPORT_SYMBOL_GPL(rcu_read_unlock_strict);
800 
801 /*
802  * Tell them what RCU they are running.
803  */
804 static void __init rcu_bootup_announce(void)
805 {
806 	pr_info("Hierarchical RCU implementation.\n");
807 	rcu_bootup_announce_oddness();
808 }
809 
810 /*
811  * Note a quiescent state for PREEMPTION=n.  Because we do not need to know
812  * how many quiescent states passed, just if there was at least one since
813  * the start of the grace period, this just sets a flag.  The caller must
814  * have disabled preemption.
815  */
816 static void rcu_qs(void)
817 {
818 	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
819 	if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
820 		return;
821 	trace_rcu_grace_period(TPS("rcu_sched"),
822 			       __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
823 	__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
824 	if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
825 		return;
826 	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, false);
827 	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
828 }
829 
830 /*
831  * Register an urgently needed quiescent state.  If there is an
832  * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
833  * dyntick-idle quiescent state visible to other CPUs, which will in
834  * some cases serve for expedited as well as normal grace periods.
835  * Either way, register a lightweight quiescent state.
836  */
837 void rcu_all_qs(void)
838 {
839 	unsigned long flags;
840 
841 	if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
842 		return;
843 	preempt_disable();
844 	/* Load rcu_urgent_qs before other flags. */
845 	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
846 		preempt_enable();
847 		return;
848 	}
849 	this_cpu_write(rcu_data.rcu_urgent_qs, false);
850 	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
851 		local_irq_save(flags);
852 		rcu_momentary_dyntick_idle();
853 		local_irq_restore(flags);
854 	}
855 	rcu_qs();
856 	preempt_enable();
857 }
858 EXPORT_SYMBOL_GPL(rcu_all_qs);
859 
860 /*
861  * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
862  */
863 void rcu_note_context_switch(bool preempt)
864 {
865 	trace_rcu_utilization(TPS("Start context switch"));
866 	rcu_qs();
867 	/* Load rcu_urgent_qs before other flags. */
868 	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
869 		goto out;
870 	this_cpu_write(rcu_data.rcu_urgent_qs, false);
871 	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
872 		rcu_momentary_dyntick_idle();
873 	rcu_tasks_qs(current, preempt);
874 out:
875 	trace_rcu_utilization(TPS("End context switch"));
876 }
877 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
878 
879 /*
880  * Because preemptible RCU does not exist, there are never any preempted
881  * RCU readers.
882  */
883 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
884 {
885 	return 0;
886 }
887 
888 /*
889  * Because there is no preemptible RCU, there can be no readers blocked.
890  */
891 static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
892 {
893 	return false;
894 }
895 
896 /*
897  * Because there is no preemptible RCU, there can be no deferred quiescent
898  * states.
899  */
900 static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
901 {
902 	return false;
903 }
904 static void rcu_preempt_deferred_qs(struct task_struct *t) { }
905 
906 /*
907  * Because there is no preemptible RCU, there can be no readers blocked,
908  * so there is no need to check for blocked tasks.  So check only for
909  * bogus qsmask values.
910  */
911 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
912 {
913 	WARN_ON_ONCE(rnp->qsmask);
914 }
915 
916 /*
917  * Check to see if this CPU is in a non-context-switch quiescent state,
918  * namely user mode and idle loop.
919  */
920 static void rcu_flavor_sched_clock_irq(int user)
921 {
922 	if (user || rcu_is_cpu_rrupt_from_idle()) {
923 
924 		/*
925 		 * Get here if this CPU took its interrupt from user
926 		 * mode or from the idle loop, and if this is not a
927 		 * nested interrupt.  In this case, the CPU is in
928 		 * a quiescent state, so note it.
929 		 *
930 		 * No memory barrier is required here because rcu_qs()
931 		 * references only CPU-local variables that other CPUs
932 		 * neither access nor modify, at least not while the
933 		 * corresponding CPU is online.
934 		 */
935 
936 		rcu_qs();
937 	}
938 }
939 
940 /*
941  * Because preemptible RCU does not exist, tasks cannot possibly exit
942  * while in preemptible RCU read-side critical sections.
943  */
944 void exit_rcu(void)
945 {
946 }
947 
948 /*
949  * Dump the guaranteed-empty blocked-tasks state.  Trust but verify.
950  */
951 static void
952 dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
953 {
954 	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
955 }
956 
957 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
958 
959 /*
960  * If boosting, set rcuc kthreads to realtime priority.
961  */
962 static void rcu_cpu_kthread_setup(unsigned int cpu)
963 {
964 #ifdef CONFIG_RCU_BOOST
965 	struct sched_param sp;
966 
967 	sp.sched_priority = kthread_prio;
968 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
969 #endif /* #ifdef CONFIG_RCU_BOOST */
970 }
971 
972 #ifdef CONFIG_RCU_BOOST
973 
974 /*
975  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
976  * or ->boost_tasks, advancing the pointer to the next task in the
977  * ->blkd_tasks list.
978  *
979  * Note that irqs must be enabled: boosting the task can block.
980  * Returns 1 if there are more tasks needing to be boosted.
981  */
982 static int rcu_boost(struct rcu_node *rnp)
983 {
984 	unsigned long flags;
985 	struct task_struct *t;
986 	struct list_head *tb;
987 
988 	if (READ_ONCE(rnp->exp_tasks) == NULL &&
989 	    READ_ONCE(rnp->boost_tasks) == NULL)
990 		return 0;  /* Nothing left to boost. */
991 
992 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
993 
994 	/*
995 	 * Recheck under the lock: all tasks in need of boosting
996 	 * might exit their RCU read-side critical sections on their own.
997 	 */
998 	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
999 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1000 		return 0;
1001 	}
1002 
1003 	/*
1004 	 * Preferentially boost tasks blocking expedited grace periods.
1005 	 * This cannot starve the normal grace periods because a second
1006 	 * expedited grace period must boost all blocked tasks, including
1007 	 * those blocking the pre-existing normal grace period.
1008 	 */
1009 	if (rnp->exp_tasks != NULL)
1010 		tb = rnp->exp_tasks;
1011 	else
1012 		tb = rnp->boost_tasks;
1013 
1014 	/*
1015 	 * We boost task t by manufacturing an rt_mutex that appears to
1016 	 * be held by task t.  We leave a pointer to that rt_mutex where
1017 	 * task t can find it, and task t will release the mutex when it
1018 	 * exits its outermost RCU read-side critical section.  Then
1019 	 * simply acquiring this artificial rt_mutex will boost task
1020 	 * t's priority.  (Thanks to tglx for suggesting this approach!)
1021 	 *
1022 	 * Note that task t must acquire rnp->lock to remove itself from
1023 	 * the ->blkd_tasks list, which it will do from exit() if from
1024 	 * nowhere else.  We therefore are guaranteed that task t will
1025 	 * stay around at least until we drop rnp->lock.  Note that
1026 	 * rnp->lock also resolves races between our priority boosting
1027 	 * and task t's exiting its outermost RCU read-side critical
1028 	 * section.
1029 	 */
1030 	t = container_of(tb, struct task_struct, rcu_node_entry);
1031 	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
1032 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1033 	/* Lock only for side effect: boosts task t's priority. */
1034 	rt_mutex_lock(&rnp->boost_mtx);
1035 	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
1036 
1037 	return READ_ONCE(rnp->exp_tasks) != NULL ||
1038 	       READ_ONCE(rnp->boost_tasks) != NULL;
1039 }
1040 
1041 /*
1042  * Priority-boosting kthread, one per leaf rcu_node.
1043  */
1044 static int rcu_boost_kthread(void *arg)
1045 {
1046 	struct rcu_node *rnp = (struct rcu_node *)arg;
1047 	int spincnt = 0;
1048 	int more2boost;
1049 
1050 	trace_rcu_utilization(TPS("Start boost kthread@init"));
1051 	for (;;) {
1052 		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING);
1053 		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
1054 		rcu_wait(READ_ONCE(rnp->boost_tasks) ||
1055 			 READ_ONCE(rnp->exp_tasks));
1056 		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
1057 		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING);
1058 		more2boost = rcu_boost(rnp);
1059 		if (more2boost)
1060 			spincnt++;
1061 		else
1062 			spincnt = 0;
1063 		if (spincnt > 10) {
1064 			WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING);
1065 			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
1066 			schedule_timeout_idle(2);
1067 			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
1068 			spincnt = 0;
1069 		}
1070 	}
1071 	/* NOTREACHED */
1072 	trace_rcu_utilization(TPS("End boost kthread@notreached"));
1073 	return 0;
1074 }
1075 
1076 /*
1077  * Check to see if it is time to start boosting RCU readers that are
1078  * blocking the current grace period, and, if so, tell the per-rcu_node
1079  * kthread to start boosting them.  If there is an expedited grace
1080  * period in progress, it is always time to boost.
1081  *
1082  * The caller must hold rnp->lock, which this function releases.
1083  * The ->boost_kthread_task is immortal, so we don't need to worry
1084  * about it going away.
1085  */
1086 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1087 	__releases(rnp->lock)
1088 {
1089 	raw_lockdep_assert_held_rcu_node(rnp);
1090 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
1091 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1092 		return;
1093 	}
1094 	if (rnp->exp_tasks != NULL ||
1095 	    (rnp->gp_tasks != NULL &&
1096 	     rnp->boost_tasks == NULL &&
1097 	     rnp->qsmask == 0 &&
1098 	     (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld))) {
1099 		if (rnp->exp_tasks == NULL)
1100 			WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks);
1101 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1102 		rcu_wake_cond(rnp->boost_kthread_task,
1103 			      READ_ONCE(rnp->boost_kthread_status));
1104 	} else {
1105 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1106 	}
1107 }
1108 
1109 /*
1110  * Is the current CPU running the RCU-callbacks kthread?
1111  * Caller must have preemption disabled.
1112  */
1113 static bool rcu_is_callbacks_kthread(void)
1114 {
1115 	return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current;
1116 }
1117 
1118 #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
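/*
 * Worked example (values illustrative): CONFIG_RCU_BOOST_DELAY is given
 * in milliseconds, so with CONFIG_RCU_BOOST_DELAY=500 and HZ=250 this
 * macro evaluates to DIV_ROUND_UP(500 * 250, 1000) = 125 jiffies, that
 * is, boosting begins roughly 500 ms after the grace period starts.
 */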
1119 
1120 /*
1121  * Do priority-boost accounting for the start of a new grace period.
1122  */
1123 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1124 {
1125 	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
1126 }
1127 
1128 /*
1129  * Create an RCU-boost kthread for the specified node if one does not
1130  * already exist.  We only create this kthread for preemptible RCU.
1131  * Returns zero if all is well, a negated errno otherwise.
1132  */
1133 static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
1134 {
1135 	int rnp_index = rnp - rcu_get_root();
1136 	unsigned long flags;
1137 	struct sched_param sp;
1138 	struct task_struct *t;
1139 
1140 	if (!IS_ENABLED(CONFIG_PREEMPT_RCU))
1141 		return;
1142 
1143 	if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
1144 		return;
1145 
1146 	rcu_state.boost = 1;
1147 
1148 	if (rnp->boost_kthread_task != NULL)
1149 		return;
1150 
1151 	t = kthread_create(rcu_boost_kthread, (void *)rnp,
1152 			   "rcub/%d", rnp_index);
1153 	if (WARN_ON_ONCE(IS_ERR(t)))
1154 		return;
1155 
1156 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
1157 	rnp->boost_kthread_task = t;
1158 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1159 	sp.sched_priority = kthread_prio;
1160 	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1161 	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1162 }
1163 
1164 /*
1165  * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1166  * served by the rcu_node in question.  The CPU hotplug lock is still
1167  * held, so the value of rnp->qsmaskinit will be stable.
1168  *
1169  * We don't include outgoingcpu in the affinity set, use -1 if there is
1170  * no outgoing CPU.  If there are no CPUs left in the affinity set,
1171  * this function allows the kthread to execute on any CPU.
1172  */
1173 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1174 {
1175 	struct task_struct *t = rnp->boost_kthread_task;
1176 	unsigned long mask = rcu_rnp_online_cpus(rnp);
1177 	cpumask_var_t cm;
1178 	int cpu;
1179 
1180 	if (!t)
1181 		return;
1182 	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
1183 		return;
1184 	for_each_leaf_node_possible_cpu(rnp, cpu)
1185 		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
1186 		    cpu != outgoingcpu)
1187 			cpumask_set_cpu(cpu, cm);
1188 	if (cpumask_weight(cm) == 0)
1189 		cpumask_setall(cm);
1190 	set_cpus_allowed_ptr(t, cm);
1191 	free_cpumask_var(cm);
1192 }
1193 
1194 /*
1195  * Spawn boost kthreads -- called as soon as the scheduler is running.
1196  */
1197 static void __init rcu_spawn_boost_kthreads(void)
1198 {
1199 	struct rcu_node *rnp;
1200 
1201 	rcu_for_each_leaf_node(rnp)
1202 		rcu_spawn_one_boost_kthread(rnp);
1203 }
1204 
1205 static void rcu_prepare_kthreads(int cpu)
1206 {
1207 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1208 	struct rcu_node *rnp = rdp->mynode;
1209 
1210 	/* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1211 	if (rcu_scheduler_fully_active)
1212 		rcu_spawn_one_boost_kthread(rnp);
1213 }
1214 
1215 #else /* #ifdef CONFIG_RCU_BOOST */
1216 
1217 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1218 	__releases(rnp->lock)
1219 {
1220 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1221 }
1222 
1223 static bool rcu_is_callbacks_kthread(void)
1224 {
1225 	return false;
1226 }
1227 
1228 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1229 {
1230 }
1231 
1232 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1233 {
1234 }
1235 
1236 static void __init rcu_spawn_boost_kthreads(void)
1237 {
1238 }
1239 
1240 static void rcu_prepare_kthreads(int cpu)
1241 {
1242 }
1243 
1244 #endif /* #else #ifdef CONFIG_RCU_BOOST */
1245 
1246 #if !defined(CONFIG_RCU_FAST_NO_HZ)
1247 
1248 /*
1249  * Check to see if any future non-offloaded RCU-related work will need
1250  * to be done by the current CPU, even if none need be done immediately,
1251  * returning 1 if so.  This function is part of the RCU implementation;
1252  * it is -not- an exported member of the RCU API.
1253  *
1254  * Because we do not have RCU_FAST_NO_HZ, just check whether or not this
1255  * CPU has RCU callbacks queued.
1256  */
1257 int rcu_needs_cpu(u64 basemono, u64 *nextevt)
1258 {
1259 	*nextevt = KTIME_MAX;
1260 	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
1261 	       !rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist);
1262 }
1263 
1264 /*
1265  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
1266  * after it.
1267  */
1268 static void rcu_cleanup_after_idle(void)
1269 {
1270 }
1271 
1272 /*
1273  * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
1274  * is nothing.
1275  */
1276 static void rcu_prepare_for_idle(void)
1277 {
1278 }
1279 
1280 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1281 
1282 /*
1283  * This code is invoked when a CPU goes idle, at which point we want
1284  * to have the CPU do everything required for RCU so that it can enter
1285  * the energy-efficient dyntick-idle mode.
1286  *
1287  * The following preprocessor symbol controls this:
1288  *
1289  * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
1290  *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
1291  *	is sized to be roughly one RCU grace period.  Those energy-efficiency
1292  *	benchmarkers who might otherwise be tempted to set this to a large
1293  *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
1294  *	system.  And if you are -that- concerned about energy efficiency,
1295  *	just power the system down and be done with it!
1296  *
1297  * The value below works well in practice.  If future workloads require
1298  * adjustment, they can be converted into kernel config parameters, though
1299  * making the state machine smarter might be a better option.
1300  */
1301 #define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */
1302 
1303 static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
1304 module_param(rcu_idle_gp_delay, int, 0644);
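/*
 * Illustrative note: rcu_needs_cpu() below rounds its wakeup deadline up
 * to a multiple of rcu_idle_gp_delay jiffies, so an idle CPU with pending
 * callbacks sleeps between rcu_idle_gp_delay and 2 * rcu_idle_gp_delay - 1
 * jiffies, and wakeups across CPUs tend to land on the same jiffies
 * boundaries.
 */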
1305 
1306 /*
1307  * Try to advance callbacks on the current CPU, but only if it has been
1308  * awhile since the last time we did so.  Afterwards, if there are any
1309  * callbacks ready for immediate invocation, return true.
1310  */
1311 static bool __maybe_unused rcu_try_advance_all_cbs(void)
1312 {
1313 	bool cbs_ready = false;
1314 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1315 	struct rcu_node *rnp;
1316 
1317 	/* Exit early if we advanced recently. */
1318 	if (jiffies == rdp->last_advance_all)
1319 		return false;
1320 	rdp->last_advance_all = jiffies;
1321 
1322 	rnp = rdp->mynode;
1323 
1324 	/*
1325 	 * Don't bother checking unless a grace period has
1326 	 * completed since we last checked and there are
1327 	 * callbacks not yet ready to invoke.
1328 	 */
1329 	if ((rcu_seq_completed_gp(rdp->gp_seq,
1330 				  rcu_seq_current(&rnp->gp_seq)) ||
1331 	     unlikely(READ_ONCE(rdp->gpwrap))) &&
1332 	    rcu_segcblist_pend_cbs(&rdp->cblist))
1333 		note_gp_changes(rdp);
1334 
1335 	if (rcu_segcblist_ready_cbs(&rdp->cblist))
1336 		cbs_ready = true;
1337 	return cbs_ready;
1338 }
1339 
1340 /*
1341  * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
1342  * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
1343  * caller about what to set the timeout.
1344  *
1345  * The caller must have disabled interrupts.
1346  */
1347 int rcu_needs_cpu(u64 basemono, u64 *nextevt)
1348 {
1349 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1350 	unsigned long dj;
1351 
1352 	lockdep_assert_irqs_disabled();
1353 
1354 	/* If no non-offloaded callbacks, RCU doesn't need the CPU. */
1355 	if (rcu_segcblist_empty(&rdp->cblist) ||
1356 	    rcu_segcblist_is_offloaded(&this_cpu_ptr(&rcu_data)->cblist)) {
1357 		*nextevt = KTIME_MAX;
1358 		return 0;
1359 	}
1360 
1361 	/* Attempt to advance callbacks. */
1362 	if (rcu_try_advance_all_cbs()) {
1363 		/* Some ready to invoke, so initiate later invocation. */
1364 		invoke_rcu_core();
1365 		return 1;
1366 	}
1367 	rdp->last_accelerate = jiffies;
1368 
1369 	/* Request timer and round. */
1370 	dj = round_up(rcu_idle_gp_delay + jiffies, rcu_idle_gp_delay) - jiffies;
1371 
1372 	*nextevt = basemono + dj * TICK_NSEC;
1373 	return 0;
1374 }
1375 
1376 /*
1377  * Prepare a CPU for idle from an RCU perspective.  The first major task is to
1378  * sense whether nohz mode has been enabled or disabled via sysfs.  The second
1379  * major task is to accelerate (that is, assign grace-period numbers to) any
1380  * recently arrived callbacks.
1381  *
1382  * The caller must have disabled interrupts.
1383  */
1384 static void rcu_prepare_for_idle(void)
1385 {
1386 	bool needwake;
1387 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1388 	struct rcu_node *rnp;
1389 	int tne;
1390 
1391 	lockdep_assert_irqs_disabled();
1392 	if (rcu_segcblist_is_offloaded(&rdp->cblist))
1393 		return;
1394 
1395 	/* Handle nohz enablement switches conservatively. */
1396 	tne = READ_ONCE(tick_nohz_active);
1397 	if (tne != rdp->tick_nohz_enabled_snap) {
1398 		if (!rcu_segcblist_empty(&rdp->cblist))
1399 			invoke_rcu_core(); /* force nohz to see update. */
1400 		rdp->tick_nohz_enabled_snap = tne;
1401 		return;
1402 	}
1403 	if (!tne)
1404 		return;
1405 
1406 	/*
1407 	 * If we have not yet accelerated this jiffy, accelerate all
1408 	 * callbacks on this CPU.
1409 	 */
1410 	if (rdp->last_accelerate == jiffies)
1411 		return;
1412 	rdp->last_accelerate = jiffies;
1413 	if (rcu_segcblist_pend_cbs(&rdp->cblist)) {
1414 		rnp = rdp->mynode;
1415 		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1416 		needwake = rcu_accelerate_cbs(rnp, rdp);
1417 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1418 		if (needwake)
1419 			rcu_gp_kthread_wake();
1420 	}
1421 }
1422 
1423 /*
1424  * Clean up for exit from idle.  Attempt to advance callbacks based on
1425  * any grace periods that elapsed while the CPU was idle, and if any
1426  * callbacks are now ready to invoke, initiate invocation.
1427  */
1428 static void rcu_cleanup_after_idle(void)
1429 {
1430 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1431 
1432 	lockdep_assert_irqs_disabled();
1433 	if (rcu_segcblist_is_offloaded(&rdp->cblist))
1434 		return;
1435 	if (rcu_try_advance_all_cbs())
1436 		invoke_rcu_core();
1437 }
1438 
1439 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
1440 
1441 #ifdef CONFIG_RCU_NOCB_CPU
1442 
1443 /*
1444  * Offload callback processing from the boot-time-specified set of CPUs
1445  * specified by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
1446  * created that pull the callbacks from the corresponding CPU, wait for
1447  * a grace period to elapse, and invoke the callbacks.  These kthreads
1448  * are organized into GP kthreads, which manage incoming callbacks, wait for
1449  * grace periods, and awaken CB kthreads, and the CB kthreads, which only
1450  * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
1451  * do a wake_up() on their GP kthread when they insert a callback into any
1452  * empty list, unless the rcu_nocb_poll boot parameter has been specified,
1453  * in which case each kthread actively polls its CPU.  (Which isn't so great
1454  * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
1455  *
1456  * This is intended to be used in conjunction with Frederic Weisbecker's
1457  * adaptive-idle work, which would seriously reduce OS jitter on CPUs
1458  * running CPU-bound user-mode computations.
1459  *
1460  * Offloading of callbacks can also be used as an energy-efficiency
1461  * measure because CPUs with no RCU callbacks queued are more aggressive
1462  * about entering dyntick-idle mode.
1463  */
1464 
1465 
1466 /*
1467  * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
1468  * The string after the "rcu_nocbs=" is either "all" for all CPUs, or a
1469  * comma-separated list of CPUs and/or CPU ranges.  If an invalid list is
1470  * given, a warning is emitted and all CPUs are offloaded.
1471  */
1472 static int __init rcu_nocb_setup(char *str)
1473 {
1474 	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
1475 	if (!strcasecmp(str, "all"))
1476 		cpumask_setall(rcu_nocb_mask);
1477 	else
1478 		if (cpulist_parse(str, rcu_nocb_mask)) {
1479 			pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
1480 			cpumask_setall(rcu_nocb_mask);
1481 		}
1482 	return 1;
1483 }
1484 __setup("rcu_nocbs=", rcu_nocb_setup);
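/*
 * Usage example (illustrative): booting with "rcu_nocbs=1-7,15" offloads
 * callbacks for CPUs 1 through 7 and CPU 15, while "rcu_nocbs=all"
 * offloads every CPU.
 */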
1485 
1486 static int __init parse_rcu_nocb_poll(char *arg)
1487 {
1488 	rcu_nocb_poll = true;
1489 	return 0;
1490 }
1491 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
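/*
 * Illustrative note (editorial): adding "rcu_nocb_poll" to the kernel
 * command line sets rcu_nocb_poll above, so the GP kthreads poll for
 * newly queued callbacks instead of relying on wakeups from the
 * offloaded CPUs.
 */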
1492 
1493 /*
1494  * Don't bother bypassing ->cblist if the call_rcu() rate is low.
1495  * After all, the main point of bypassing is to avoid lock contention
1496  * on ->nocb_lock, which can only happen at high call_rcu() rates.
1497  */
1498 int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
1499 module_param(nocb_nobypass_lim_per_jiffy, int, 0);
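/*
 * Worked example (editorial, assuming the default value above): 16 * 1000 / HZ
 * direct ->cblist enqueues are permitted per jiffy before further callbacks
 * are diverted to ->nocb_bypass.  That is 16 with HZ=1000, 64 with HZ=250,
 * and 160 with HZ=100 -- roughly 16000 call_rcu() invocations per second
 * in each case.
 */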
1500 
1501 /*
1502  * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
1503  * lock isn't immediately available, increment ->nocb_lock_contended to
1504  * flag the contention.
1505  */
1506 static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
1507 	__acquires(&rdp->nocb_bypass_lock)
1508 {
1509 	lockdep_assert_irqs_disabled();
1510 	if (raw_spin_trylock(&rdp->nocb_bypass_lock))
1511 		return;
1512 	atomic_inc(&rdp->nocb_lock_contended);
1513 	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
1514 	smp_mb__after_atomic(); /* atomic_inc() before lock. */
1515 	raw_spin_lock(&rdp->nocb_bypass_lock);
1516 	smp_mb__before_atomic(); /* atomic_dec() after lock. */
1517 	atomic_dec(&rdp->nocb_lock_contended);
1518 }
1519 
1520 /*
1521  * Spinwait until the specified rcu_data structure's ->nocb_lock is
1522  * not contended.  Please note that this is extremely special-purpose,
1523  * relying on the fact that at most two kthreads and one CPU contend for
1524  * this lock, and also that the two kthreads are guaranteed to have frequent
1525  * grace-period-duration time intervals between successive acquisitions
1526  * of the lock.  This allows us to use an extremely simple throttling
1527  * mechanism, and further to apply it only to the CPU doing floods of
1528  * call_rcu() invocations.  Don't try this at home!
1529  */
1530 static void rcu_nocb_wait_contended(struct rcu_data *rdp)
1531 {
1532 	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
1533 	while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
1534 		cpu_relax();
1535 }
1536 
1537 /*
1538  * Conditionally acquire the specified rcu_data structure's
1539  * ->nocb_bypass_lock.
1540  */
1541 static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
1542 {
1543 	lockdep_assert_irqs_disabled();
1544 	return raw_spin_trylock(&rdp->nocb_bypass_lock);
1545 }
1546 
1547 /*
1548  * Release the specified rcu_data structure's ->nocb_bypass_lock.
1549  */
1550 static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
1551 	__releases(&rdp->nocb_bypass_lock)
1552 {
1553 	lockdep_assert_irqs_disabled();
1554 	raw_spin_unlock(&rdp->nocb_bypass_lock);
1555 }
1556 
1557 /*
1558  * Acquire the specified rcu_data structure's ->nocb_lock, but only
1559  * if it corresponds to a no-CBs CPU.
1560  */
1561 static void rcu_nocb_lock(struct rcu_data *rdp)
1562 {
1563 	lockdep_assert_irqs_disabled();
1564 	if (!rcu_segcblist_is_offloaded(&rdp->cblist))
1565 		return;
1566 	raw_spin_lock(&rdp->nocb_lock);
1567 }
1568 
1569 /*
1570  * Release the specified rcu_data structure's ->nocb_lock, but only
1571  * if it corresponds to a no-CBs CPU.
1572  */
1573 static void rcu_nocb_unlock(struct rcu_data *rdp)
1574 {
1575 	if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
1576 		lockdep_assert_irqs_disabled();
1577 		raw_spin_unlock(&rdp->nocb_lock);
1578 	}
1579 }
1580 
1581 /*
1582  * Release the specified rcu_data structure's ->nocb_lock and restore
1583  * interrupts, but only if it corresponds to a no-CBs CPU.
1584  */
1585 static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
1586 				       unsigned long flags)
1587 {
1588 	if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
1589 		lockdep_assert_irqs_disabled();
1590 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1591 	} else {
1592 		local_irq_restore(flags);
1593 	}
1594 }
1595 
1596 /* Lockdep check that ->cblist may be safely accessed. */
1597 static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
1598 {
1599 	lockdep_assert_irqs_disabled();
1600 	if (rcu_segcblist_is_offloaded(&rdp->cblist))
1601 		lockdep_assert_held(&rdp->nocb_lock);
1602 }
1603 
1604 /*
1605  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
1606  * grace period.
1607  */
1608 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
1609 {
1610 	swake_up_all(sq);
1611 }
1612 
1613 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
1614 {
1615 	return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
1616 }
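/*
 * Editorial note (not in the original source): the two-entry
 * nocb_gp_wq[] array is indexed by the low-order bit of the
 * grace-period sequence counter, so consecutive grace periods use
 * alternating wait queues.  Waking the waiters for one grace period
 * therefore does not disturb kthreads already waiting for the next one.
 */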
1617 
1618 static void rcu_init_one_nocb(struct rcu_node *rnp)
1619 {
1620 	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
1621 	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
1622 }
1623 
1624 /* Is the specified CPU a no-CBs CPU? */
1625 bool rcu_is_nocb_cpu(int cpu)
1626 {
1627 	if (cpumask_available(rcu_nocb_mask))
1628 		return cpumask_test_cpu(cpu, rcu_nocb_mask);
1629 	return false;
1630 }
1631 
1632 /*
1633  * Kick the GP kthread for this NOCB group.  Caller holds ->nocb_lock
1634  * and this function releases it.
1635  */
1636 static void wake_nocb_gp(struct rcu_data *rdp, bool force,
1637 			   unsigned long flags)
1638 	__releases(rdp->nocb_lock)
1639 {
1640 	bool needwake = false;
1641 	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
1642 
1643 	lockdep_assert_held(&rdp->nocb_lock);
1644 	if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
1645 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1646 				    TPS("AlreadyAwake"));
1647 		rcu_nocb_unlock_irqrestore(rdp, flags);
1648 		return;
1649 	}
1650 
1651 	if (READ_ONCE(rdp->nocb_defer_wakeup) > RCU_NOCB_WAKE_NOT) {
1652 		WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
1653 		del_timer(&rdp->nocb_timer);
1654 	}
1655 	rcu_nocb_unlock_irqrestore(rdp, flags);
1656 	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
1657 	if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
1658 		WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
1659 		needwake = true;
1660 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
1661 	}
1662 	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
1663 	if (needwake)
1664 		wake_up_process(rdp_gp->nocb_gp_kthread);
1665 }
1666 
1667 /*
1668  * Arrange to wake the GP kthread for this NOCB group at some future
1669  * time when it is safe to do so.
1670  */
1671 static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
1672 			       const char *reason)
1673 {
1674 	if (rdp->nocb_defer_wakeup == RCU_NOCB_WAKE_NOT)
1675 		mod_timer(&rdp->nocb_timer, jiffies + 1);
1676 	if (rdp->nocb_defer_wakeup < waketype)
1677 		WRITE_ONCE(rdp->nocb_defer_wakeup, waketype);
1678 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
1679 }
1680 
1681 /*
1682  * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
1683  * However, if there is a callback to be enqueued and if ->nocb_bypass
1684  * proves to be initially empty, just return false because the no-CB GP
1685  * kthread may need to be awakened in this case.
1686  *
1687  * Note that this function always returns true if rhp is NULL.
1688  */
1689 static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
1690 				     unsigned long j)
1691 {
1692 	struct rcu_cblist rcl;
1693 
1694 	WARN_ON_ONCE(!rcu_segcblist_is_offloaded(&rdp->cblist));
1695 	rcu_lockdep_assert_cblist_protected(rdp);
1696 	lockdep_assert_held(&rdp->nocb_bypass_lock);
1697 	if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
1698 		raw_spin_unlock(&rdp->nocb_bypass_lock);
1699 		return false;
1700 	}
1701 	/* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
1702 	if (rhp)
1703 		rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
1704 	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
1705 	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
1706 	WRITE_ONCE(rdp->nocb_bypass_first, j);
1707 	rcu_nocb_bypass_unlock(rdp);
1708 	return true;
1709 }
1710 
1711 /*
1712  * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
1713  * However, if there is a callback to be enqueued and if ->nocb_bypass
1714  * proves to be initially empty, just return false because the no-CB GP
1715  * kthread may need to be awakened in this case.
1716  *
1717  * Note that this function always returns true if rhp is NULL.
1718  */
1719 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
1720 				  unsigned long j)
1721 {
1722 	if (!rcu_segcblist_is_offloaded(&rdp->cblist))
1723 		return true;
1724 	rcu_lockdep_assert_cblist_protected(rdp);
1725 	rcu_nocb_bypass_lock(rdp);
1726 	return rcu_nocb_do_flush_bypass(rdp, rhp, j);
1727 }
1728 
1729 /*
1730  * If the ->nocb_bypass_lock is immediately available, flush the
1731  * ->nocb_bypass queue into ->cblist.
1732  */
1733 static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
1734 {
1735 	rcu_lockdep_assert_cblist_protected(rdp);
1736 	if (!rcu_segcblist_is_offloaded(&rdp->cblist) ||
1737 	    !rcu_nocb_bypass_trylock(rdp))
1738 		return;
1739 	WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
1740 }
1741 
1742 /*
1743  * See whether it is appropriate to use the ->nocb_bypass list in order
1744  * to control contention on ->nocb_lock.  A limited number of direct
1745  * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
1746  * is non-empty, further callbacks must be placed into ->nocb_bypass,
1747  * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
1748  * back to direct use of ->cblist.  However, ->nocb_bypass should not be
1749  * used if ->cblist is empty, because otherwise callbacks can be stranded
1750  * on ->nocb_bypass because we cannot count on the current CPU ever again
1751  * invoking call_rcu().  The general rule is that if ->nocb_bypass is
1752  * non-empty, the corresponding no-CBs grace-period kthread must not be
1753  * in an indefinite sleep state.
1754  *
1755  * Finally, it is not permitted to use the bypass during early boot,
1756  * as doing so would confuse the auto-initialization code.  Besides
1757  * which, there is no point in worrying about lock contention while
1758  * there is only one CPU in operation.
1759  */
1760 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
1761 				bool *was_alldone, unsigned long flags)
1762 {
1763 	unsigned long c;
1764 	unsigned long cur_gp_seq;
1765 	unsigned long j = jiffies;
1766 	long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
1767 
1768 	if (!rcu_segcblist_is_offloaded(&rdp->cblist)) {
1769 		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
1770 		return false; /* Not offloaded, no bypassing. */
1771 	}
1772 	lockdep_assert_irqs_disabled();
1773 
1774 	// Don't use ->nocb_bypass during early boot.
1775 	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
1776 		rcu_nocb_lock(rdp);
1777 		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
1778 		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
1779 		return false;
1780 	}
1781 
1782 	// If we have advanced to a new jiffy, reset counts to allow
1783 	// moving back from ->nocb_bypass to ->cblist.
1784 	if (j == rdp->nocb_nobypass_last) {
1785 		c = rdp->nocb_nobypass_count + 1;
1786 	} else {
1787 		WRITE_ONCE(rdp->nocb_nobypass_last, j);
1788 		c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
1789 		if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
1790 				 nocb_nobypass_lim_per_jiffy))
1791 			c = 0;
1792 		else if (c > nocb_nobypass_lim_per_jiffy)
1793 			c = nocb_nobypass_lim_per_jiffy;
1794 	}
1795 	WRITE_ONCE(rdp->nocb_nobypass_count, c);
1796 
1797 	// If there hasn't yet been all that many ->cblist enqueues
1798 	// this jiffy, tell the caller to enqueue onto ->cblist.  But flush
1799 	// ->nocb_bypass first.
1800 	if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
1801 		rcu_nocb_lock(rdp);
1802 		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
1803 		if (*was_alldone)
1804 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1805 					    TPS("FirstQ"));
1806 		WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
1807 		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
1808 		return false; // Caller must enqueue the callback.
1809 	}
1810 
1811 	// If ->nocb_bypass has been used too long or is too full,
1812 	// flush ->nocb_bypass to ->cblist.
1813 	if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
1814 	    ncbs >= qhimark) {
1815 		rcu_nocb_lock(rdp);
1816 		if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
1817 			*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
1818 			if (*was_alldone)
1819 				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1820 						    TPS("FirstQ"));
1821 			WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
1822 			return false; // Caller must enqueue the callback.
1823 		}
1824 		if (j != rdp->nocb_gp_adv_time &&
1825 		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
1826 		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
1827 			rcu_advance_cbs_nowake(rdp->mynode, rdp);
1828 			rdp->nocb_gp_adv_time = j;
1829 		}
1830 		rcu_nocb_unlock_irqrestore(rdp, flags);
1831 		return true; // Callback already enqueued.
1832 	}
1833 
1834 	// We need to use the bypass.
1835 	rcu_nocb_wait_contended(rdp);
1836 	rcu_nocb_bypass_lock(rdp);
1837 	ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
1838 	rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
1839 	rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
1840 	if (!ncbs) {
1841 		WRITE_ONCE(rdp->nocb_bypass_first, j);
1842 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
1843 	}
1844 	rcu_nocb_bypass_unlock(rdp);
1845 	smp_mb(); /* Order enqueue before wake. */
1846 	if (ncbs) {
1847 		local_irq_restore(flags);
1848 	} else {
1849 		// No-CBs GP kthread might be indefinitely asleep; if so, wake it.
1850 		rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
1851 		if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
1852 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1853 					    TPS("FirstBQwake"));
1854 			__call_rcu_nocb_wake(rdp, true, flags);
1855 		} else {
1856 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1857 					    TPS("FirstBQnoWake"));
1858 			rcu_nocb_unlock_irqrestore(rdp, flags);
1859 		}
1860 	}
1861 	return true; // Callback already enqueued.
1862 }
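/*
 * Worked example of the policy above (editorial, assuming HZ=1000):
 * during a call_rcu() flood on an offloaded CPU, roughly the first 16
 * callbacks in each jiffy are enqueued directly onto ->cblist, and later
 * arrivals in that jiffy are queued on ->nocb_bypass instead.  The
 * bypass is flushed back into ->cblist once it holds qhimark or more
 * callbacks, once its oldest callback dates from an earlier jiffy, or
 * when nocb_gp_wait() below finds it to be old or overly large.
 */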
1863 
1864 /*
1865  * Awaken the no-CBs grace-period kthread if needed, either due to it
1866  * legitimately being asleep or due to overload conditions.
1867  *
1868  * If warranted, also wake up the kthread servicing this CPU's queues.
1869  */
1870 static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
1871 				 unsigned long flags)
1872 				 __releases(rdp->nocb_lock)
1873 {
1874 	unsigned long cur_gp_seq;
1875 	unsigned long j;
1876 	long len;
1877 	struct task_struct *t;
1878 
1879 	// If we are being polled or there is no kthread, just leave.
1880 	t = READ_ONCE(rdp->nocb_gp_kthread);
1881 	if (rcu_nocb_poll || !t) {
1882 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1883 				    TPS("WakeNotPoll"));
1884 		rcu_nocb_unlock_irqrestore(rdp, flags);
1885 		return;
1886 	}
1887 	// Need to actually do a wakeup.
1888 	len = rcu_segcblist_n_cbs(&rdp->cblist);
1889 	if (was_alldone) {
1890 		rdp->qlen_last_fqs_check = len;
1891 		if (!irqs_disabled_flags(flags)) {
1892 			/* ... if queue was empty ... */
1893 			wake_nocb_gp(rdp, false, flags);
1894 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1895 					    TPS("WakeEmpty"));
1896 		} else {
1897 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
1898 					   TPS("WakeEmptyIsDeferred"));
1899 			rcu_nocb_unlock_irqrestore(rdp, flags);
1900 		}
1901 	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
1902 		/* ... or if many callbacks queued. */
1903 		rdp->qlen_last_fqs_check = len;
1904 		j = jiffies;
1905 		if (j != rdp->nocb_gp_adv_time &&
1906 		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
1907 		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
1908 			rcu_advance_cbs_nowake(rdp->mynode, rdp);
1909 			rdp->nocb_gp_adv_time = j;
1910 		}
1911 		smp_mb(); /* Enqueue before timer_pending(). */
1912 		if ((rdp->nocb_cb_sleep ||
1913 		     !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
1914 		    !timer_pending(&rdp->nocb_bypass_timer))
1915 			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
1916 					   TPS("WakeOvfIsDeferred"));
1917 		rcu_nocb_unlock_irqrestore(rdp, flags);
1918 	} else {
1919 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
1920 		rcu_nocb_unlock_irqrestore(rdp, flags);
1921 	}
1922 	return;
1923 }
1924 
1925 /* Wake up the no-CBs GP kthread to flush ->nocb_bypass. */
1926 static void do_nocb_bypass_wakeup_timer(struct timer_list *t)
1927 {
1928 	unsigned long flags;
1929 	struct rcu_data *rdp = from_timer(rdp, t, nocb_bypass_timer);
1930 
1931 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
1932 	rcu_nocb_lock_irqsave(rdp, flags);
1933 	smp_mb__after_spinlock(); /* Timer expire before wakeup. */
1934 	__call_rcu_nocb_wake(rdp, true, flags);
1935 }
1936 
1937 /*
1938  * No-CBs GP kthreads come here to wait for additional callbacks to show up
1939  * or for grace periods to end.
1940  */
1941 static void nocb_gp_wait(struct rcu_data *my_rdp)
1942 {
1943 	bool bypass = false;
1944 	long bypass_ncbs;
1945 	int __maybe_unused cpu = my_rdp->cpu;
1946 	unsigned long cur_gp_seq;
1947 	unsigned long flags;
1948 	bool gotcbs = false;
1949 	unsigned long j = jiffies;
1950 	bool needwait_gp = false; // This prevents actual uninitialized use.
1951 	bool needwake;
1952 	bool needwake_gp;
1953 	struct rcu_data *rdp;
1954 	struct rcu_node *rnp;
1955 	unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
1956 	bool wasempty = false;
1957 
1958 	/*
1959 	 * Each pass through the following loop checks for CBs and for the
1960 	 * nearest grace period (if any) to wait for next.  The CB kthreads
1961 	 * and the global grace-period kthread are awakened if needed.
1962 	 */
1963 	WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
1964 	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
1965 		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
1966 		rcu_nocb_lock_irqsave(rdp, flags);
1967 		bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
1968 		if (bypass_ncbs &&
1969 		    (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
1970 		     bypass_ncbs > 2 * qhimark)) {
1971 			// Bypass full or old, so flush it.
1972 			(void)rcu_nocb_try_flush_bypass(rdp, j);
1973 			bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
1974 		} else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
1975 			rcu_nocb_unlock_irqrestore(rdp, flags);
1976 			continue; /* No callbacks here, try next. */
1977 		}
1978 		if (bypass_ncbs) {
1979 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
1980 					    TPS("Bypass"));
1981 			bypass = true;
1982 		}
1983 		rnp = rdp->mynode;
1984 		if (bypass) {  // Avoid race with first bypass CB.
1985 			WRITE_ONCE(my_rdp->nocb_defer_wakeup,
1986 				   RCU_NOCB_WAKE_NOT);
1987 			del_timer(&my_rdp->nocb_timer);
1988 		}
1989 		// Advance callbacks if helpful and low contention.
1990 		needwake_gp = false;
1991 		if (!rcu_segcblist_restempty(&rdp->cblist,
1992 					     RCU_NEXT_READY_TAIL) ||
1993 		    (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
1994 		     rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
1995 			raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
1996 			needwake_gp = rcu_advance_cbs(rnp, rdp);
1997 			wasempty = rcu_segcblist_restempty(&rdp->cblist,
1998 							   RCU_NEXT_READY_TAIL);
1999 			raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
2000 		}
2001 		// Need to wait on some grace period?
2002 		WARN_ON_ONCE(wasempty &&
2003 			     !rcu_segcblist_restempty(&rdp->cblist,
2004 						      RCU_NEXT_READY_TAIL));
2005 		if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
2006 			if (!needwait_gp ||
2007 			    ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
2008 				wait_gp_seq = cur_gp_seq;
2009 			needwait_gp = true;
2010 			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
2011 					    TPS("NeedWaitGP"));
2012 		}
2013 		if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
2014 			needwake = rdp->nocb_cb_sleep;
2015 			WRITE_ONCE(rdp->nocb_cb_sleep, false);
2016 			smp_mb(); /* CB invocation -after- GP end. */
2017 		} else {
2018 			needwake = false;
2019 		}
2020 		rcu_nocb_unlock_irqrestore(rdp, flags);
2021 		if (needwake) {
2022 			swake_up_one(&rdp->nocb_cb_wq);
2023 			gotcbs = true;
2024 		}
2025 		if (needwake_gp)
2026 			rcu_gp_kthread_wake();
2027 	}
2028 
2029 	my_rdp->nocb_gp_bypass = bypass;
2030 	my_rdp->nocb_gp_gp = needwait_gp;
2031 	my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
2032 	if (bypass && !rcu_nocb_poll) {
2033 		// At least one child with non-empty ->nocb_bypass, so set
2034 		// timer in order to avoid stranding its callbacks.
2035 		raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
2036 		mod_timer(&my_rdp->nocb_bypass_timer, j + 2);
2037 		raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
2038 	}
2039 	if (rcu_nocb_poll) {
2040 		/* Polling, so trace if first poll in the series. */
2041 		if (gotcbs)
2042 			trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
2043 		schedule_timeout_idle(1);
2044 	} else if (!needwait_gp) {
2045 		/* Wait for callbacks to appear. */
2046 		trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
2047 		swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
2048 				!READ_ONCE(my_rdp->nocb_gp_sleep));
2049 		trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
2050 	} else {
2051 		rnp = my_rdp->mynode;
2052 		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
2053 		swait_event_interruptible_exclusive(
2054 			rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
2055 			rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
2056 			!READ_ONCE(my_rdp->nocb_gp_sleep));
2057 		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
2058 	}
2059 	if (!rcu_nocb_poll) {
2060 		raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
2061 		if (bypass)
2062 			del_timer(&my_rdp->nocb_bypass_timer);
2063 		WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
2064 		raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
2065 	}
2066 	my_rdp->nocb_gp_seq = -1;
2067 	WARN_ON(signal_pending(current));
2068 }
2069 
2070 /*
2071  * No-CBs grace-period-wait kthread.  There is one of these per group
2072  * of CPUs, created once at least one CPU in that group has come online
2073  * since boot.  This kthread checks for newly posted
2074  * callbacks from any of the CPUs it is responsible for, waits for a
2075  * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
2076  * that then have callback-invocation work to do.
2077  */
2078 static int rcu_nocb_gp_kthread(void *arg)
2079 {
2080 	struct rcu_data *rdp = arg;
2081 
2082 	for (;;) {
2083 		WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
2084 		nocb_gp_wait(rdp);
2085 		cond_resched_tasks_rcu_qs();
2086 	}
2087 	return 0;
2088 }
2089 
2090 /*
2091  * Invoke any ready callbacks from the corresponding no-CBs CPU,
2092  * then, if there are no more, wait for more to appear.
2093  */
2094 static void nocb_cb_wait(struct rcu_data *rdp)
2095 {
2096 	unsigned long cur_gp_seq;
2097 	unsigned long flags;
2098 	bool needwake_gp = false;
2099 	struct rcu_node *rnp = rdp->mynode;
2100 
2101 	local_irq_save(flags);
2102 	rcu_momentary_dyntick_idle();
2103 	local_irq_restore(flags);
2104 	local_bh_disable();
2105 	rcu_do_batch(rdp);
2106 	local_bh_enable();
2107 	lockdep_assert_irqs_enabled();
2108 	rcu_nocb_lock_irqsave(rdp, flags);
2109 	if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
2110 	    rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
2111 	    raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
2112 		needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
2113 		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2114 	}
2115 	if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
2116 		rcu_nocb_unlock_irqrestore(rdp, flags);
2117 		if (needwake_gp)
2118 			rcu_gp_kthread_wake();
2119 		return;
2120 	}
2121 
2122 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
2123 	WRITE_ONCE(rdp->nocb_cb_sleep, true);
2124 	rcu_nocb_unlock_irqrestore(rdp, flags);
2125 	if (needwake_gp)
2126 		rcu_gp_kthread_wake();
2127 	swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
2128 				 !READ_ONCE(rdp->nocb_cb_sleep));
2129 	if (!smp_load_acquire(&rdp->nocb_cb_sleep)) { /* VVV */
2130 		/* ^^^ Ensure CB invocation follows _sleep test. */
2131 		return;
2132 	}
2133 	WARN_ON(signal_pending(current));
2134 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
2135 }
2136 
2137 /*
2138  * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
2139  * nocb_cb_wait() to do the dirty work.
2140  */
2141 static int rcu_nocb_cb_kthread(void *arg)
2142 {
2143 	struct rcu_data *rdp = arg;
2144 
2145 	// Each pass through this loop does one callback batch, and,
2146 	// if there are no more ready callbacks, waits for them.
2147 	for (;;) {
2148 		nocb_cb_wait(rdp);
2149 		cond_resched_tasks_rcu_qs();
2150 	}
2151 	return 0;
2152 }
2153 
2154 /* Is a deferred wakeup of rcu_nocb_kthread() required? */
2155 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2156 {
2157 	return READ_ONCE(rdp->nocb_defer_wakeup);
2158 }
2159 
2160 /* Do a deferred wakeup of rcu_nocb_kthread(). */
2161 static void do_nocb_deferred_wakeup_common(struct rcu_data *rdp)
2162 {
2163 	unsigned long flags;
2164 	int ndw;
2165 
2166 	rcu_nocb_lock_irqsave(rdp, flags);
2167 	if (!rcu_nocb_need_deferred_wakeup(rdp)) {
2168 		rcu_nocb_unlock_irqrestore(rdp, flags);
2169 		return;
2170 	}
2171 	ndw = READ_ONCE(rdp->nocb_defer_wakeup);
2172 	wake_nocb_gp(rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
2173 	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
2174 }
2175 
2176 /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
2177 static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
2178 {
2179 	struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
2180 
2181 	do_nocb_deferred_wakeup_common(rdp);
2182 }
2183 
2184 /*
2185  * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
2186  * This means we do an inexact common-case check.  Note that if
2187  * we miss, ->nocb_timer will eventually clean things up.
2188  */
2189 static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2190 {
2191 	if (rcu_nocb_need_deferred_wakeup(rdp))
2192 		do_nocb_deferred_wakeup_common(rdp);
2193 }
2194 
2195 void rcu_nocb_flush_deferred_wakeup(void)
2196 {
2197 	do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
2198 }
2199 
2200 void __init rcu_init_nohz(void)
2201 {
2202 	int cpu;
2203 	bool need_rcu_nocb_mask = false;
2204 	struct rcu_data *rdp;
2205 
2206 #if defined(CONFIG_NO_HZ_FULL)
2207 	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
2208 		need_rcu_nocb_mask = true;
2209 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
2210 
2211 	if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
2212 		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
2213 			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
2214 			return;
2215 		}
2216 	}
2217 	if (!cpumask_available(rcu_nocb_mask))
2218 		return;
2219 
2220 #if defined(CONFIG_NO_HZ_FULL)
2221 	if (tick_nohz_full_running)
2222 		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
2223 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
2224 
2225 	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
2226 		pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
2227 		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
2228 			    rcu_nocb_mask);
2229 	}
2230 	if (cpumask_empty(rcu_nocb_mask))
2231 		pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
2232 	else
2233 		pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
2234 			cpumask_pr_args(rcu_nocb_mask));
2235 	if (rcu_nocb_poll)
2236 		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
2237 
2238 	for_each_cpu(cpu, rcu_nocb_mask) {
2239 		rdp = per_cpu_ptr(&rcu_data, cpu);
2240 		if (rcu_segcblist_empty(&rdp->cblist))
2241 			rcu_segcblist_init(&rdp->cblist);
2242 		rcu_segcblist_offload(&rdp->cblist);
2243 	}
2244 	rcu_organize_nocb_kthreads();
2245 }
2246 
2247 /* Initialize per-rcu_data variables for no-CBs CPUs. */
2248 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2249 {
2250 	init_swait_queue_head(&rdp->nocb_cb_wq);
2251 	init_swait_queue_head(&rdp->nocb_gp_wq);
2252 	raw_spin_lock_init(&rdp->nocb_lock);
2253 	raw_spin_lock_init(&rdp->nocb_bypass_lock);
2254 	raw_spin_lock_init(&rdp->nocb_gp_lock);
2255 	timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
2256 	timer_setup(&rdp->nocb_bypass_timer, do_nocb_bypass_wakeup_timer, 0);
2257 	rcu_cblist_init(&rdp->nocb_bypass);
2258 }
2259 
2260 /*
2261  * If the specified CPU is a no-CBs CPU that does not already have its
2262  * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
2263  * for this CPU's group has not yet been created, spawn it as well.
2264  */
2265 static void rcu_spawn_one_nocb_kthread(int cpu)
2266 {
2267 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2268 	struct rcu_data *rdp_gp;
2269 	struct task_struct *t;
2270 
2271 	/*
2272 	 * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
2273 	 * then nothing to do.
2274 	 */
2275 	if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
2276 		return;
2277 
2278 	/* If we didn't spawn the GP kthread first, reorganize! */
2279 	rdp_gp = rdp->nocb_gp_rdp;
2280 	if (!rdp_gp->nocb_gp_kthread) {
2281 		t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
2282 				"rcuog/%d", rdp_gp->cpu);
2283 		if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
2284 			return;
2285 		WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
2286 	}
2287 
2288 	/* Spawn the kthread for this CPU. */
2289 	t = kthread_run(rcu_nocb_cb_kthread, rdp,
2290 			"rcuo%c/%d", rcu_state.abbr, cpu);
2291 	if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
2292 		return;
2293 	WRITE_ONCE(rdp->nocb_cb_kthread, t);
2294 	WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
2295 }
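/*
 * Illustrative example (editorial): on a preemptible-RCU kernel
 * (rcu_state.abbr == 'p') booted with rcu_nocbs=0-3, the code above
 * typically produces kthreads named rcuog/0, rcuop/0, rcuop/1,
 * rcuop/2, and rcuop/3, assuming all four CPUs fall into the same
 * GP group.
 */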
2296 
2297 /*
2298  * If the specified CPU is a no-CBs CPU that does not already have its
2299  * rcuo kthread, spawn it.
2300  */
2301 static void rcu_spawn_cpu_nocb_kthread(int cpu)
2302 {
2303 	if (rcu_scheduler_fully_active)
2304 		rcu_spawn_one_nocb_kthread(cpu);
2305 }
2306 
2307 /*
2308  * Once the scheduler is running, spawn rcuo kthreads for all online
2309  * no-CBs CPUs.  This assumes that the early_initcall()s happen before
2310  * non-boot CPUs come online -- if this changes, we will need to add
2311  * some mutual exclusion.
2312  */
2313 static void __init rcu_spawn_nocb_kthreads(void)
2314 {
2315 	int cpu;
2316 
2317 	for_each_online_cpu(cpu)
2318 		rcu_spawn_cpu_nocb_kthread(cpu);
2319 }
2320 
2321 /* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
2322 static int rcu_nocb_gp_stride = -1;
2323 module_param(rcu_nocb_gp_stride, int, 0444);
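/*
 * Worked example (editorial): with the default value of -1,
 * rcu_organize_nocb_kthreads() below computes the stride as
 * nr_cpu_ids / int_sqrt(nr_cpu_ids).  For nr_cpu_ids == 64 this gives
 * 64 / 8 = 8, so one rcuog GP kthread serves each group of eight
 * offloaded CPUs.
 */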
2324 
2325 /*
2326  * Initialize GP-CB relationships for all no-CBs CPUs.
2327  */
2328 static void __init rcu_organize_nocb_kthreads(void)
2329 {
2330 	int cpu;
2331 	bool firsttime = true;
2332 	bool gotnocbs = false;
2333 	bool gotnocbscbs = true;
2334 	int ls = rcu_nocb_gp_stride;
2335 	int nl = 0;  /* Next GP kthread. */
2336 	struct rcu_data *rdp;
2337 	struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */
2338 	struct rcu_data *rdp_prev = NULL;
2339 
2340 	if (!cpumask_available(rcu_nocb_mask))
2341 		return;
2342 	if (ls == -1) {
2343 		ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
2344 		rcu_nocb_gp_stride = ls;
2345 	}
2346 
2347 	/*
2348 	 * Each pass through this loop sets up one rcu_data structure.
2349 	 * Should the corresponding CPU come online in the future, then
2350 	 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
2351 	 */
2352 	for_each_cpu(cpu, rcu_nocb_mask) {
2353 		rdp = per_cpu_ptr(&rcu_data, cpu);
2354 		if (rdp->cpu >= nl) {
2355 			/* New GP kthread, set up for CBs & next GP. */
2356 			gotnocbs = true;
2357 			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
2358 			rdp->nocb_gp_rdp = rdp;
2359 			rdp_gp = rdp;
2360 			if (dump_tree) {
2361 				if (!firsttime)
2362 					pr_cont("%s\n", gotnocbscbs
2363 							? "" : " (self only)");
2364 				gotnocbscbs = false;
2365 				firsttime = false;
2366 				pr_alert("%s: No-CB GP kthread CPU %d:",
2367 					 __func__, cpu);
2368 			}
2369 		} else {
2370 			/* Another CB kthread, link to previous GP kthread. */
2371 			gotnocbscbs = true;
2372 			rdp->nocb_gp_rdp = rdp_gp;
2373 			rdp_prev->nocb_next_cb_rdp = rdp;
2374 			if (dump_tree)
2375 				pr_cont(" %d", cpu);
2376 		}
2377 		rdp_prev = rdp;
2378 	}
2379 	if (gotnocbs && dump_tree)
2380 		pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
2381 }
2382 
2383 /*
2384  * Bind the current task to the offloaded CPUs.  If there are no offloaded
2385  * CPUs, leave the task unbound.  Splat if the bind attempt fails.
2386  */
2387 void rcu_bind_current_to_nocb(void)
2388 {
2389 	if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
2390 		WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
2391 }
2392 EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
2393 
2394 /*
2395  * Dump out nocb grace-period kthread state for the specified rcu_data
2396  * structure.
2397  */
2398 static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
2399 {
2400 	struct rcu_node *rnp = rdp->mynode;
2401 
2402 	pr_info("nocb GP %d %c%c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu\n",
2403 		rdp->cpu,
2404 		"kK"[!!rdp->nocb_gp_kthread],
2405 		"lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
2406 		"dD"[!!rdp->nocb_defer_wakeup],
2407 		"tT"[timer_pending(&rdp->nocb_timer)],
2408 		"bB"[timer_pending(&rdp->nocb_bypass_timer)],
2409 		"sS"[!!rdp->nocb_gp_sleep],
2410 		".W"[swait_active(&rdp->nocb_gp_wq)],
2411 		".W"[swait_active(&rnp->nocb_gp_wq[0])],
2412 		".W"[swait_active(&rnp->nocb_gp_wq[1])],
2413 		".B"[!!rdp->nocb_gp_bypass],
2414 		".G"[!!rdp->nocb_gp_gp],
2415 		(long)rdp->nocb_gp_seq,
2416 		rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops));
2417 }
2418 
2419 /* Dump out nocb kthread state for the specified rcu_data structure. */
2420 static void show_rcu_nocb_state(struct rcu_data *rdp)
2421 {
2422 	struct rcu_segcblist *rsclp = &rdp->cblist;
2423 	bool waslocked;
2424 	bool wastimer;
2425 	bool wassleep;
2426 
2427 	if (rdp->nocb_gp_rdp == rdp)
2428 		show_rcu_nocb_gp_state(rdp);
2429 
2430 	pr_info("   CB %d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%c%c%c q%ld\n",
2431 		rdp->cpu, rdp->nocb_gp_rdp->cpu,
2432 		"kK"[!!rdp->nocb_cb_kthread],
2433 		"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
2434 		"cC"[!!atomic_read(&rdp->nocb_lock_contended)],
2435 		"lL"[raw_spin_is_locked(&rdp->nocb_lock)],
2436 		"sS"[!!rdp->nocb_cb_sleep],
2437 		".W"[swait_active(&rdp->nocb_cb_wq)],
2438 		jiffies - rdp->nocb_bypass_first,
2439 		jiffies - rdp->nocb_nobypass_last,
2440 		rdp->nocb_nobypass_count,
2441 		".D"[rcu_segcblist_ready_cbs(rsclp)],
2442 		".W"[!rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL)],
2443 		".R"[!rcu_segcblist_restempty(rsclp, RCU_WAIT_TAIL)],
2444 		".N"[!rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL)],
2445 		".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
2446 		rcu_segcblist_n_cbs(&rdp->cblist));
2447 
2448 	/* It is OK for GP kthreads to have GP state. */
2449 	if (rdp->nocb_gp_rdp == rdp)
2450 		return;
2451 
2452 	waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
2453 	wastimer = timer_pending(&rdp->nocb_bypass_timer);
2454 	wassleep = swait_active(&rdp->nocb_gp_wq);
2455 	if (!rdp->nocb_gp_sleep && !waslocked && !wastimer && !wassleep)
2456 		return;  /* Nothing untoward. */
2457 
2458 	pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c%c %c\n",
2459 		"lL"[waslocked],
2460 		"dD"[!!rdp->nocb_defer_wakeup],
2461 		"tT"[wastimer],
2462 		"sS"[!!rdp->nocb_gp_sleep],
2463 		".W"[wassleep]);
2464 }
2465 
2466 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
2467 
2468 /* No ->nocb_lock to acquire.  */
2469 static void rcu_nocb_lock(struct rcu_data *rdp)
2470 {
2471 }
2472 
2473 /* No ->nocb_lock to release.  */
2474 static void rcu_nocb_unlock(struct rcu_data *rdp)
2475 {
2476 }
2477 
2478 /* No ->nocb_lock to release.  */
2479 static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
2480 				       unsigned long flags)
2481 {
2482 	local_irq_restore(flags);
2483 }
2484 
2485 /* Lockdep check that ->cblist may be safely accessed. */
2486 static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
2487 {
2488 	lockdep_assert_irqs_disabled();
2489 }
2490 
2491 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
2492 {
2493 }
2494 
2495 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
2496 {
2497 	return NULL;
2498 }
2499 
2500 static void rcu_init_one_nocb(struct rcu_node *rnp)
2501 {
2502 }
2503 
2504 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
2505 				  unsigned long j)
2506 {
2507 	return true;
2508 }
2509 
2510 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
2511 				bool *was_alldone, unsigned long flags)
2512 {
2513 	return false;
2514 }
2515 
2516 static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
2517 				 unsigned long flags)
2518 {
2519 	WARN_ON_ONCE(1);  /* Should be dead code! */
2520 }
2521 
2522 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
2523 {
2524 }
2525 
2526 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
2527 {
2528 	return false;
2529 }
2530 
2531 static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
2532 {
2533 }
2534 
2535 static void rcu_spawn_cpu_nocb_kthread(int cpu)
2536 {
2537 }
2538 
2539 static void __init rcu_spawn_nocb_kthreads(void)
2540 {
2541 }
2542 
2543 static void show_rcu_nocb_state(struct rcu_data *rdp)
2544 {
2545 }
2546 
2547 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
2548 
2549 /*
2550  * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
2551  * grace-period kthread will do force_quiescent_state() processing?
2552  * The idea is to avoid waking up RCU core processing on such a
2553  * CPU unless the grace period has extended for too long.
2554  *
2555  * This code relies on the fact that all NO_HZ_FULL CPUs are also
2556  * CONFIG_RCU_NOCB_CPU CPUs.
2557  */
2558 static bool rcu_nohz_full_cpu(void)
2559 {
2560 #ifdef CONFIG_NO_HZ_FULL
2561 	if (tick_nohz_full_cpu(smp_processor_id()) &&
2562 	    (!rcu_gp_in_progress() ||
2563 	     time_before(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
2564 		return true;
2565 #endif /* #ifdef CONFIG_NO_HZ_FULL */
2566 	return false;
2567 }
2568 
2569 /*
2570  * Bind the RCU grace-period kthreads to the housekeeping CPU.
2571  */
2572 static void rcu_bind_gp_kthread(void)
2573 {
2574 	if (!tick_nohz_full_enabled())
2575 		return;
2576 	housekeeping_affine(current, HK_FLAG_RCU);
2577 }
2578 
2579 /* Record the current task on dyntick-idle entry. */
2580 static __always_inline void rcu_dynticks_task_enter(void)
2581 {
2582 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
2583 	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
2584 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
2585 }
2586 
2587 /* Record no current task on dyntick-idle exit. */
2588 static __always_inline void rcu_dynticks_task_exit(void)
2589 {
2590 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
2591 	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
2592 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
2593 }
2594 
2595 /* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
2596 static __always_inline void rcu_dynticks_task_trace_enter(void)
2597 {
2598 #ifdef CONFIG_TASKS_TRACE_RCU
2599 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
2600 		current->trc_reader_special.b.need_mb = true;
2601 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
2602 }
2603 
2604 /* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
2605 static __always_inline void rcu_dynticks_task_trace_exit(void)
2606 {
2607 #ifdef CONFIG_TASKS_TRACE_RCU
2608 	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
2609 		current->trc_reader_special.b.need_mb = false;
2610 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
2611 }
2612