/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * Copyright Red Hat, 2009
 * Copyright IBM Corporation, 2009
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include "../locking/rtmutex_common.h"

static bool rcu_rdp_is_offloaded(struct rcu_data *rdp)
{
	/*
	 * In order to read the offloaded state of an rdp in a safe
	 * and stable way and prevent its value from changing under
	 * us, we must either hold the barrier mutex, the cpu
	 * hotplug lock (read or write) or the nocb lock. Local
	 * non-preemptible reads are also safe. NOCB kthreads and
	 * timers have their own means of synchronization against the
	 * offloaded state updaters.
	 */
	RCU_LOCKDEP_WARN(
		!(lockdep_is_held(&rcu_state.barrier_mutex) ||
		  (IS_ENABLED(CONFIG_HOTPLUG_CPU) && lockdep_is_cpus_held()) ||
		  rcu_lockdep_is_held_nocb(rdp) ||
		  (rdp == this_cpu_ptr(&rcu_data) &&
		   !(IS_ENABLED(CONFIG_PREEMPT_COUNT) && preemptible())) ||
		  rcu_current_is_nocb_kthread(rdp)),
		"Unsafe read of RCU_NOCB offloaded state"
	);

	return rcu_segcblist_is_offloaded(&rdp->cblist);
}
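
/*
 * Illustrative sketch (not part of this file): one way a caller can
 * satisfy the lockdep check above is to pin CPU hotplug across the
 * read.  The helper name my_cpu_is_offloaded() is hypothetical.
 *
 *	static bool my_cpu_is_offloaded(int cpu)
 *	{
 *		bool ret;
 *
 *		cpus_read_lock();	// lockdep_is_cpus_held() now true.
 *		ret = rcu_rdp_is_offloaded(per_cpu_ptr(&rcu_data, cpu));
 *		cpus_read_unlock();
 *		return ret;
 *	}
 */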

/*
 * Check the RCU kernel configuration parameters and print informative
 * messages about anything out of the ordinary.
 */
static void __init rcu_bootup_announce_oddness(void)
{
	if (IS_ENABLED(CONFIG_RCU_TRACE))
		pr_info("\tRCU event tracing is enabled.\n");
	if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
	    (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
		pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
			RCU_FANOUT);
	if (rcu_fanout_exact)
		pr_info("\tHierarchical RCU autobalancing is disabled.\n");
	if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
		pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
	if (IS_ENABLED(CONFIG_PROVE_RCU))
		pr_info("\tRCU lockdep checking is enabled.\n");
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
		pr_info("\tRCU strict (and thus non-scalable) grace periods enabled.\n");
	if (RCU_NUM_LVLS >= 4)
		pr_info("\tFour(or more)-level hierarchy is enabled.\n");
	if (RCU_FANOUT_LEAF != 16)
		pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
			RCU_FANOUT_LEAF);
	if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
		pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
			rcu_fanout_leaf);
	if (nr_cpu_ids != NR_CPUS)
		pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
#ifdef CONFIG_RCU_BOOST
	pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
		kthread_prio, CONFIG_RCU_BOOST_DELAY);
#endif
	if (blimit != DEFAULT_RCU_BLIMIT)
		pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
	if (qhimark != DEFAULT_RCU_QHIMARK)
		pr_info("\tBoot-time adjustment of callback high-water mark to %ld.\n", qhimark);
	if (qlowmark != DEFAULT_RCU_QLOMARK)
		pr_info("\tBoot-time adjustment of callback low-water mark to %ld.\n", qlowmark);
	if (qovld != DEFAULT_RCU_QOVLD)
		pr_info("\tBoot-time adjustment of callback overload level to %ld.\n", qovld);
	if (jiffies_till_first_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of first FQS scan delay to %ld jiffies.\n", jiffies_till_first_fqs);
	if (jiffies_till_next_fqs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of subsequent FQS scan delay to %ld jiffies.\n", jiffies_till_next_fqs);
	if (jiffies_till_sched_qs != ULONG_MAX)
		pr_info("\tBoot-time adjustment of scheduler-enlistment delay to %ld jiffies.\n", jiffies_till_sched_qs);
	if (rcu_kick_kthreads)
		pr_info("\tKick kthreads if too-long grace period.\n");
	if (IS_ENABLED(CONFIG_DEBUG_OBJECTS_RCU_HEAD))
		pr_info("\tRCU callback double-/use-after-free debug enabled.\n");
	if (gp_preinit_delay)
		pr_info("\tRCU debug GP pre-init slowdown %d jiffies.\n", gp_preinit_delay);
	if (gp_init_delay)
		pr_info("\tRCU debug GP init slowdown %d jiffies.\n", gp_init_delay);
	if (gp_cleanup_delay)
		pr_info("\tRCU debug GP cleanup slowdown %d jiffies.\n", gp_cleanup_delay);
	if (!use_softirq)
		pr_info("\tRCU_SOFTIRQ processing moved to rcuc kthreads.\n");
	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG))
		pr_info("\tRCU debug extended QS entry/exit.\n");
	rcupdate_announce_bootup_oddness();
}

#ifdef CONFIG_PREEMPT_RCU

static void rcu_report_exp_rnp(struct rcu_node *rnp, bool wake);
static void rcu_read_unlock_special(struct task_struct *t);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Preemptible hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/* Flags for rcu_preempt_ctxt_queue() decision table. */
#define RCU_GP_TASKS	0x8
#define RCU_EXP_TASKS	0x4
#define RCU_GP_BLKD	0x2
#define RCU_EXP_BLKD	0x1

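/*
 * Worked example (illustrative, not part of this file): suppose a task
 * is preempted while the normal GP is already waiting on this rcu_node
 * (->gp_tasks != NULL), no expedited GP is in flight, and this CPU is
 * still blocking the normal GP (rnp->qsmask & rdp->grpmask).  Then:
 *
 *	blkd_state = RCU_GP_TASKS + RCU_GP_BLKD = 0x8 + 0x2 = 0xa
 *
 * which selects the "queue just after the first task blocking the
 * normal GP" case in the switch statement below.
 */
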
/*
 * Queues a task preempted within an RCU-preempt read-side critical
 * section into the appropriate location within the ->blkd_tasks list,
 * depending on the states of any ongoing normal and expedited grace
 * periods.  The ->gp_tasks pointer indicates which element the normal
 * grace period is waiting on (NULL if none), and the ->exp_tasks pointer
 * indicates which element the expedited grace period is waiting on (again,
 * NULL if none).  If a grace period is waiting on a given element in the
 * ->blkd_tasks list, it also waits on all subsequent elements.  Thus,
 * adding a task to the tail of the list blocks any grace period that is
 * already waiting on one of the elements.  In contrast, adding a task
 * to the head of the list won't block any grace period that is already
 * waiting on one of the elements.
 *
 * This queuing is imprecise, and can sometimes make an ongoing grace
 * period wait for a task that is not strictly speaking blocking it.
 * Given the choice, we needlessly block a normal grace period rather than
 * blocking an expedited grace period.
 *
 * Note that an endless sequence of expedited grace periods still cannot
 * indefinitely postpone a normal grace period.  Eventually, all of the
 * fixed number of preempted tasks blocking the normal grace period that are
 * not also blocking the expedited grace period will resume and complete
 * their RCU read-side critical sections.  At that point, the ->gp_tasks
 * pointer will equal the ->exp_tasks pointer, at which point the end of
 * the corresponding expedited grace period will also be the end of the
 * normal grace period.
 */
static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
	__releases(rnp->lock) /* But leaves interrupts disabled. */
{
	int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
			 (rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
			 (rnp->qsmask & rdp->grpmask ? RCU_GP_BLKD : 0) +
			 (rnp->expmask & rdp->grpmask ? RCU_EXP_BLKD : 0);
	struct task_struct *t = current;

	raw_lockdep_assert_held_rcu_node(rnp);
	WARN_ON_ONCE(rdp->mynode != rnp);
	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
	/* RCU better not be waiting on newly onlined CPUs! */
	WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
		     rdp->grpmask);

	/*
	 * Decide where to queue the newly blocked task.  In theory,
	 * this could be an if-statement.  In practice, when I tried
	 * that, it was quite messy.
	 */
	switch (blkd_state) {
	case 0:
	case                RCU_EXP_TASKS:
	case                RCU_EXP_TASKS + RCU_GP_BLKD:
	case RCU_GP_TASKS:
	case RCU_GP_TASKS + RCU_EXP_TASKS:

		/*
		 * Blocking neither GP, or first task blocking the normal
		 * GP but not blocking the already-waiting expedited GP.
		 * Queue at the head of the list to avoid unnecessarily
		 * blocking the already-waiting GPs.
		 */
		list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case                                              RCU_EXP_BLKD:
	case                                RCU_GP_BLKD:
	case                                RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS +                               RCU_EXP_BLKD:
	case RCU_GP_TASKS +                 RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:

		/*
		 * First task arriving that blocks either GP, or first task
		 * arriving that blocks the expedited GP (with the normal
		 * GP already waiting), or a task arriving that blocks
		 * both GPs with both GPs already waiting.  Queue at the
		 * tail of the list to avoid any GP waiting on any of the
		 * already queued tasks that are not blocking it.
		 */
		list_add_tail(&t->rcu_node_entry, &rnp->blkd_tasks);
		break;

	case                RCU_EXP_TASKS +               RCU_EXP_BLKD:
	case                RCU_EXP_TASKS + RCU_GP_BLKD + RCU_EXP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS +               RCU_EXP_BLKD:

		/*
		 * Second or subsequent task blocking the expedited GP.
		 * The task either does not block the normal GP, or is the
		 * first task blocking the normal GP.  Queue just after
		 * the first task blocking the expedited GP.
		 */
		list_add(&t->rcu_node_entry, rnp->exp_tasks);
		break;

	case RCU_GP_TASKS +                 RCU_GP_BLKD:
	case RCU_GP_TASKS + RCU_EXP_TASKS + RCU_GP_BLKD:

		/*
		 * Second or subsequent task blocking the normal GP.
		 * The task does not block the expedited GP. Queue just
		 * after the first task blocking the normal GP.
		 */
		list_add(&t->rcu_node_entry, rnp->gp_tasks);
		break;

	default:

		/* Yet another exercise in excessive paranoia. */
		WARN_ON_ONCE(1);
		break;
	}

	/*
	 * We have now queued the task.  If it was the first one to
	 * block either grace period, update the ->gp_tasks and/or
	 * ->exp_tasks pointers, respectively, to reference the newly
	 * blocked tasks.
	 */
	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
		WRITE_ONCE(rnp->gp_tasks, &t->rcu_node_entry);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
	}
	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
		WRITE_ONCE(rnp->exp_tasks, &t->rcu_node_entry);
	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
		     !(rnp->qsmask & rdp->grpmask));
	WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
		     !(rnp->expmask & rdp->grpmask));
	raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */

	/*
	 * Report the quiescent state for the expedited GP.  This expedited
	 * GP should not be able to end until we report, so there should be
	 * no need to check for a subsequent expedited GP.  (Though we are
	 * still in a quiescent state in any case.)
	 */
	if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs)
		rcu_report_exp_rdp(rdp);
	else
		WARN_ON_ONCE(rdp->exp_deferred_qs);
}

/*
 * Record a preemptible-RCU quiescent state for the specified CPU.
 * Note that this does not necessarily mean that the task currently running
 * on the CPU is in a quiescent state:  Instead, it means that the current
 * grace period need not wait on any RCU read-side critical section that
 * starts later on this CPU.  It also means that if the current task is
 * in an RCU read-side critical section, it has already added itself to
 * some leaf rcu_node structure's ->blkd_tasks list.  In addition to the
 * current task, there might be any number of other tasks blocked while
 * in an RCU read-side critical section.
 *
 * Callers to this function must disable preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!\n");
	if (__this_cpu_read(rcu_data.cpu_no_qs.s)) {
		trace_rcu_grace_period(TPS("rcu_preempt"),
				       __this_cpu_read(rcu_data.gp_seq),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
		barrier(); /* Coordinate with rcu_flavor_sched_clock_irq(). */
		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, false);
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section.  Therefore, the current grace period
 * cannot be permitted to complete until the blkd_tasks list entries
 * predating the current grace period drain, in other words, until
 * rnp->gp_tasks becomes NULL.
 *
 * Caller must disable interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	struct task_struct *t = current;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp;

	trace_rcu_utilization(TPS("Start context switch"));
	lockdep_assert_irqs_disabled();
	WARN_ONCE(!preempt && rcu_preempt_depth() > 0, "Voluntary context switch within RCU read-side critical section!");
	if (rcu_preempt_depth() > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {

		/* Possibly blocking in an RCU read-side critical section. */
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp);
		t->rcu_read_unlock_special.b.blocked = true;
		t->rcu_blocked_node = rnp;

		/*
		 * Verify the CPU's sanity, trace the preemption, and
		 * then queue the task as required based on the states
		 * of any ongoing normal and expedited grace periods.
		 */
		WARN_ON_ONCE((rdp->grpmask & rcu_rnp_online_cpus(rnp)) == 0);
		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
		trace_rcu_preempt_task(rcu_state.name,
				       t->pid,
				       (rnp->qsmask & rdp->grpmask)
				       ? rnp->gp_seq
				       : rcu_seq_snap(&rnp->gp_seq));
		rcu_preempt_ctxt_queue(rnp, rdp);
	} else {
		rcu_preempt_deferred_qs(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_qs();
	if (rdp->exp_deferred_qs)
		rcu_report_exp_rdp(rdp);
	rcu_tasks_qs(current, preempt);
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Check for preempted RCU readers blocking the current grace period
 * for the specified rcu_node structure.  If the caller needs a reliable
 * answer, it must hold the rcu_node's ->lock.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->gp_tasks) != NULL;
}

/* limit value for ->rcu_read_lock_nesting. */
#define RCU_NEST_PMAX (INT_MAX / 2)

static void rcu_preempt_read_enter(void)
{
	WRITE_ONCE(current->rcu_read_lock_nesting, READ_ONCE(current->rcu_read_lock_nesting) + 1);
}

static int rcu_preempt_read_exit(void)
{
	int ret = READ_ONCE(current->rcu_read_lock_nesting) - 1;

	WRITE_ONCE(current->rcu_read_lock_nesting, ret);
	return ret;
}

static void rcu_preempt_depth_set(int val)
{
	WRITE_ONCE(current->rcu_read_lock_nesting, val);
}

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	rcu_preempt_read_enter();
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
		WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) && rcu_state.gp_kthread)
		WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  // critical section before exit code.
	if (rcu_preempt_read_exit() == 0) {
		barrier();  // critical-section exit before .s check.
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
	}
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		int rrln = rcu_preempt_depth();

		WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX);
	}
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
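
/*
 * Illustrative sketch (not part of this file): nesting is tracked purely
 * in ->rcu_read_lock_nesting, so only the outermost unlock can trigger
 * rcu_read_unlock_special():
 *
 *	rcu_read_lock();		// nesting 0 -> 1
 *	rcu_read_lock();		// nesting 1 -> 2
 *	rcu_read_unlock();		// nesting 2 -> 1, no special work
 *	rcu_read_unlock();		// nesting 1 -> 0, ->s checked here
 */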

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t,
					     struct rcu_node *rnp)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rnp->blkd_tasks)
		np = NULL;
	return np;
}

/*
 * Return true if the specified rcu_node structure has tasks that were
 * preempted within an RCU read-side critical section.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return !list_empty(&rnp->blkd_tasks);
}

/*
 * Report deferred quiescent states.  The deferral time can
 * be quite short, for example, in the case of the call from
 * rcu_read_unlock_special().
 */
static notrace void
rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
{
	bool empty_exp;
	bool empty_norm;
	bool empty_exp_now;
	struct list_head *np;
	bool drop_boost_mutex = false;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	union rcu_special special;

	/*
	 * If RCU core is waiting for this CPU to exit its critical section,
	 * report the fact that it has exited.  Because irqs are disabled,
	 * t->rcu_read_unlock_special cannot change.
	 */
	special = t->rcu_read_unlock_special;
	rdp = this_cpu_ptr(&rcu_data);
	if (!special.s && !rdp->exp_deferred_qs) {
		local_irq_restore(flags);
		return;
	}
	t->rcu_read_unlock_special.s = 0;
	if (special.b.need_qs) {
		if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
			rcu_report_qs_rdp(rdp);
			udelay(rcu_unlock_delay);
		} else {
			rcu_qs();
		}
	}

	/*
	 * Respond to a request by an expedited grace period for a
	 * quiescent state from this CPU.  Note that requests from
	 * tasks are handled when removing the task from the
	 * blocked-tasks list below.
	 */
	if (rdp->exp_deferred_qs)
		rcu_report_exp_rdp(rdp);

	/* Clean up if blocked during RCU read-side critical section. */
	if (special.b.blocked) {

		/*
		 * Remove this task from the list it blocked on.  The task
		 * now remains queued on the rcu_node corresponding to the
		 * CPU it first blocked on, so there is no longer any need
		 * to loop.  Retain a WARN_ON_ONCE() out of sheer paranoia.
		 */
		rnp = t->rcu_blocked_node;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
			     (!empty_norm || rnp->qsmask));
		empty_exp = sync_rcu_exp_done(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		np = rcu_next_node_entry(t, rnp);
		list_del_init(&t->rcu_node_entry);
		t->rcu_blocked_node = NULL;
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
						rnp->gp_seq, t->pid);
		if (&t->rcu_node_entry == rnp->gp_tasks)
			WRITE_ONCE(rnp->gp_tasks, np);
		if (&t->rcu_node_entry == rnp->exp_tasks)
			WRITE_ONCE(rnp->exp_tasks, np);
		if (IS_ENABLED(CONFIG_RCU_BOOST)) {
			/* Snapshot ->boost_mtx ownership w/rnp->lock held. */
			drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx.rtmutex) == t;
			if (&t->rcu_node_entry == rnp->boost_tasks)
				WRITE_ONCE(rnp->boost_tasks, np);
		}

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on any CPUs, report the quiescent state.
		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
		 * so we must take a snapshot of the expedited state.
		 */
		empty_exp_now = sync_rcu_exp_done(rnp);
		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
							 rnp->gp_seq,
							 0, rnp->qsmask,
							 rnp->level,
							 rnp->grplo,
							 rnp->grphi,
							 !!rnp->gp_tasks);
			rcu_report_unblock_qs_rnp(rnp, flags);
		} else {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to report up the rcu_node hierarchy.
		 */
		if (!empty_exp && empty_exp_now)
			rcu_report_exp_rnp(rnp, true);

		/* Unboost if we were boosted. */
		if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
			rt_mutex_futex_unlock(&rnp->boost_mtx.rtmutex);
	} else {
		local_irq_restore(flags);
	}
}

/*
 * Is a deferred quiescent-state pending, and are we also not in
 * an RCU read-side critical section?  It is the caller's responsibility
 * to ensure it is otherwise safe to report any deferred quiescent
 * states.  The reason for this is that it is safe to report a
 * quiescent state during context switch even though preemption
 * is disabled.  This function cannot be expected to understand these
 * nuances, so the caller must handle them.
 */
static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
		READ_ONCE(t->rcu_read_unlock_special.s)) &&
	       rcu_preempt_depth() == 0;
}

/*
 * Report a deferred quiescent state if needed and safe to do so.
 * As with rcu_preempt_need_deferred_qs(), "safe" involves only
 * not being in an RCU read-side critical section.  The caller must
 * evaluate safety in terms of interrupt, softirq, and preemption
 * disabling.
 */
static notrace void rcu_preempt_deferred_qs(struct task_struct *t)
{
	unsigned long flags;

	if (!rcu_preempt_need_deferred_qs(t))
		return;
	local_irq_save(flags);
	rcu_preempt_deferred_qs_irqrestore(t, flags);
}
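
/*
 * Illustrative sketch (not part of this file): the _irqrestore() variant
 * above consumes the caller's saved flags, so an open-coded equivalent of
 * rcu_preempt_deferred_qs() would look like:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	rcu_preempt_deferred_qs_irqrestore(current, flags);
 *	// irqs are restored by the callee; no local_irq_restore() here.
 */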

/*
 * Minimal handler to give the scheduler a chance to re-evaluate.
 */
static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;

	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
	rdp->defer_qs_iw_pending = false;
}

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or task having blocked during the RCU
 * read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	unsigned long flags;
	bool irqs_were_disabled;
	bool preempt_bh_were_disabled =
			!!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);
	irqs_were_disabled = irqs_disabled_flags(flags);
	if (preempt_bh_were_disabled || irqs_were_disabled) {
		bool expboost; // Expedited GP in flight or possible boosting.
		struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
		struct rcu_node *rnp = rdp->mynode;

		expboost = (t->rcu_blocked_node && READ_ONCE(t->rcu_blocked_node->exp_tasks)) ||
			   (rdp->grpmask & READ_ONCE(rnp->expmask)) ||
			   (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
			   ((rdp->grpmask & READ_ONCE(rnp->qsmask)) || t->rcu_blocked_node)) ||
			   (IS_ENABLED(CONFIG_RCU_BOOST) && irqs_were_disabled &&
			    t->rcu_blocked_node);
		// Need to defer quiescent state until everything is enabled.
		if (use_softirq && (in_irq() || (expboost && !irqs_were_disabled))) {
			// Using softirq, safe to awaken, and either the
			// wakeup is free or there is either an expedited
			// GP in flight or a potential need to deboost.
			raise_softirq_irqoff(RCU_SOFTIRQ);
		} else {
			// Enabling BH or preempt does reschedule, so...
			// Also if no expediting and no possible deboosting,
			// slow is OK.  Plus nohz_full CPUs eventually get
			// tick enabled.
			set_tsk_need_resched(current);
			set_preempt_need_resched();
			if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
			    expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) {
				// Get scheduler to re-evaluate and call hooks.
				// If !IRQ_WORK, FQS scan will eventually IPI.
				init_irq_work(&rdp->defer_qs_iw, rcu_preempt_deferred_qs_handler);
				rdp->defer_qs_iw_pending = true;
				irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
			}
		}
		local_irq_restore(flags);
		return;
	}
	rcu_preempt_deferred_qs_irqrestore(t, flags);
}

/*
 * Check that the list of blocked tasks for the newly completed grace
 * period is in fact empty.  It is a serious bug to complete a grace
 * period that still has RCU readers blocked!  This function must be
 * invoked -before- updating this rnp's ->gp_seq.
 *
 * Also, if there are blocked tasks on the list, they automatically
 * block the newly created grace period, so set up ->gp_tasks accordingly.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	struct task_struct *t;

	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
	raw_lockdep_assert_held_rcu_node(rnp);
	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
		dump_blkd_tasks(rnp, 10);
	if (rcu_preempt_has_tasks(rnp) &&
	    (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
		WRITE_ONCE(rnp->gp_tasks, rnp->blkd_tasks.next);
		t = container_of(rnp->gp_tasks, struct task_struct,
				 rcu_node_entry);
		trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
						rnp->gp_seq, t->pid);
	}
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check for a quiescent state from the current CPU, including voluntary
 * context switches for Tasks RCU.  When a task blocks, the task is
 * recorded in the corresponding CPU's rcu_node structure, which is checked
 * elsewhere, hence this function need only check for quiescent states
 * related to the current CPU, not to those related to tasks.
 */
static void rcu_flavor_sched_clock_irq(int user)
{
	struct task_struct *t = current;

	lockdep_assert_irqs_disabled();
	if (user || rcu_is_cpu_rrupt_from_idle()) {
		rcu_note_voluntary_context_switch(current);
	}
	if (rcu_preempt_depth() > 0 ||
	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
		/* No QS, force context switch if deferred. */
		if (rcu_preempt_need_deferred_qs(t)) {
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
	} else if (rcu_preempt_need_deferred_qs(t)) {
		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
		return;
	} else if (!WARN_ON_ONCE(rcu_preempt_depth())) {
		rcu_qs(); /* Report immediate QS. */
		return;
	}

	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
	if (rcu_preempt_depth() > 0 &&
	    __this_cpu_read(rcu_data.core_needs_qs) &&
	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
	    !t->rcu_read_unlock_special.b.need_qs &&
	    time_after(jiffies, rcu_state.gp_start + HZ))
		t->rcu_read_unlock_special.b.need_qs = true;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings, as
 * debug_check_no_locks_held() already does this if lockdep is enabled.
 * Besides, if this function does anything other than just immediately
 * return, there was a bug of some sort.  Spewing warnings from this
 * function is like as not to simply obscure important prior warnings.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (unlikely(!list_empty(&current->rcu_node_entry))) {
		rcu_preempt_depth_set(1);
		barrier();
		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
	} else if (unlikely(rcu_preempt_depth())) {
		rcu_preempt_depth_set(1);
	} else {
		return;
	}
	__rcu_read_unlock();
	rcu_preempt_deferred_qs(current);
}

/*
 * Dump the blocked-tasks state, but limit the list dump to the
 * specified number of elements.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	int cpu;
	int i;
	struct list_head *lhp;
	bool onl;
	struct rcu_data *rdp;
	struct rcu_node *rnp1;

	raw_lockdep_assert_held_rcu_node(rnp);
	pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
		__func__, rnp->grplo, rnp->grphi, rnp->level,
		(long)READ_ONCE(rnp->gp_seq), (long)rnp->completedqs);
	for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
		pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
			__func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
	pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
		__func__, READ_ONCE(rnp->gp_tasks), data_race(rnp->boost_tasks),
		READ_ONCE(rnp->exp_tasks));
	pr_info("%s: ->blkd_tasks", __func__);
	i = 0;
	list_for_each(lhp, &rnp->blkd_tasks) {
		pr_cont(" %p", lhp);
		if (++i >= ncheck)
			break;
	}
	pr_cont("\n");
	for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
		pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
			cpu, ".o"[onl],
			(long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
			(long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
	}
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * If strict grace periods are enabled, and if the calling
 * __rcu_read_unlock() marks the beginning of a quiescent state, immediately
 * report that quiescent state and, if requested, spin for a bit.
 */
void rcu_read_unlock_strict(void)
{
	struct rcu_data *rdp;

	if (!IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ||
	   irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
		return;
	rdp = this_cpu_ptr(&rcu_data);
	rcu_report_qs_rdp(rdp);
	udelay(rcu_unlock_delay);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_strict);

/*
 * Tell them what RCU they are running.
 */
static void __init rcu_bootup_announce(void)
{
	pr_info("Hierarchical RCU implementation.\n");
	rcu_bootup_announce_oddness();
}

/*
 * Note a quiescent state for PREEMPTION=n.  Because we do not need to know
 * how many quiescent states passed, just if there was at least one since
 * the start of the grace period, this just sets a flag.  The caller must
 * have disabled preemption.
 */
static void rcu_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_qs() invoked with preemption enabled!!!");
	if (!__this_cpu_read(rcu_data.cpu_no_qs.s))
		return;
	trace_rcu_grace_period(TPS("rcu_sched"),
			       __this_cpu_read(rcu_data.gp_seq), TPS("cpuqs"));
	__this_cpu_write(rcu_data.cpu_no_qs.b.norm, false);
	if (!__this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, false);
	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
}

/*
 * Register an urgently needed quiescent state.  If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs, which will in
 * some cases serve for expedited as well as normal grace periods.
 * Either way, register a lightweight quiescent state.
 */
void rcu_all_qs(void)
{
	unsigned long flags;

	if (!raw_cpu_read(rcu_data.rcu_urgent_qs))
		return;
	preempt_disable();
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
		preempt_enable();
		return;
	}
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs))) {
		local_irq_save(flags);
		rcu_momentary_dyntick_idle();
		local_irq_restore(flags);
	}
	rcu_qs();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(rcu_all_qs);
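
/*
 * Illustrative sketch (not part of this file): rcu_all_qs() is meant to
 * be sprinkled into long-running kernel code paths.  The helpers
 * lots_of_work_left() and do_a_chunk_of_work() below are hypothetical:
 *
 *	while (lots_of_work_left()) {
 *		do_a_chunk_of_work();
 *		rcu_all_qs();	// cheap no-op unless RCU flagged urgency.
 *	}
 */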

/*
 * Note a PREEMPTION=n context switch. The caller must have disabled interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_qs();
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
		goto out;
	this_cpu_write(rcu_data.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_data.rcu_need_heavy_qs)))
		rcu_momentary_dyntick_idle();
	rcu_tasks_qs(current, preempt);
out:
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Because preemptible RCU does not exist, there are never any preempted
 * RCU readers.
 */
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
{
	return 0;
}

/*
 * Because there is no preemptible RCU, there can be no readers blocked.
 */
static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
{
	return false;
}

/*
 * Because there is no preemptible RCU, there can be no deferred quiescent
 * states.
 */
static notrace bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static void rcu_preempt_deferred_qs(struct task_struct *t) { }

/*
 * Because there is no preemptible RCU, there can be no readers blocked,
 * so there is no need to check for blocked tasks.  So check only for
 * bogus qsmask values.
 */
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
{
	WARN_ON_ONCE(rnp->qsmask);
}

/*
 * Check to see if this CPU is in a non-context-switch quiescent state,
 * namely user mode and idle loop.
 */
static void rcu_flavor_sched_clock_irq(int user)
{
	if (user || rcu_is_cpu_rrupt_from_idle()) {

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt.  In this case, the CPU is in
		 * a quiescent state, so note it.
		 *
		 * No memory barrier is required here because rcu_qs()
		 * references only CPU-local variables that other CPUs
		 * neither access nor modify, at least not while the
		 * corresponding CPU is online.
		 */

		rcu_qs();
	}
}

/*
 * Because preemptible RCU does not exist, tasks cannot possibly exit
 * while in preemptible RCU read-side critical sections.
 */
void exit_rcu(void)
{
}

/*
 * Dump the guaranteed-empty blocked-tasks state.  Trust but verify.
 */
static void
dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
{
	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * If boosting, set rcuc kthreads to realtime priority.
 */
static void rcu_cpu_kthread_setup(unsigned int cpu)
{
#ifdef CONFIG_RCU_BOOST
	struct sched_param sp;

	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
#endif /* #ifdef CONFIG_RCU_BOOST */
}

#ifdef CONFIG_RCU_BOOST

/*
 * Carry out RCU priority boosting on the task indicated by ->exp_tasks
 * or ->boost_tasks, advancing the pointer to the next task in the
 * ->blkd_tasks list.
 *
 * Note that irqs must be enabled: boosting the task can block.
 * Returns 1 if there are more tasks needing to be boosted.
 */
static int rcu_boost(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;
	struct list_head *tb;

	if (READ_ONCE(rnp->exp_tasks) == NULL &&
	    READ_ONCE(rnp->boost_tasks) == NULL)
		return 0;  /* Nothing left to boost. */

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/*
	 * Recheck under the lock: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own.
	 */
	if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rnp->exp_tasks != NULL)
		tb = rnp->exp_tasks;
	else
		tb = rnp->boost_tasks;

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.  (Thanks to tglx for suggesting this approach!)
	 *
	 * Note that task t must acquire rnp->lock to remove itself from
	 * the ->blkd_tasks list, which it will do from exit() if from
	 * nowhere else.  We therefore are guaranteed that task t will
	 * stay around at least until we drop rnp->lock.  Note that
	 * rnp->lock also resolves races between our priority boosting
	 * and task t's exiting its outermost RCU read-side critical
	 * section.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&rnp->boost_mtx.rtmutex, t);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	/* Lock only for side effect: boosts task t's priority. */
	rt_mutex_lock(&rnp->boost_mtx);
	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
	rnp->n_boosts++;

	return READ_ONCE(rnp->exp_tasks) != NULL ||
	       READ_ONCE(rnp->boost_tasks) != NULL;
}

/*
 * Priority-boosting kthread, one per leaf rcu_node.
 */
static int rcu_boost_kthread(void *arg)
{
	struct rcu_node *rnp = (struct rcu_node *)arg;
	int spincnt = 0;
	int more2boost;

	trace_rcu_utilization(TPS("Start boost kthread@init"));
	for (;;) {
		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_WAITING);
		trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
		rcu_wait(READ_ONCE(rnp->boost_tasks) ||
			 READ_ONCE(rnp->exp_tasks));
		trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
		WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_RUNNING);
		more2boost = rcu_boost(rnp);
		if (more2boost)
			spincnt++;
		else
			spincnt = 0;
		if (spincnt > 10) {
			WRITE_ONCE(rnp->boost_kthread_status, RCU_KTHREAD_YIELDING);
			trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
			schedule_timeout_idle(2);
			trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
			spincnt = 0;
		}
	}
	/* NOTREACHED */
	trace_rcu_utilization(TPS("End boost kthread@notreached"));
	return 0;
}

/*
 * Check to see if it is time to start boosting RCU readers that are
 * blocking the current grace period, and, if so, tell the per-rcu_node
 * kthread to start boosting them.  If there is an expedited grace
 * period in progress, it is always time to boost.
 *
 * The caller must hold rnp->lock, which this function releases.
 * The ->boost_kthread_task is immortal, so we don't need to worry
 * about it going away.
 */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	if (rnp->exp_tasks != NULL ||
	    (rnp->gp_tasks != NULL &&
	     rnp->boost_tasks == NULL &&
	     rnp->qsmask == 0 &&
	     (!time_after(rnp->boost_time, jiffies) || rcu_state.cbovld))) {
		if (rnp->exp_tasks == NULL)
			WRITE_ONCE(rnp->boost_tasks, rnp->gp_tasks);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		rcu_wake_cond(rnp->boost_kthread_task,
			      READ_ONCE(rnp->boost_kthread_status));
	} else {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Is the current CPU running the RCU-callbacks kthread?
 * Caller must have preemption disabled.
 */
static bool rcu_is_callbacks_kthread(void)
{
	return __this_cpu_read(rcu_data.rcu_cpu_kthread_task) == current;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
	rnp->boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
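
/*
 * Worked example (illustrative, not part of this file): with
 * CONFIG_RCU_BOOST_DELAY=500 (milliseconds) and HZ=250,
 *
 *	RCU_BOOST_DELAY_JIFFIES = DIV_ROUND_UP(500 * 250, 1000) = 125
 *
 * so boosting is not initiated until the grace period is at least
 * 125 jiffies (500 ms) old.
 */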

/*
 * Create an RCU-boost kthread for the specified node if one does not
 * already exist.  We only create this kthread for preemptible RCU.
 */
static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
	unsigned long flags;
	int rnp_index = rnp - rcu_get_root();
	struct sched_param sp;
	struct task_struct *t;

	if (rnp->boost_kthread_task || !rcu_scheduler_fully_active)
		return;

	rcu_state.boost = 1;

	t = kthread_create(rcu_boost_kthread, (void *)rnp,
			   "rcub/%d", rnp_index);
	if (WARN_ON_ONCE(IS_ERR(t)))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	rnp->boost_kthread_task = t;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	sp.sched_priority = kthread_prio;
	sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
}

/*
 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
 * served by the rcu_node in question.  The CPU hotplug lock is still
 * held, so the value of rnp->qsmaskinit will be stable.
 *
 * We don't include outgoingcpu in the affinity set, use -1 if there is
 * no outgoing CPU.  If there are no CPUs left in the affinity set,
 * this function allows the kthread to execute on any CPU.
 */
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
	struct task_struct *t = rnp->boost_kthread_task;
	unsigned long mask = rcu_rnp_online_cpus(rnp);
	cpumask_var_t cm;
	int cpu;

	if (!t)
		return;
	if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
		return;
	for_each_leaf_node_possible_cpu(rnp, cpu)
		if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
		    cpu != outgoingcpu)
			cpumask_set_cpu(cpu, cm);
	if (cpumask_weight(cm) == 0)
		cpumask_setall(cm);
	set_cpus_allowed_ptr(t, cm);
	free_cpumask_var(cm);
}

/*
 * Spawn boost kthreads -- called as soon as the scheduler is running.
 */
static void __init rcu_spawn_boost_kthreads(void)
{
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp)
		if (rcu_rnp_online_cpus(rnp))
			rcu_spawn_one_boost_kthread(rnp);
}

#else /* #ifdef CONFIG_RCU_BOOST */

static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

static bool rcu_is_callbacks_kthread(void)
{
	return false;
}

static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
{
}

static void rcu_spawn_one_boost_kthread(struct rcu_node *rnp)
{
}

static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static void __init rcu_spawn_boost_kthreads(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

#if !defined(CONFIG_RCU_FAST_NO_HZ)

/*
 * Check to see if any future non-offloaded RCU-related work will need
 * to be done by the current CPU, even if none need be done immediately,
 * returning 1 if so.  This function is part of the RCU implementation;
 * it is -not- an exported member of the RCU API.
 *
 * Because we do not have RCU_FAST_NO_HZ, just check whether or not this
 * CPU has RCU callbacks queued.
 */
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	*nextevt = KTIME_MAX;
	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
		!rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
}

/*
 * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
 * after it.
 */
static void rcu_cleanup_after_idle(void)
{
}

/*
 * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
 * is nothing.
 */
static void rcu_prepare_for_idle(void)
{
}

#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * This code is invoked when a CPU goes idle, at which point we want
 * to have the CPU do everything required for RCU so that it can enter
 * the energy-efficient dyntick-idle mode.
 *
 * The following preprocessor symbol controls this:
 *
 * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
 *	to sleep in dyntick-idle mode with RCU callbacks pending.  This
 *	is sized to be roughly one RCU grace period.  Those energy-efficiency
 *	benchmarkers who might otherwise be tempted to set this to a large
 *	number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
 *	system.  And if you are -that- concerned about energy efficiency,
 *	just power the system down and be done with it!
 *
 * The value below works well in practice.  If future workloads require
 * adjustment, they can be converted into kernel config parameters, though
 * making the state machine smarter might be a better option.
 */
#define RCU_IDLE_GP_DELAY 4		/* Roughly one grace period. */

static int rcu_idle_gp_delay = RCU_IDLE_GP_DELAY;
module_param(rcu_idle_gp_delay, int, 0644);

/*
 * Try to advance callbacks on the current CPU, but only if it has been
 * awhile since the last time we did so.  Afterwards, if there are any
 * callbacks ready for immediate invocation, return true.
 */
static bool __maybe_unused rcu_try_advance_all_cbs(void)
{
	bool cbs_ready = false;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp;

	/* Exit early if we advanced recently. */
	if (jiffies == rdp->last_advance_all)
		return false;
	rdp->last_advance_all = jiffies;

	rnp = rdp->mynode;

	/*
	 * Don't bother checking unless a grace period has
	 * completed since we last checked and there are
	 * callbacks not yet ready to invoke.
	 */
	if ((rcu_seq_completed_gp(rdp->gp_seq,
				  rcu_seq_current(&rnp->gp_seq)) ||
	     unlikely(READ_ONCE(rdp->gpwrap))) &&
	    rcu_segcblist_pend_cbs(&rdp->cblist))
		note_gp_changes(rdp);

	if (rcu_segcblist_ready_cbs(&rdp->cblist))
		cbs_ready = true;
	return cbs_ready;
}

/*
 * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
 * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
 * caller what timeout to set.
 *
 * The caller must have disabled interrupts.
 */
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	unsigned long dj;

	lockdep_assert_irqs_disabled();

	/* If no non-offloaded callbacks, RCU doesn't need the CPU. */
	if (rcu_segcblist_empty(&rdp->cblist) ||
	    rcu_rdp_is_offloaded(rdp)) {
		*nextevt = KTIME_MAX;
		return 0;
	}

	/* Attempt to advance callbacks. */
	if (rcu_try_advance_all_cbs()) {
		/* Some ready to invoke, so initiate later invocation. */
		invoke_rcu_core();
		return 1;
	}
	rdp->last_accelerate = jiffies;

	/* Request timer, rounded to the next multiple of the idle delay. */
	dj = round_up(rcu_idle_gp_delay + jiffies, rcu_idle_gp_delay) - jiffies;

	*nextevt = basemono + dj * TICK_NSEC;
	return 0;
}
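
/*
 * Worked example (illustrative, not part of this file): with the default
 * rcu_idle_gp_delay of 4 and jiffies == 1003,
 *
 *	dj = round_up(4 + 1003, 4) - 1003 = 1008 - 1003 = 5
 *
 * so the wakeup lands on a jiffy that is a multiple of the delay.  This
 * alignment tends to wake idle CPUs with pending callbacks at the same
 * time, batching their grace-period work.
 */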

/*
 * Prepare a CPU for idle from an RCU perspective.  The first major task is to
 * sense whether nohz mode has been enabled or disabled via sysfs.  The second
 * major task is to accelerate (that is, assign grace-period numbers to) any
 * recently arrived callbacks.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_prepare_for_idle(void)
{
	bool needwake;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp;
	int tne;

	lockdep_assert_irqs_disabled();
	if (rcu_rdp_is_offloaded(rdp))
		return;

	/* Handle nohz enablement switches conservatively. */
	tne = READ_ONCE(tick_nohz_active);
	if (tne != rdp->tick_nohz_enabled_snap) {
		if (!rcu_segcblist_empty(&rdp->cblist))
			invoke_rcu_core(); /* force nohz to see update. */
		rdp->tick_nohz_enabled_snap = tne;
		return;
	}
	if (!tne)
		return;

	/*
	 * If we have not yet accelerated this jiffy, accelerate all
	 * callbacks on this CPU.
	 */
	if (rdp->last_accelerate == jiffies)
		return;
	rdp->last_accelerate = jiffies;
	if (rcu_segcblist_pend_cbs(&rdp->cblist)) {
		rnp = rdp->mynode;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		needwake = rcu_accelerate_cbs(rnp, rdp);
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
		if (needwake)
			rcu_gp_kthread_wake();
	}
}

/*
 * Clean up for exit from idle.  Attempt to advance callbacks based on
 * any grace periods that elapsed while the CPU was idle, and if any
 * callbacks are now ready to invoke, initiate invocation.
 */
static void rcu_cleanup_after_idle(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	lockdep_assert_irqs_disabled();
	if (rcu_rdp_is_offloaded(rdp))
		return;
	if (rcu_try_advance_all_cbs())
		invoke_rcu_core();
}

#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */

/*
 * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
 * grace-period kthread will do force_quiescent_state() processing?
 * The idea is to avoid waking up RCU core processing on such a
 * CPU unless the grace period has extended for too long.
 *
 * This code relies on the fact that all NO_HZ_FULL CPUs are also
 * CONFIG_RCU_NOCB_CPU CPUs.
 */
static bool rcu_nohz_full_cpu(void)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_cpu(smp_processor_id()) &&
	    (!rcu_gp_in_progress() ||
	     time_before(jiffies, READ_ONCE(rcu_state.gp_start) + HZ)))
		return true;
#endif /* #ifdef CONFIG_NO_HZ_FULL */
	return false;
}

/*
 * Bind the RCU grace-period kthreads to the housekeeping CPU.
 */
static void rcu_bind_gp_kthread(void)
{
	if (!tick_nohz_full_enabled())
		return;
	housekeeping_affine(current, HK_FLAG_RCU);
}

/* Record the current task on dyntick-idle entry. */
static __always_inline void rcu_dynticks_task_enter(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
	WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}

/* Record no current task on dyntick-idle exit. */
static __always_inline void rcu_dynticks_task_exit(void)
{
#if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
	WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
#endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
}

/* Turn on heavyweight RCU tasks trace readers on idle/user entry. */
static __always_inline void rcu_dynticks_task_trace_enter(void)
{
#ifdef CONFIG_TASKS_TRACE_RCU
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
		current->trc_reader_special.b.need_mb = true;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

/* Turn off heavyweight RCU tasks trace readers on idle/user exit. */
static __always_inline void rcu_dynticks_task_trace_exit(void)
{
#ifdef CONFIG_TASKS_TRACE_RCU
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
		current->trc_reader_special.b.need_mb = false;
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}