/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
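
/*
 * Typical use of the snapshot/done pair, following the pattern of
 * sync_exp_work_done() and exp_funnel_lock() below: take the snapshot
 * before starting the update, then test whether a full expedited grace
 * period has since elapsed:
 *
 *	s = rcu_exp_gp_seq_snap();
 *	...
 *	if (rcu_exp_gp_seq_done(s))
 *		return;  // A full expedited GP already covers the caller.
 */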

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If it was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}
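
/*
 * For illustration: the first leaf rcu_node structure ever to gain an
 * online CPU propagates its ->grpmask bit all the way to the root; a
 * leaf gaining its first online CPU later stops at its first ancestor
 * whose ->expmaskinit is already nonzero (the "done" flag above); and a
 * leaf whose ->expmaskinit was already nonzero skips propagation
 * entirely.
 */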

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);

	return rnp->exp_tasks == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
 * doesn't hold the rcu_node's ->lock, and will acquire and release the lock
 * itself.
 */
static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_preempt_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}


/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->exp_deferred_qs, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		smp_mb(); /* Ensure test happens before caller kfree(). */
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire ->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		rnp->exp_seq_rq = s; /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}
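
/*
 * The expected calling pattern, as used by synchronize_rcu_expedited()
 * at the bottom of this file:
 *
 *	s = rcu_exp_gp_seq_snap();
 *	if (exp_funnel_lock(s))
 *		return;  // Recent expedited GP suffices; no mutex held.
 *	... drive the expedited grace period ...
 *	mutex_unlock(&rcu_state.exp_mutex);
 */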

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(rdp);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.  Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		rnp->exp_tasks = rnp->blkd_tasks.next;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		if (get_cpu() == cpu) {
			mask_ofl_test |= mask;
			put_cpu();
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		put_cpu();
		if (!ret) {
			mask_ofl_ipi &= ~mask;
			continue;
		}
		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_uninterruptible(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we can ignore it. */
		if (!(rnp->expmask & mask))
			mask_ofl_ipi &= ~mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	mask_ofl_test |= mask_ofl_ipi;
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
	int cpu;
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		if (!READ_ONCE(rcu_par_gp_wq) ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No workqueues yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
		cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
		/* If all offline, queue the work on an unbound CPU. */
		if (unlikely(cpu > rnp->grphi - rnp->grplo))
			cpu = WORK_CPU_UNBOUND;
		else
			cpu += rnp->grplo;
		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
		rnp->exp_need_flush = true;
	}

	/* Wait for workqueue jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			flush_work(&rnp->rew.rew_work);
}

/*
 * Wait for the current expedited grace period to complete, issuing
 * expedited RCU CPU stall warnings along the way if it takes too long.
 */
static void synchronize_sched_expedited_wait(void)
{
	int cpu;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();
	int ret;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;

	for (;;) {
		ret = swait_event_timeout_exclusive(
				rcu_state.expedited_wq,
				sync_rcu_preempt_exp_done_unlocked(rnp_root),
				jiffies_stall);
		if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
			return;
		WARN_ON(ret < 0);  /* workqueues should not be signaled. */
		if (rcu_cpu_stall_suppress)
			continue;
		panic_on_rcu_stall();
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rcu_state.expedited_sequence,
			READ_ONCE(rnp_root->expmask),
			".T"[!!rnp_root->exp_tasks]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_preempt_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					READ_ONCE(rnp->expmask),
					".T"[!!rnp->exp_tasks]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				preempt_disable(); // For smp_processor_id() in dump_cpu_task().
				dump_cpu_task(cpu);
				preempt_enable();
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_sched_expedited_wait();

	// Switch over to wakeup mode, allowing the next GP to proceed.
	// End the previous grace period only after acquiring the mutex
	// to ensure that only one GP runs concurrently with wakeups.
	mutex_lock(&rcu_state.exp_wake_mutex);
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				rnp->exp_seq_rq = s;
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section.  If also enabled or idle, immediately
	 * report the quiescent state, otherwise defer.
	 */
	if (!t->rcu_read_lock_nesting) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_dynticks_curr_cpu_in_eqs()) {
			rcu_report_exp_rdp(rdp);
		} else {
			rdp->exp_deferred_qs = true;
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->deferred_qs
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (t->rcu_read_lock_nesting > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			rdp->exp_deferred_qs = true;
			t->rcu_read_unlock_special.b.exp_hint = true;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	/*
	 * The final and least likely case is where the interrupted
	 * code was just about to or just finished exiting the RCU-preempt
	 * read-side critical section, and no, we can't tell which.
	 * So either way, set ->deferred_qs to flag later code that
	 * a quiescent state is required.
	 *
	 * If the CPU is fully enabled (or if some buggy RCU-preempt
	 * read-side critical section is being used from idle), just
	 * invoke rcu_preempt_deferred_qs() to immediately report the
	 * quiescent state.  We cannot use rcu_read_unlock_special()
	 * because we are in an interrupt handler, which will cause that
	 * function to take an early exit without doing anything.
	 *
	 * Otherwise, force a context switch after the CPU enables everything.
	 */
	rdp->exp_deferred_qs = true;
	if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
	    WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
		rcu_preempt_deferred_qs(t);
	} else {
		set_tsk_need_resched(t);
		set_preempt_need_resched();
	}
}

/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	struct task_struct *t;
	int ndetected = 0;

	if (!rnp->exp_tasks)
		return 0;
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = this_cpu_ptr(&rcu_data);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	rcu_exp_need_qs();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	unsigned long flags;
	int my_cpu;
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	my_cpu = get_cpu();
	/* Quiescent state either not needed or already requested, leave. */
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    rdp->cpu_no_qs.b.exp) {
		put_cpu();
		return;
	}
	/* Quiescent state needed on current CPU, so set it up locally. */
	if (my_cpu == cpu) {
		local_irq_save(flags);
		rcu_exp_need_qs();
		local_irq_restore(flags);
		put_cpu();
		return;
	}
	/* Quiescent state needed on some other CPU, send IPI. */
	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
	put_cpu();
	WARN_ON_ONCE(ret);
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* Is the state such that the call is a grace period? */
	if (rcu_blocking_is_gp())
		return;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu);
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(boottime)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
		queue_work(rcu_gp_wq, &rew.rew_work);
	}

	/* Wait for expedited grace period to complete. */
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));
	smp_mb(); /* Workqueue actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);

	if (likely(!boottime))
		destroy_work_on_stack(&rew.rew_work);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
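
/*
 * Example of a hypothetical caller (the usual RCU update-side pattern,
 * not taken from this file): remove an element from an RCU-protected
 * list, wait for all pre-existing readers, and only then free it:
 *
 *	spin_lock(&mylock);
 *	list_del_rcu(&p->list);
 *	spin_unlock(&mylock);
 *	synchronize_rcu_expedited();
 *	kfree(p);
 */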