/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
	rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
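
/*
 * Usage sketch (illustrative only): the snapshot/done helpers above are
 * intended to be used as a pair, as exp_funnel_lock() and
 * synchronize_rcu_expedited() do later in this file:
 *
 *	unsigned long s;
 *
 *	s = rcu_exp_gp_seq_snap();	// Earliest value indicating a full exp GP.
 *	...				// Start or piggy-back on an expedited GP.
 *	if (rcu_exp_gp_seq_done(s))
 *		...			// A full expedited grace period has elapsed.
 */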

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	return READ_ONCE(rnp->exp_tasks) == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	raw_lockdep_assert_held_rcu_node(rnp);
	for (;;) {
		if (!sync_rcu_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one_online(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
			continue;
		rdp->rcu_forced_tick_exp = false;
		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
	}
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		smp_mb(); /* Ensure test happens before caller kfree(). */
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work,
	 * otherwise falling through to acquire ->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(cpu);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.	Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;

retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		if (get_cpu() == cpu) {
			mask_ofl_test |= mask;
			put_cpu();
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		put_cpu();
		/* The CPU will report the QS in response to the IPI. */
		if (!ret)
			continue;

		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_idle(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we must report its QS. */
		if (rnp->expmask & mask)
			mask_ofl_test |= mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

static void rcu_exp_sel_wait_wake(unsigned long s);

#ifdef CONFIG_RCU_EXP_KTHREAD
static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	__sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_gp_par_worker_started(void)
{
	return !!READ_ONCE(rcu_exp_par_gp_kworker);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
	kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	/*
	 * Use rcu_exp_par_gp_kworker, because flushing a work item from
	 * another work item on the same kthread worker can result in
	 * deadlock.
	 */
	kthread_queue_work(rcu_exp_par_gp_kworker, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
	kthread_flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct kthread_work *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
	kthread_init_work(&rew->rew_work, wait_rcu_exp_gp);
	kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work);
}

static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
}
#else /* !CONFIG_RCU_EXP_KTHREAD */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	__sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_gp_par_worker_started(void)
{
	return !!READ_ONCE(rcu_par_gp_wq);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
	int cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);

	INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
	/* If all offline, queue the work on an unbound CPU. */
	if (unlikely(cpu > rnp->grphi - rnp->grplo))
		cpu = WORK_CPU_UNBOUND;
	else
		cpu += rnp->grplo;
	queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
	flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
	INIT_WORK_ONSTACK(&rew->rew_work, wait_rcu_exp_gp);
	queue_work(rcu_gp_wq, &rew->rew_work);
}

static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
	destroy_work_on_stack(&rew->rew_work);
}
#endif /* CONFIG_RCU_EXP_KTHREAD */

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		if (!rcu_gp_par_worker_started() ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No worker started yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		sync_rcu_exp_select_cpus_queue_work(rnp);
		rnp->exp_need_flush = true;
	}

	/* Wait for jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			sync_rcu_exp_select_cpus_flush_work(rnp);
}

/*
 * Wait for the expedited grace period to elapse, within time limit.
 * If the time limit is exceeded without the grace period elapsing,
 * return false, otherwise return true.
 */
static bool synchronize_rcu_expedited_wait_once(long tlimit)
{
	int t;
	struct rcu_node *rnp_root = rcu_get_root();

	t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
					  sync_rcu_exp_done_unlocked(rnp_root),
					  tlimit);
	// Workqueues should not be signaled.
	if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
		return true;
	WARN_ON(t < 0);  /* workqueues should not be signaled. */
	return false;
}

/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * RCU CPU stall warnings along the way.
 */
static void synchronize_rcu_expedited_wait(void)
{
	int cpu;
	unsigned long j;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_exp_jiffies_till_stall_check();
	jiffies_start = jiffies;
	if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
		if (synchronize_rcu_expedited_wait_once(1))
			return;
		rcu_for_each_leaf_node(rnp) {
			mask = READ_ONCE(rnp->expmask);
			for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
				rdp = per_cpu_ptr(&rcu_data, cpu);
				if (rdp->rcu_forced_tick_exp)
					continue;
				rdp->rcu_forced_tick_exp = true;
				preempt_disable();
				if (cpu_online(cpu))
					tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
				preempt_enable();
			}
		}
		j = READ_ONCE(jiffies_till_first_fqs);
		if (synchronize_rcu_expedited_wait_once(j + HZ))
			return;
	}

	for (;;) {
		if (synchronize_rcu_expedited_wait_once(jiffies_stall))
			return;
		if (rcu_stall_is_suppressed())
			continue;
		trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
				pr_cont(" %d-%c%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
					"D."[!!data_race(rdp->cpu_no_qs.b.exp)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rcu_state.expedited_sequence,
			data_race(rnp_root->expmask),
			".T"[!!data_race(rnp_root->exp_tasks)]);
		if (ndetected) {
			pr_err("blocking rcu_node structures (internal RCU debug):");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					data_race(rnp->expmask),
					".T"[!!data_race(rnp->exp_tasks)]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				preempt_disable(); // For smp_processor_id() in dump_cpu_task().
				dump_cpu_task(cpu);
				preempt_enable();
			}
		}
		jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
		panic_on_rcu_stall();
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_rcu_expedited_wait();

	// Switch over to wakeup mode, allowing the next GP to proceed.
	// End the previous grace period only after acquiring the mutex
	// to ensure that only one GP runs concurrently with wakeups.
	mutex_lock(&rcu_state.exp_wake_mutex);
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				WRITE_ONCE(rnp->exp_seq_rq, s);
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
	int depth = rcu_preempt_depth();
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section.  If preemption and bottom halves are also
	 * enabled, or if this interrupt arrived from the idle loop,
	 * immediately report the quiescent state, otherwise defer.
	 */
	if (!depth) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_is_cpu_rrupt_from_idle()) {
			rcu_report_exp_rdp(rdp);
		} else {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->deferred_qs
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (depth > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			t->rcu_read_unlock_special.b.exp_hint = true;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	// Finally, negative nesting depth should not happen.
	WARN_ON_ONCE(1);
}

/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	int ndetected = 0;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rnp->exp_tasks) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	bool preempt_bh_enabled = !(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));

	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle() ||
	    (IS_ENABLED(CONFIG_PREEMPT_COUNT) && preempt_bh_enabled)) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	rcu_exp_need_qs();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	unsigned long flags;
	int my_cpu;
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	my_cpu = get_cpu();
	/* Quiescent state either not needed or already requested, leave. */
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    READ_ONCE(rdp->cpu_no_qs.b.exp)) {
		put_cpu();
		return;
	}
	/* Quiescent state needed on current CPU, so set it up locally. */
	if (my_cpu == cpu) {
		local_irq_save(flags);
		rcu_exp_need_qs();
		local_irq_restore(flags);
		put_cpu();
		return;
	}
	/* Quiescent state needed on some other CPU, send IPI. */
	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
	put_cpu();
	WARN_ON_ONCE(ret);
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
	unsigned long flags;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* Is the state such that the call is a grace period? */
	if (rcu_blocking_is_gp()) {
		// Note well that this code runs with !PREEMPT && !SMP.
		// In addition, all code that advances grace periods runs
		// at process level.  Therefore, this expedited GP overlaps
		// with other expedited GPs only by being fully nested within
		// them, which allows reuse of ->gp_seq_polled_exp_snap.
		rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
		rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);

		local_irq_save(flags);
		WARN_ON_ONCE(num_online_cpus() > 1);
		rcu_state.expedited_sequence += (1 << RCU_SEQ_CTR_SHIFT);
		local_irq_restore(flags);
		return;  // Context allows vacuous grace periods.
	}

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu_hurry);
		return;
	}

	/* Take a snapshot of the sequence number.  */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(boottime)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		synchronize_rcu_expedited_queue_work(&rew);
	}

	/* Wait for expedited grace period to complete. */
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));
	smp_mb(); /* Work actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);

	if (likely(!boottime))
		synchronize_rcu_expedited_destroy_work(&rew);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
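
/*
 * Usage sketch (illustrative only; the list, lock, and element names below
 * are hypothetical): a typical updater unlinks an element from an
 * RCU-protected list, waits for an expedited grace period so that all
 * pre-existing readers are done with it, and only then frees it:
 *
 *	spin_lock(&mylist_lock);
 *	list_del_rcu(&p->list);
 *	spin_unlock(&mylist_lock);
 *	synchronize_rcu_expedited();	// All pre-existing readers have finished.
 *	kfree(p);
 */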

/*
 * Ensure that start_poll_synchronize_rcu_expedited() has the expedited
 * RCU grace periods that it needs.
 */
static void sync_rcu_do_polled_gp(struct work_struct *wp)
{
	unsigned long flags;
	int i = 0;
	struct rcu_node *rnp = container_of(wp, struct rcu_node, exp_poll_wq);
	unsigned long s;

	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
	if (s == RCU_GET_STATE_COMPLETED)
		return;
	while (!poll_state_synchronize_rcu(s)) {
		synchronize_rcu_expedited();
		if (i == 10 || i == 20)
			pr_info("%s: i = %d s = %lx gp_seq_polled = %lx\n", __func__, i, s, READ_ONCE(rcu_state.gp_seq_polled));
		i++;
	}
	raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	s = rnp->exp_seq_poll_rq;
	if (poll_state_synchronize_rcu(s))
		rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
	raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
}

/**
 * start_poll_synchronize_rcu_expedited - Snapshot current RCU state and start expedited grace period
 *
 * Returns a cookie to pass to a call to cond_synchronize_rcu(),
 * cond_synchronize_rcu_expedited(), or poll_state_synchronize_rcu(),
 * allowing them to determine whether or not any sort of grace period has
 * elapsed in the meantime.  If the needed expedited grace period is not
 * already slated to start, initiates that grace period.
 */
unsigned long start_poll_synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	unsigned long s;

	s = get_state_synchronize_rcu();
	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	rnp = rdp->mynode;
	if (rcu_init_invoked())
		raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
	if (!poll_state_synchronize_rcu(s)) {
		rnp->exp_seq_poll_rq = s;
		if (rcu_init_invoked())
			queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
	}
	if (rcu_init_invoked())
		raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);

	return s;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited);

/**
 * start_poll_synchronize_rcu_expedited_full - Take a full snapshot and start expedited grace period
 * @rgosp: Place to put snapshot of grace-period state
 *
 * Places the normal and expedited grace-period states in rgosp.  This
 * state value can be passed to a later call to cond_synchronize_rcu_full()
 * or poll_state_synchronize_rcu_full() to determine whether or not a
 * grace period (whether normal or expedited) has elapsed in the meantime.
 * If the needed expedited grace period is not already slated to start,
 * initiates that grace period.
 */
void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	get_state_synchronize_rcu_full(rgosp);
	(void)start_poll_synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited_full);

/**
 * cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
 *
 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
 *
 * If any type of full RCU grace period has elapsed since the earlier
 * call to get_state_synchronize_rcu(), start_poll_synchronize_rcu(),
 * or start_poll_synchronize_rcu_expedited(), just return.  Otherwise,
 * invoke synchronize_rcu_expedited() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @oldstate and that returned at the end
 * of this function.
 */
void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	if (!poll_state_synchronize_rcu(oldstate))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited);
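
/*
 * Usage sketch (illustrative only): pairing the polling APIs so that the
 * caller blocks only if no grace period has elapsed since the cookie was
 * obtained:
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_rcu_expedited();
 *	...					// Do other work while the GP proceeds.
 *	cond_synchronize_rcu_expedited(cookie);	// Waits only if still needed.
 */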

/**
 * cond_synchronize_rcu_expedited_full - Conditionally wait for an expedited RCU grace period
 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
 *
 * If a full RCU grace period has elapsed since the call to
 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
 * obtained, just return.  Otherwise, invoke synchronize_rcu_expedited()
 * to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @rgosp and that returned at the end of
 * this function.
 */
void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	if (!poll_state_synchronize_rcu_full(rgosp))
		synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited_full);