1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Sleepable Read-Copy Update mechanism for mutual exclusion.
4  *
5  * Copyright (C) IBM Corporation, 2006
6  * Copyright (C) Fujitsu, 2012
7  *
8  * Authors: Paul McKenney <paulmck@linux.ibm.com>
9  *	   Lai Jiangshan <laijs@cn.fujitsu.com>
10  *
11  * For detailed explanation of Read-Copy Update mechanism see -
12  *		Documentation/RCU/ *.txt
13  *
14  */
15 
16 #define pr_fmt(fmt) "rcu: " fmt
17 
18 #include <linux/export.h>
19 #include <linux/mutex.h>
20 #include <linux/percpu.h>
21 #include <linux/preempt.h>
22 #include <linux/rcupdate_wait.h>
23 #include <linux/sched.h>
24 #include <linux/smp.h>
25 #include <linux/delay.h>
26 #include <linux/module.h>
27 #include <linux/srcu.h>
28 
29 #include "rcu.h"
30 #include "rcu_segcblist.h"
31 
32 /* Holdoff in nanoseconds for auto-expediting. */
33 #define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
34 static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
35 module_param(exp_holdoff, ulong, 0444);
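/*
 * Illustrative configuration note, assuming the usual built-in build of
 * this file: exp_holdoff is typically adjusted from the kernel boot
 * command line rather than at modprobe time, for example:
 *
 *	srcutree.exp_holdoff=0		# disable the auto-expedite holdoff
 *	srcutree.exp_holdoff=50000	# 50-microsecond holdoff
 *
 * With the 0444 permissions above, the current value should also be
 * readable (but not writable) under /sys/module/srcutree/parameters/.
 */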
36 
37 /* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
38 static ulong counter_wrap_check = (ULONG_MAX >> 2);
39 module_param(counter_wrap_check, ulong, 0444);
40 
41 /* Early-boot callback-management, so early that no lock is required! */
42 static LIST_HEAD(srcu_boot_list);
43 static bool __read_mostly srcu_init_done;
44 
45 static void srcu_invoke_callbacks(struct work_struct *work);
46 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
47 static void process_srcu(struct work_struct *work);
48 static void srcu_delay_timer(struct timer_list *t);
49 
50 /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
51 #define spin_lock_rcu_node(p)					\
52 do {									\
53 	spin_lock(&ACCESS_PRIVATE(p, lock));			\
54 	smp_mb__after_unlock_lock();					\
55 } while (0)
56 
57 #define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
58 
59 #define spin_lock_irq_rcu_node(p)					\
60 do {									\
61 	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
62 	smp_mb__after_unlock_lock();					\
63 } while (0)
64 
65 #define spin_unlock_irq_rcu_node(p)					\
66 	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
67 
68 #define spin_lock_irqsave_rcu_node(p, flags)			\
69 do {									\
70 	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
71 	smp_mb__after_unlock_lock();					\
72 } while (0)
73 
74 #define spin_unlock_irqrestore_rcu_node(p, flags)			\
75 	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
76 
77 /*
78  * Initialize SRCU combining tree.  Note that statically allocated
79  * srcu_struct structures might already have srcu_read_lock() and
80  * srcu_read_unlock() running against them.  So if the is_static parameter
81  * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
82  */
83 static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
84 {
85 	int cpu;
86 	int i;
87 	int level = 0;
88 	int levelspread[RCU_NUM_LVLS];
89 	struct srcu_data *sdp;
90 	struct srcu_node *snp;
91 	struct srcu_node *snp_first;
92 
93 	/* Initialize geometry if it has not already been initialized. */
94 	rcu_init_geometry();
95 
96 	/* Work out the overall tree geometry. */
97 	ssp->level[0] = &ssp->node[0];
98 	for (i = 1; i < rcu_num_lvls; i++)
99 		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
100 	rcu_init_levelspread(levelspread, num_rcu_lvl);
101 
102 	/* Each pass through this loop initializes one srcu_node structure. */
103 	srcu_for_each_node_breadth_first(ssp, snp) {
104 		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
105 		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
106 			     ARRAY_SIZE(snp->srcu_data_have_cbs));
107 		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
108 			snp->srcu_have_cbs[i] = 0;
109 			snp->srcu_data_have_cbs[i] = 0;
110 		}
111 		snp->srcu_gp_seq_needed_exp = 0;
112 		snp->grplo = -1;
113 		snp->grphi = -1;
114 		if (snp == &ssp->node[0]) {
115 			/* Root node, special case. */
116 			snp->srcu_parent = NULL;
117 			continue;
118 		}
119 
120 		/* Non-root node. */
121 		if (snp == ssp->level[level + 1])
122 			level++;
123 		snp->srcu_parent = ssp->level[level - 1] +
124 				   (snp - ssp->level[level]) /
125 				   levelspread[level - 1];
126 	}
127 
128 	/*
129 	 * Initialize the per-CPU srcu_data array, which feeds into the
130 	 * leaves of the srcu_node tree.
131 	 */
132 	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
133 		     ARRAY_SIZE(sdp->srcu_unlock_count));
134 	level = rcu_num_lvls - 1;
135 	snp_first = ssp->level[level];
136 	for_each_possible_cpu(cpu) {
137 		sdp = per_cpu_ptr(ssp->sda, cpu);
138 		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
139 		rcu_segcblist_init(&sdp->srcu_cblist);
140 		sdp->srcu_cblist_invoking = false;
141 		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
142 		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
143 		sdp->mynode = &snp_first[cpu / levelspread[level]];
144 		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
145 			if (snp->grplo < 0)
146 				snp->grplo = cpu;
147 			snp->grphi = cpu;
148 		}
149 		sdp->cpu = cpu;
150 		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
151 		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
152 		sdp->ssp = ssp;
153 		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
154 		if (is_static)
155 			continue;
156 
157 		/* Dynamically allocated, better be no srcu_read_locks()! */
158 		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
159 			sdp->srcu_lock_count[i] = 0;
160 			sdp->srcu_unlock_count[i] = 0;
161 		}
162 	}
163 }
164 
165 /*
166  * Initialize non-compile-time initialized fields, including the
167  * associated srcu_node and srcu_data structures.  The is_static
168  * parameter is passed through to init_srcu_struct_nodes(), and
169  * also tells us that ->sda has already been wired up to srcu_data.
170  */
171 static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
172 {
173 	mutex_init(&ssp->srcu_cb_mutex);
174 	mutex_init(&ssp->srcu_gp_mutex);
175 	ssp->srcu_idx = 0;
176 	ssp->srcu_gp_seq = 0;
177 	ssp->srcu_barrier_seq = 0;
178 	mutex_init(&ssp->srcu_barrier_mutex);
179 	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
180 	INIT_DELAYED_WORK(&ssp->work, process_srcu);
181 	if (!is_static)
182 		ssp->sda = alloc_percpu(struct srcu_data);
183 	if (!ssp->sda)
184 		return -ENOMEM;
185 	init_srcu_struct_nodes(ssp, is_static);
186 	ssp->srcu_gp_seq_needed_exp = 0;
187 	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
188 	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
189 	return 0;
190 }
191 
192 #ifdef CONFIG_DEBUG_LOCK_ALLOC
193 
194 int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
195 		       struct lock_class_key *key)
196 {
197 	/* Don't re-initialize a lock while it is held. */
198 	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
199 	lockdep_init_map(&ssp->dep_map, name, key, 0);
200 	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
201 	return init_srcu_struct_fields(ssp, false);
202 }
203 EXPORT_SYMBOL_GPL(__init_srcu_struct);
204 
205 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
206 
207 /**
208  * init_srcu_struct - initialize a sleep-RCU structure
209  * @ssp: structure to initialize.
210  *
211  * Must invoke this on a given srcu_struct before passing that srcu_struct
212  * to any other function.  Each srcu_struct represents a separate domain
213  * of SRCU protection.
214  */
215 int init_srcu_struct(struct srcu_struct *ssp)
216 {
217 	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
218 	return init_srcu_struct_fields(ssp, false);
219 }
220 EXPORT_SYMBOL_GPL(init_srcu_struct);
221 
222 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
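/*
 * Illustrative usage sketch, using hypothetical names: a caller can
 * either define its SRCU domain statically or initialize a dynamically
 * allocated one with init_srcu_struct(), and must eventually pair that
 * with cleanup_srcu_struct() once all readers and callbacks are done.
 *
 *	// Static definition: no init/cleanup calls needed.
 *	DEFINE_SRCU(my_srcu);
 *
 *	// Dynamic initialization of an embedded srcu_struct.
 *	struct my_dev {
 *		struct srcu_struct srcu;
 *	};
 *
 *	static int my_dev_setup(struct my_dev *dev)
 *	{
 *		return init_srcu_struct(&dev->srcu);	// 0 or -ENOMEM
 *	}
 *
 *	static void my_dev_teardown(struct my_dev *dev)
 *	{
 *		synchronize_srcu(&dev->srcu);		// wait out readers
 *		cleanup_srcu_struct(&dev->srcu);	// then free ->sda
 *	}
 *
 * my_srcu, my_dev, my_dev_setup(), and my_dev_teardown() are
 * hypothetical; DEFINE_SRCU(), init_srcu_struct(), synchronize_srcu(),
 * and cleanup_srcu_struct() are the SRCU API used here.
 */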
223 
224 /*
225  * First-use initialization of statically allocated srcu_struct
226  * structure.  Wiring up the combining tree is more than can be
227  * done with compile-time initialization, so this check is added
228  * to each update-side SRCU primitive.  Use ssp->lock, which -is-
229  * compile-time initialized, to resolve races involving multiple
230  * CPUs trying to garner first-use privileges.
231  */
232 static void check_init_srcu_struct(struct srcu_struct *ssp)
233 {
234 	unsigned long flags;
235 
236 	/* The smp_load_acquire() pairs with the smp_store_release(). */
237 	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
238 		return; /* Already initialized. */
239 	spin_lock_irqsave_rcu_node(ssp, flags);
240 	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
241 		spin_unlock_irqrestore_rcu_node(ssp, flags);
242 		return;
243 	}
244 	init_srcu_struct_fields(ssp, true);
245 	spin_unlock_irqrestore_rcu_node(ssp, flags);
246 }
247 
248 /*
249  * Returns approximate total of the readers' ->srcu_lock_count[] values
250  * for the rank of per-CPU counters specified by idx.
251  */
252 static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
253 {
254 	int cpu;
255 	unsigned long sum = 0;
256 
257 	for_each_possible_cpu(cpu) {
258 		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
259 
260 		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
261 	}
262 	return sum;
263 }
264 
265 /*
266  * Returns approximate total of the readers' ->srcu_unlock_count[] values
267  * for the rank of per-CPU counters specified by idx.
268  */
269 static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
270 {
271 	int cpu;
272 	unsigned long sum = 0;
273 
274 	for_each_possible_cpu(cpu) {
275 		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
276 
277 		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
278 	}
279 	return sum;
280 }
281 
282 /*
283  * Return true if the number of pre-existing readers is determined to
284  * be zero.
285  */
286 static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
287 {
288 	unsigned long unlocks;
289 
290 	unlocks = srcu_readers_unlock_idx(ssp, idx);
291 
292 	/*
293 	 * Make sure that a lock is always counted if the corresponding
294 	 * unlock is counted. Needs to be a smp_mb() as the read side may
295 	 * contain a read from a variable that is written to before the
296 	 * synchronize_srcu() in the write side. In this case smp_mb()s
297 	 * A and B act like the store buffering pattern.
298 	 *
299 	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
300 	 * after the synchronize_srcu() from being executed before the
301 	 * grace period ends.
302 	 */
303 	smp_mb(); /* A */
304 
305 	/*
306 	 * If the locks are the same as the unlocks, then there must have
307 	 * been no readers on this index at some time in between. This does
308 	 * not mean that there are no more readers, as one could have read
309 	 * the current index but not have incremented the lock counter yet.
310 	 *
311 	 * So suppose that the updater is preempted here for so long
312 	 * that more than ULONG_MAX non-nested readers come and go in
313 	 * the meantime.  It turns out that this cannot result in overflow
314 	 * because if a reader modifies its unlock count after we read it
315 	 * above, then that reader's next load of ->srcu_idx is guaranteed
316 	 * to get the new value, which will cause it to operate on the
317 	 * other bank of counters, where it cannot contribute to the
318 	 * overflow of these counters.  This means that there is a maximum
319 	 * of 2*NR_CPUS increments, which cannot overflow given current
320 	 * systems, especially not on 64-bit systems.
321 	 *
322 	 * OK, how about nesting?  This does impose a limit on nesting
323 	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
324 	 * especially on 64-bit systems.
325 	 */
326 	return srcu_readers_lock_idx(ssp, idx) == unlocks;
327 }
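/*
 * Worked example of the nesting bound above, assuming a hypothetical
 * 64-bit system with NR_CPUS=4096: ULONG_MAX is 2^64 - 1 (about 1.8e19),
 * so floor(ULONG_MAX / NR_CPUS / 2) is roughly 1.8e19 / 4096 / 2, or
 * about 2.2e15 nested readers per CPU before the lock/unlock sums
 * compared above could be fooled by wraparound, far beyond any
 * realistic nesting depth.
 */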
328 
329 /**
330  * srcu_readers_active - returns true if there are readers, and false
331  *                       otherwise
332  * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
333  *
334  * Note that this is not an atomic primitive, and can therefore suffer
335  * severe errors when invoked on an active srcu_struct.  That said, it
336  * can be useful as an error check at cleanup time.
337  */
338 static bool srcu_readers_active(struct srcu_struct *ssp)
339 {
340 	int cpu;
341 	unsigned long sum = 0;
342 
343 	for_each_possible_cpu(cpu) {
344 		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
345 
346 		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
347 		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
348 		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
349 		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
350 	}
351 	return sum;
352 }
353 
354 #define SRCU_INTERVAL		1
355 
356 /*
357  * Return grace-period delay, zero if there are expedited grace
358  * periods pending, SRCU_INTERVAL otherwise.
359  */
360 static unsigned long srcu_get_delay(struct srcu_struct *ssp)
361 {
362 	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
363 			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
364 		return 0;
365 	return SRCU_INTERVAL;
366 }
367 
368 /**
369  * cleanup_srcu_struct - deconstruct a sleep-RCU structure
370  * @ssp: structure to clean up.
371  *
372  * Must invoke this after you are finished using a given srcu_struct that
373  * was initialized via init_srcu_struct(), else you leak memory.
374  */
375 void cleanup_srcu_struct(struct srcu_struct *ssp)
376 {
377 	int cpu;
378 
379 	if (WARN_ON(!srcu_get_delay(ssp)))
380 		return; /* Just leak it! */
381 	if (WARN_ON(srcu_readers_active(ssp)))
382 		return; /* Just leak it! */
383 	flush_delayed_work(&ssp->work);
384 	for_each_possible_cpu(cpu) {
385 		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
386 
387 		del_timer_sync(&sdp->delay_work);
388 		flush_work(&sdp->work);
389 		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
390 			return; /* Forgot srcu_barrier(), so just leak it! */
391 	}
392 	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
393 	    WARN_ON(srcu_readers_active(ssp))) {
394 		pr_info("%s: Active srcu_struct %p state: %d\n",
395 			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
396 		return; /* Caller forgot to stop doing call_srcu()? */
397 	}
398 	free_percpu(ssp->sda);
399 	ssp->sda = NULL;
400 }
401 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
402 
403 /*
404  * Counts the new reader in the appropriate per-CPU element of the
405  * srcu_struct.
406  * Returns an index that must be passed to the matching srcu_read_unlock().
407  */
408 int __srcu_read_lock(struct srcu_struct *ssp)
409 {
410 	int idx;
411 
412 	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
413 	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
414 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
415 	return idx;
416 }
417 EXPORT_SYMBOL_GPL(__srcu_read_lock);
418 
419 /*
420  * Removes the count for the old reader from the appropriate per-CPU
421  * element of the srcu_struct.  Note that this may well be a different
422  * CPU than that which was incremented by the corresponding srcu_read_lock().
423  */
424 void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
425 {
426 	smp_mb(); /* C */  /* Avoid leaking the critical section. */
427 	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
428 }
429 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
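/*
 * Illustrative reader-side sketch, using hypothetical names: callers
 * normally use the srcu_read_lock()/srcu_read_unlock() wrappers from
 * <linux/srcu.h>, which invoke the two functions above and also handle
 * lockdep bookkeeping.  The returned index must be handed back to the
 * matching unlock, since it records which bank of ->srcu_lock_count[]
 * and ->srcu_unlock_count[] this reader incremented.
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_pointer, &my_srcu);
 *	if (p)
 *		do_something_with(p);	// may block, unlike plain RCU
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * my_srcu, my_pointer, and do_something_with() are hypothetical;
 * srcu_read_lock(), srcu_dereference(), and srcu_read_unlock() are the
 * reader-side SRCU API.
 */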
430 
431 /*
432  * We use an adaptive strategy for synchronize_srcu() and especially for
433  * synchronize_srcu_expedited().  We spin for a fixed time period
434  * (defined below) to allow SRCU readers to exit their read-side critical
435  * sections.  If there are still some readers after a few microseconds,
436  * we repeatedly block for 1-millisecond time periods.
437  */
438 #define SRCU_RETRY_CHECK_DELAY		5
439 
440 /*
441  * Start an SRCU grace period.
442  */
443 static void srcu_gp_start(struct srcu_struct *ssp)
444 {
445 	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
446 	int state;
447 
448 	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
449 	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
450 	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
451 	rcu_segcblist_advance(&sdp->srcu_cblist,
452 			      rcu_seq_current(&ssp->srcu_gp_seq));
453 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
454 				       rcu_seq_snap(&ssp->srcu_gp_seq));
455 	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
456 	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
457 	rcu_seq_start(&ssp->srcu_gp_seq);
458 	state = rcu_seq_state(ssp->srcu_gp_seq);
459 	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
460 }
461 
462 
463 static void srcu_delay_timer(struct timer_list *t)
464 {
465 	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
466 
467 	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
468 }
469 
470 static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
471 				       unsigned long delay)
472 {
473 	if (!delay) {
474 		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
475 		return;
476 	}
477 
478 	timer_reduce(&sdp->delay_work, jiffies + delay);
479 }
480 
481 /*
482  * Schedule callback invocation for the specified srcu_data structure,
483  * if possible, on the corresponding CPU.
484  */
485 static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
486 {
487 	srcu_queue_delayed_work_on(sdp, delay);
488 }
489 
490 /*
491  * Schedule callback invocation for all srcu_data structures associated
492  * with the specified srcu_node structure that have callbacks for the
493  * just-completed grace period, the one corresponding to idx.  If possible,
494  * schedule this invocation on the corresponding CPUs.
495  */
496 static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
497 				  unsigned long mask, unsigned long delay)
498 {
499 	int cpu;
500 
501 	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
502 		if (!(mask & (1 << (cpu - snp->grplo))))
503 			continue;
504 		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
505 	}
506 }
507 
508 /*
509  * Note the end of an SRCU grace period.  Initiates callback invocation
510  * and starts a new grace period if needed.
511  *
512  * The ->srcu_cb_mutex acquisition does not protect any data, but
513  * instead prevents more than one grace period from starting while we
514  * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
515  * array to have a finite number of elements.
516  */
517 static void srcu_gp_end(struct srcu_struct *ssp)
518 {
519 	unsigned long cbdelay;
520 	bool cbs;
521 	bool last_lvl;
522 	int cpu;
523 	unsigned long flags;
524 	unsigned long gpseq;
525 	int idx;
526 	unsigned long mask;
527 	struct srcu_data *sdp;
528 	struct srcu_node *snp;
529 
530 	/* Prevent more than one additional grace period. */
531 	mutex_lock(&ssp->srcu_cb_mutex);
532 
533 	/* End the current grace period. */
534 	spin_lock_irq_rcu_node(ssp);
535 	idx = rcu_seq_state(ssp->srcu_gp_seq);
536 	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
537 	cbdelay = srcu_get_delay(ssp);
538 	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
539 	rcu_seq_end(&ssp->srcu_gp_seq);
540 	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
541 	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
542 		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
543 	spin_unlock_irq_rcu_node(ssp);
544 	mutex_unlock(&ssp->srcu_gp_mutex);
545 	/* A new grace period can start at this point.  But only one. */
546 
547 	/* Initiate callback invocation as needed. */
548 	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
549 	srcu_for_each_node_breadth_first(ssp, snp) {
550 		spin_lock_irq_rcu_node(snp);
551 		cbs = false;
552 		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
553 		if (last_lvl)
554 			cbs = snp->srcu_have_cbs[idx] == gpseq;
555 		snp->srcu_have_cbs[idx] = gpseq;
556 		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
557 		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
558 			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
559 		mask = snp->srcu_data_have_cbs[idx];
560 		snp->srcu_data_have_cbs[idx] = 0;
561 		spin_unlock_irq_rcu_node(snp);
562 		if (cbs)
563 			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
564 
565 		/* Occasionally prevent srcu_data counter wrap. */
566 		if (!(gpseq & counter_wrap_check) && last_lvl)
567 			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
568 				sdp = per_cpu_ptr(ssp->sda, cpu);
569 				spin_lock_irqsave_rcu_node(sdp, flags);
570 				if (ULONG_CMP_GE(gpseq,
571 						 sdp->srcu_gp_seq_needed + 100))
572 					sdp->srcu_gp_seq_needed = gpseq;
573 				if (ULONG_CMP_GE(gpseq,
574 						 sdp->srcu_gp_seq_needed_exp + 100))
575 					sdp->srcu_gp_seq_needed_exp = gpseq;
576 				spin_unlock_irqrestore_rcu_node(sdp, flags);
577 			}
578 	}
579 
580 	/* Callback initiation done, allow grace periods after next. */
581 	mutex_unlock(&ssp->srcu_cb_mutex);
582 
583 	/* Start a new grace period if needed. */
584 	spin_lock_irq_rcu_node(ssp);
585 	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
586 	if (!rcu_seq_state(gpseq) &&
587 	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
588 		srcu_gp_start(ssp);
589 		spin_unlock_irq_rcu_node(ssp);
590 		srcu_reschedule(ssp, 0);
591 	} else {
592 		spin_unlock_irq_rcu_node(ssp);
593 	}
594 }
595 
596 /*
597  * Funnel-locking scheme to scalably mediate many concurrent expedited
598  * grace-period requests.  This function is invoked for the first known
599  * expedited request for a grace period that has already been requested,
600  * but without expediting.  To start a completely new grace period,
601  * whether expedited or not, use srcu_funnel_gp_start() instead.
602  */
603 static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
604 				  unsigned long s)
605 {
606 	unsigned long flags;
607 
608 	for (; snp != NULL; snp = snp->srcu_parent) {
609 		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
610 		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
611 			return;
612 		spin_lock_irqsave_rcu_node(snp, flags);
613 		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
614 			spin_unlock_irqrestore_rcu_node(snp, flags);
615 			return;
616 		}
617 		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
618 		spin_unlock_irqrestore_rcu_node(snp, flags);
619 	}
620 	spin_lock_irqsave_rcu_node(ssp, flags);
621 	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
622 		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
623 	spin_unlock_irqrestore_rcu_node(ssp, flags);
624 }
625 
626 /*
627  * Funnel-locking scheme to scalably mediate many concurrent grace-period
628  * requests.  The winner has to do the work of actually starting grace
629  * period s.  Losers must either ensure that their desired grace-period
630  * number is recorded on at least their leaf srcu_node structure, or they
631  * must take steps to invoke their own callbacks.
632  *
633  * Note that this function also does the work of srcu_funnel_exp_start(),
634  * in some cases by directly invoking it.
635  */
636 static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
637 				 unsigned long s, bool do_norm)
638 {
639 	unsigned long flags;
640 	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
641 	struct srcu_node *snp = sdp->mynode;
642 	unsigned long snp_seq;
643 
644 	/* Each pass through the loop does one level of the srcu_node tree. */
645 	for (; snp != NULL; snp = snp->srcu_parent) {
646 		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
647 			return; /* GP already done and CBs recorded. */
648 		spin_lock_irqsave_rcu_node(snp, flags);
649 		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
650 			snp_seq = snp->srcu_have_cbs[idx];
651 			if (snp == sdp->mynode && snp_seq == s)
652 				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
653 			spin_unlock_irqrestore_rcu_node(snp, flags);
654 			if (snp == sdp->mynode && snp_seq != s) {
655 				srcu_schedule_cbs_sdp(sdp, do_norm
656 							   ? SRCU_INTERVAL
657 							   : 0);
658 				return;
659 			}
660 			if (!do_norm)
661 				srcu_funnel_exp_start(ssp, snp, s);
662 			return;
663 		}
664 		snp->srcu_have_cbs[idx] = s;
665 		if (snp == sdp->mynode)
666 			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
667 		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
668 			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
669 		spin_unlock_irqrestore_rcu_node(snp, flags);
670 	}
671 
672 	/* Top of tree, must ensure the grace period will be started. */
673 	spin_lock_irqsave_rcu_node(ssp, flags);
674 	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
675 		/*
676 		 * Record need for grace period s.  Pair with load
677 		 * acquire setting up for initialization.
678 		 */
679 		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
680 	}
681 	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
682 		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
683 
684 	/* If grace period not already done and none in progress, start it. */
685 	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
686 	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
687 		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
688 		srcu_gp_start(ssp);
689 		if (likely(srcu_init_done))
690 			queue_delayed_work(rcu_gp_wq, &ssp->work,
691 					   srcu_get_delay(ssp));
692 		else if (list_empty(&ssp->work.work.entry))
693 			list_add(&ssp->work.work.entry, &srcu_boot_list);
694 	}
695 	spin_unlock_irqrestore_rcu_node(ssp, flags);
696 }
697 
698 /*
699  * Wait until all readers counted by array index idx complete, but
700  * loop an additional time if there is an expedited grace period pending.
701  * The caller must ensure that ->srcu_idx is not changed while checking.
702  */
703 static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
704 {
705 	for (;;) {
706 		if (srcu_readers_active_idx_check(ssp, idx))
707 			return true;
708 		if (--trycount + !srcu_get_delay(ssp) <= 0)
709 			return false;
710 		udelay(SRCU_RETRY_CHECK_DELAY);
711 	}
712 }
713 
714 /*
715  * Increment the ->srcu_idx counter so that future SRCU readers will
716  * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
717  * us to wait for pre-existing readers in a starvation-free manner.
718  */
719 static void srcu_flip(struct srcu_struct *ssp)
720 {
721 	/*
722 	 * Ensure that if this updater saw a given reader's increment
723 	 * from __srcu_read_lock(), that reader was using an old value
724 	 * of ->srcu_idx.  Also ensure that if a given reader sees the
725 	 * new value of ->srcu_idx, this updater's earlier scans cannot
726 	 * have seen that reader's increments (which is OK, because this
727 	 * grace period need not wait on that reader).
728 	 */
729 	smp_mb(); /* E */  /* Pairs with B and C. */
730 
731 	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
732 
733 	/*
734 	 * Ensure that if the updater misses an __srcu_read_unlock()
735 	 * increment, that task's next __srcu_read_lock() will see the
736 	 * above counter update.  Note that both this memory barrier
737 	 * and the one in srcu_readers_active_idx_check() provide the
738 	 * guarantee for __srcu_read_lock().
739 	 */
740 	smp_mb(); /* D */  /* Pairs with C. */
741 }
742 
743 /*
744  * If SRCU is likely idle, return true, otherwise return false.
745  *
746  * Note that it is OK for several current from-idle requests for a new
747  * grace period to specify expediting because they will all end
748  * up requesting the same grace period anyhow.  So no loss.
749  *
750  * Note also that if any CPU (including the current one) is still invoking
751  * callbacks, this function will nevertheless say "idle".  This is not
752  * ideal, but the overhead of checking all CPUs' callback lists is even
753  * less ideal, especially on large systems.  Furthermore, the wakeup
754  * can happen before the callback is fully removed, so we have no choice
755  * but to accept this type of error.
756  *
757  * This function is also subject to counter-wrap errors, but let's face
758  * it, if this function was preempted for enough time for the counters
759  * to wrap, it really doesn't matter whether or not we expedite the grace
760  * period.  The extra overhead of a needlessly expedited grace period is
761  * negligible when amortized over that time period, and the extra latency
762  * of a needlessly non-expedited grace period is similarly negligible.
763  */
764 static bool srcu_might_be_idle(struct srcu_struct *ssp)
765 {
766 	unsigned long curseq;
767 	unsigned long flags;
768 	struct srcu_data *sdp;
769 	unsigned long t;
770 	unsigned long tlast;
771 
772 	check_init_srcu_struct(ssp);
773 	/* If the local srcu_data structure has callbacks, not idle.  */
774 	sdp = raw_cpu_ptr(ssp->sda);
775 	spin_lock_irqsave_rcu_node(sdp, flags);
776 	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
777 		spin_unlock_irqrestore_rcu_node(sdp, flags);
778 		return false; /* Callbacks already present, so not idle. */
779 	}
780 	spin_unlock_irqrestore_rcu_node(sdp, flags);
781 
782 	/*
783 	 * No local callbacks, so probabilistically probe global state.
784 	 * Exact information would require acquiring locks, which would
785 	 * kill scalability, hence the probabilistic nature of the probe.
786 	 */
787 
788 	/* First, see if enough time has passed since the last GP. */
789 	t = ktime_get_mono_fast_ns();
790 	tlast = READ_ONCE(ssp->srcu_last_gp_end);
791 	if (exp_holdoff == 0 ||
792 	    time_in_range_open(t, tlast, tlast + exp_holdoff))
793 		return false; /* Too soon after last GP. */
794 
795 	/* Next, check for probable idleness. */
796 	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
797 	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
798 	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
799 		return false; /* Grace period in progress, so not idle. */
800 	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
801 	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
802 		return false; /* GP # changed, so not idle. */
803 	return true; /* With reasonable probability, idle! */
804 }
805 
806 /*
807  * SRCU callback function to leak a callback.
808  */
809 static void srcu_leak_callback(struct rcu_head *rhp)
810 {
811 }
812 
813 /*
814  * Start an SRCU grace period, and also queue the callback if non-NULL.
815  */
816 static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
817 					     struct rcu_head *rhp, bool do_norm)
818 {
819 	unsigned long flags;
820 	int idx;
821 	bool needexp = false;
822 	bool needgp = false;
823 	unsigned long s;
824 	struct srcu_data *sdp;
825 
826 	check_init_srcu_struct(ssp);
827 	idx = srcu_read_lock(ssp);
828 	sdp = raw_cpu_ptr(ssp->sda);
829 	spin_lock_irqsave_rcu_node(sdp, flags);
830 	if (rhp)
831 		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
832 	rcu_segcblist_advance(&sdp->srcu_cblist,
833 			      rcu_seq_current(&ssp->srcu_gp_seq));
834 	s = rcu_seq_snap(&ssp->srcu_gp_seq);
835 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
836 	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
837 		sdp->srcu_gp_seq_needed = s;
838 		needgp = true;
839 	}
840 	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
841 		sdp->srcu_gp_seq_needed_exp = s;
842 		needexp = true;
843 	}
844 	spin_unlock_irqrestore_rcu_node(sdp, flags);
845 	if (needgp)
846 		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
847 	else if (needexp)
848 		srcu_funnel_exp_start(ssp, sdp->mynode, s);
849 	srcu_read_unlock(ssp, idx);
850 	return s;
851 }
852 
853 /*
854  * Enqueue an SRCU callback on the srcu_data structure associated with
855  * the current CPU and the specified srcu_struct structure, initiating
856  * grace-period processing if it is not already running.
857  *
858  * Note that all CPUs must agree that the grace period extended beyond
859  * all pre-existing SRCU read-side critical sections.  On systems with
860  * more than one CPU, this means that when "func()" is invoked, each CPU
861  * is guaranteed to have executed a full memory barrier since the end of
862  * its last corresponding SRCU read-side critical section whose beginning
863  * preceded the call to call_srcu().  It also means that each CPU executing
864  * an SRCU read-side critical section that continues beyond the start of
865  * "func()" must have executed a memory barrier after the call_srcu()
866  * but before the beginning of that SRCU read-side critical section.
867  * Note that these guarantees include CPUs that are offline, idle, or
868  * executing in user mode, as well as CPUs that are executing in the kernel.
869  *
870  * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
871  * resulting SRCU callback function "func()", then both CPU A and CPU
872  * B are guaranteed to execute a full memory barrier during the time
873  * interval between the call to call_srcu() and the invocation of "func()".
874  * This guarantee applies even if CPU A and CPU B are the same CPU (but
875  * again only if the system has more than one CPU).
876  *
877  * Of course, these guarantees apply only for invocations of call_srcu(),
878  * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
879  * srcu_struct structure.
880  */
881 static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
882 			rcu_callback_t func, bool do_norm)
883 {
884 	if (debug_rcu_head_queue(rhp)) {
885 		/* Probable double call_srcu(), so leak the callback. */
886 		WRITE_ONCE(rhp->func, srcu_leak_callback);
887 		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
888 		return;
889 	}
890 	rhp->func = func;
891 	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
892 }
893 
894 /**
895  * call_srcu() - Queue a callback for invocation after an SRCU grace period
896  * @ssp: srcu_struct in which to queue the callback
897  * @rhp: structure to be used for queueing the SRCU callback.
898  * @func: function to be invoked after the SRCU grace period
899  *
900  * The callback function will be invoked some time after a full SRCU
901  * grace period elapses, in other words after all pre-existing SRCU
902  * read-side critical sections have completed.  However, the callback
903  * function might well execute concurrently with other SRCU read-side
904  * critical sections that started after call_srcu() was invoked.  SRCU
905  * read-side critical sections are delimited by srcu_read_lock() and
906  * srcu_read_unlock(), and may be nested.
907  *
908  * The callback will be invoked from process context, but must nevertheless
909  * be fast and must not block.
910  */
911 void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
912 	       rcu_callback_t func)
913 {
914 	__call_srcu(ssp, rhp, func, true);
915 }
916 EXPORT_SYMBOL_GPL(call_srcu);
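/*
 * Illustrative call_srcu() sketch, using hypothetical names: embed an
 * rcu_head in the structure being protected and free it from the
 * callback once the grace period has elapsed.
 *
 *	struct my_data {
 *		struct rcu_head rh;
 *		int payload;
 *	};
 *
 *	static void my_data_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_data, rh));
 *	}
 *
 *	// Updater: unpublish the old version, then defer its freeing.
 *	old = rcu_dereference_protected(my_pointer, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_pointer, new);
 *	call_srcu(&my_srcu, &old->rh, my_data_free_cb);
 *
 * my_data, my_data_free_cb(), my_pointer, my_lock, and my_srcu are
 * hypothetical; call_srcu() and the RCU pointer accessors are real API.
 */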
917 
918 /*
919  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
920  */
921 static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
922 {
923 	struct rcu_synchronize rcu;
924 
925 	RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
926 			 lock_is_held(&rcu_bh_lock_map) ||
927 			 lock_is_held(&rcu_lock_map) ||
928 			 lock_is_held(&rcu_sched_lock_map),
929 			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
930 
931 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
932 		return;
933 	might_sleep();
934 	check_init_srcu_struct(ssp);
935 	init_completion(&rcu.completion);
936 	init_rcu_head_on_stack(&rcu.head);
937 	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
938 	wait_for_completion(&rcu.completion);
939 	destroy_rcu_head_on_stack(&rcu.head);
940 
941 	/*
942 	 * Make sure that later code is ordered after the SRCU grace
943 	 * period.  This pairs with the spin_lock_irq_rcu_node()
944 	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
945 	 * because the current CPU might have been totally uninvolved with
946 	 * (and thus unordered against) that grace period.
947 	 */
948 	smp_mb();
949 }
950 
951 /**
952  * synchronize_srcu_expedited - Brute-force SRCU grace period
953  * @ssp: srcu_struct with which to synchronize.
954  *
955  * Wait for an SRCU grace period to elapse, but be more aggressive about
956  * spinning rather than blocking when waiting.
957  *
958  * Note that synchronize_srcu_expedited() has the same deadlock and
959  * memory-ordering properties as does synchronize_srcu().
960  */
961 void synchronize_srcu_expedited(struct srcu_struct *ssp)
962 {
963 	__synchronize_srcu(ssp, rcu_gp_is_normal());
964 }
965 EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
966 
967 /**
968  * synchronize_srcu - wait for prior SRCU read-side critical-section completion
969  * @ssp: srcu_struct with which to synchronize.
970  *
971  * Wait for the count to drain to zero of both indexes. To avoid the
972  * possible starvation of synchronize_srcu(), it waits for the count of
973  * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
974  * and then flip the srcu_idx and wait for the count of the other index.
975  *
976  * Can block; must be called from process context.
977  *
978  * Note that it is illegal to call synchronize_srcu() from the corresponding
979  * SRCU read-side critical section; doing so will result in deadlock.
980  * However, it is perfectly legal to call synchronize_srcu() on one
981  * srcu_struct from some other srcu_struct's read-side critical section,
982  * as long as the resulting graph of srcu_structs is acyclic.
983  *
984  * There are memory-ordering constraints implied by synchronize_srcu().
985  * On systems with more than one CPU, when synchronize_srcu() returns,
986  * each CPU is guaranteed to have executed a full memory barrier since
987  * the end of its last corresponding SRCU read-side critical section
988  * whose beginning preceded the call to synchronize_srcu().  In addition,
989  * each CPU having an SRCU read-side critical section that extends beyond
990  * the return from synchronize_srcu() is guaranteed to have executed a
991  * full memory barrier after the beginning of synchronize_srcu() and before
992  * the beginning of that SRCU read-side critical section.  Note that these
993  * guarantees include CPUs that are offline, idle, or executing in user mode,
994  * as well as CPUs that are executing in the kernel.
995  *
996  * Furthermore, if CPU A invoked synchronize_srcu(), which returned
997  * to its caller on CPU B, then both CPU A and CPU B are guaranteed
998  * to have executed a full memory barrier during the execution of
999  * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
1000  * are the same CPU, but again only if the system has more than one CPU.
1001  *
1002  * Of course, these memory-ordering guarantees apply only when
1003  * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
1004  * passed the same srcu_struct structure.
1005  *
1006  * If SRCU is likely idle, expedite the first request.  This semantic
1007  * was provided by Classic SRCU, and is relied upon by its users, so TREE
1008  * SRCU must also provide it.  Note that detecting idleness is heuristic
1009  * and subject to both false positives and negatives.
1010  */
1011 void synchronize_srcu(struct srcu_struct *ssp)
1012 {
1013 	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
1014 		synchronize_srcu_expedited(ssp);
1015 	else
1016 		__synchronize_srcu(ssp, true);
1017 }
1018 EXPORT_SYMBOL_GPL(synchronize_srcu);
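/*
 * Illustrative synchronous-update sketch, using hypothetical names: the
 * classic remove/wait/reclaim pattern with synchronize_srcu() instead of
 * call_srcu(), for updaters that can afford to block.
 *
 *	old = rcu_dereference_protected(my_pointer, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_pointer, new);
 *	synchronize_srcu(&my_srcu);	// all pre-existing readers are done
 *	kfree(old);
 *
 * my_pointer, my_lock, and my_srcu are hypothetical; the RCU pointer
 * accessors and synchronize_srcu() are real API.
 */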
1019 
1020 /**
1021  * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
1022  * @ssp: srcu_struct to provide cookie for.
1023  *
1024  * This function returns a cookie that can be passed to
1025  * poll_state_synchronize_srcu(), which will return true if a full grace
1026  * period has elapsed in the meantime.  It is the caller's responsibility
1027  * to make sure that grace period happens, for example, by invoking
1028  * call_srcu() after return from get_state_synchronize_srcu().
1029  */
1030 unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
1031 {
1032 	// Any prior manipulation of SRCU-protected data must happen
1033 	// before the load from ->srcu_gp_seq.
1034 	smp_mb();
1035 	return rcu_seq_snap(&ssp->srcu_gp_seq);
1036 }
1037 EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
1038 
1039 /**
1040  * start_poll_synchronize_srcu - Provide cookie and start grace period
1041  * @ssp: srcu_struct to provide cookie for.
1042  *
1043  * This function returns a cookie that can be passed to
1044  * poll_state_synchronize_srcu(), which will return true if a full grace
1045  * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
1046  * this function also ensures that any needed SRCU grace period will be
1047  * started.  This convenience does come at a cost in terms of CPU overhead.
1048  */
1049 unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
1050 {
1051 	return srcu_gp_start_if_needed(ssp, NULL, true);
1052 }
1053 EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
1054 
1055 /**
1056  * poll_state_synchronize_srcu - Has cookie's grace period ended?
1057  * @ssp: srcu_struct to provide cookie for.
1058  * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
1059  *
1060  * This function takes the cookie that was returned from either
1061  * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
1062  * returns @true if an SRCU grace period elapsed since the time that the
1063  * cookie was created.
1064  */
1065 bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
1066 {
1067 	if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
1068 		return false;
1069 	// Ensure that the end of the SRCU grace period happens before
1070 	// any subsequent code that the caller might execute.
1071 	smp_mb(); // ^^^
1072 	return true;
1073 }
1074 EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
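/*
 * Illustrative polling sketch, using hypothetical names: a caller that
 * cannot block can grab a cookie, make sure a grace period gets going,
 * and check back later instead of calling synchronize_srcu().
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_srcu(&my_srcu);
 *	// ... do other work ...
 *	if (poll_state_synchronize_srcu(&my_srcu, cookie))
 *		kfree(old);		// grace period has elapsed
 *	else
 *		defer_the_free(old);	// check again later
 *
 * my_srcu, old, and defer_the_free() are hypothetical; the two polling
 * functions are the ones defined just above.
 */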
1075 
1076 /*
1077  * Callback function for srcu_barrier() use.
1078  */
1079 static void srcu_barrier_cb(struct rcu_head *rhp)
1080 {
1081 	struct srcu_data *sdp;
1082 	struct srcu_struct *ssp;
1083 
1084 	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
1085 	ssp = sdp->ssp;
1086 	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1087 		complete(&ssp->srcu_barrier_completion);
1088 }
1089 
1090 /**
1091  * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
1092  * @ssp: srcu_struct on which to wait for in-flight callbacks.
1093  */
1094 void srcu_barrier(struct srcu_struct *ssp)
1095 {
1096 	int cpu;
1097 	struct srcu_data *sdp;
1098 	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
1099 
1100 	check_init_srcu_struct(ssp);
1101 	mutex_lock(&ssp->srcu_barrier_mutex);
1102 	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
1103 		smp_mb(); /* Force ordering following return. */
1104 		mutex_unlock(&ssp->srcu_barrier_mutex);
1105 		return; /* Someone else did our work for us. */
1106 	}
1107 	rcu_seq_start(&ssp->srcu_barrier_seq);
1108 	init_completion(&ssp->srcu_barrier_completion);
1109 
1110 	/* Initial count prevents reaching zero until all CBs are posted. */
1111 	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
1112 
1113 	/*
1114 	 * Each pass through this loop enqueues a callback, but only
1115 	 * on CPUs already having callbacks enqueued.  Note that if
1116 	 * a CPU already has callbacks enqueued, it must have already
1117 	 * registered the need for a future grace period, so all we
1118 	 * need do is enqueue a callback that will use the same
1119 	 * grace period as the last callback already in the queue.
1120 	 */
1121 	for_each_possible_cpu(cpu) {
1122 		sdp = per_cpu_ptr(ssp->sda, cpu);
1123 		spin_lock_irq_rcu_node(sdp);
1124 		atomic_inc(&ssp->srcu_barrier_cpu_cnt);
1125 		sdp->srcu_barrier_head.func = srcu_barrier_cb;
1126 		debug_rcu_head_queue(&sdp->srcu_barrier_head);
1127 		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
1128 					   &sdp->srcu_barrier_head)) {
1129 			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
1130 			atomic_dec(&ssp->srcu_barrier_cpu_cnt);
1131 		}
1132 		spin_unlock_irq_rcu_node(sdp);
1133 	}
1134 
1135 	/* Remove the initial count, at which point reaching zero can happen. */
1136 	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1137 		complete(&ssp->srcu_barrier_completion);
1138 	wait_for_completion(&ssp->srcu_barrier_completion);
1139 
1140 	rcu_seq_end(&ssp->srcu_barrier_seq);
1141 	mutex_unlock(&ssp->srcu_barrier_mutex);
1142 }
1143 EXPORT_SYMBOL_GPL(srcu_barrier);
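/*
 * Illustrative teardown-ordering sketch, using hypothetical names:
 * before cleanup_srcu_struct() may be called, the caller must stop
 * posting new callbacks and then use srcu_barrier() to wait for the
 * ones already posted, otherwise the WARN_ON()s in
 * cleanup_srcu_struct() fire and the srcu_struct is leaked.
 *
 *	static void my_exit(void)
 *	{
 *		stop_posting_new_callbacks();	// hypothetical step
 *		srcu_barrier(&my_srcu);		// wait for posted callbacks
 *		cleanup_srcu_struct(&my_srcu);	// now safe to tear down
 *	}
 *
 * my_exit(), stop_posting_new_callbacks(), and my_srcu are hypothetical;
 * srcu_barrier() and cleanup_srcu_struct() are real API.
 */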
1144 
1145 /**
1146  * srcu_batches_completed - return batches completed.
1147  * @ssp: srcu_struct on which to report batch completion.
1148  *
1149  * Report the number of batches, correlated with, but not necessarily
1150  * precisely the same as, the number of grace periods that have elapsed.
1151  */
1152 unsigned long srcu_batches_completed(struct srcu_struct *ssp)
1153 {
1154 	return READ_ONCE(ssp->srcu_idx);
1155 }
1156 EXPORT_SYMBOL_GPL(srcu_batches_completed);
1157 
1158 /*
1159  * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
1160  * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
1161  * completed in that state.
1162  */
1163 static void srcu_advance_state(struct srcu_struct *ssp)
1164 {
1165 	int idx;
1166 
1167 	mutex_lock(&ssp->srcu_gp_mutex);
1168 
1169 	/*
1170 	 * Because readers might be delayed for an extended period after
1171 	 * fetching ->srcu_idx for their index, at any point in time there
1172 	 * might well be readers using both idx=0 and idx=1.  We therefore
1173 	 * need to wait for readers to clear from both index values before
1174 	 * invoking a callback.
1175 	 *
1176 	 * The load-acquire ensures that we see the accesses performed
1177 	 * by the prior grace period.
1178 	 */
1179 	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
1180 	if (idx == SRCU_STATE_IDLE) {
1181 		spin_lock_irq_rcu_node(ssp);
1182 		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1183 			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
1184 			spin_unlock_irq_rcu_node(ssp);
1185 			mutex_unlock(&ssp->srcu_gp_mutex);
1186 			return;
1187 		}
1188 		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
1189 		if (idx == SRCU_STATE_IDLE)
1190 			srcu_gp_start(ssp);
1191 		spin_unlock_irq_rcu_node(ssp);
1192 		if (idx != SRCU_STATE_IDLE) {
1193 			mutex_unlock(&ssp->srcu_gp_mutex);
1194 			return; /* Someone else started the grace period. */
1195 		}
1196 	}
1197 
1198 	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1199 		idx = 1 ^ (ssp->srcu_idx & 1);
1200 		if (!try_check_zero(ssp, idx, 1)) {
1201 			mutex_unlock(&ssp->srcu_gp_mutex);
1202 			return; /* readers present, retry later. */
1203 		}
1204 		srcu_flip(ssp);
1205 		spin_lock_irq_rcu_node(ssp);
1206 		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
1207 		spin_unlock_irq_rcu_node(ssp);
1208 	}
1209 
1210 	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1211 
1212 		/*
1213 		 * SRCU read-side critical sections are normally short,
1214 		 * so check at least twice in quick succession after a flip.
1215 		 */
1216 		idx = 1 ^ (ssp->srcu_idx & 1);
1217 		if (!try_check_zero(ssp, idx, 2)) {
1218 			mutex_unlock(&ssp->srcu_gp_mutex);
1219 			return; /* readers present, retry later. */
1220 		}
1221 		srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
1222 	}
1223 }
1224 
1225 /*
1226  * Invoke a limited number of SRCU callbacks that have passed through
1227  * their grace period.  If there are more to do, SRCU will reschedule
1228  * the workqueue.  Note that needed memory barriers have been executed
1229  * in this task's context by srcu_readers_active_idx_check().
1230  */
1231 static void srcu_invoke_callbacks(struct work_struct *work)
1232 {
1233 	bool more;
1234 	struct rcu_cblist ready_cbs;
1235 	struct rcu_head *rhp;
1236 	struct srcu_data *sdp;
1237 	struct srcu_struct *ssp;
1238 
1239 	sdp = container_of(work, struct srcu_data, work);
1240 
1241 	ssp = sdp->ssp;
1242 	rcu_cblist_init(&ready_cbs);
1243 	spin_lock_irq_rcu_node(sdp);
1244 	rcu_segcblist_advance(&sdp->srcu_cblist,
1245 			      rcu_seq_current(&ssp->srcu_gp_seq));
1246 	if (sdp->srcu_cblist_invoking ||
1247 	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
1248 		spin_unlock_irq_rcu_node(sdp);
1249 		return;  /* Someone else on the job or nothing to do. */
1250 	}
1251 
1252 	/* We are on the job!  Extract and invoke ready callbacks. */
1253 	sdp->srcu_cblist_invoking = true;
1254 	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
1255 	spin_unlock_irq_rcu_node(sdp);
1256 	rhp = rcu_cblist_dequeue(&ready_cbs);
1257 	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
1258 		debug_rcu_head_unqueue(rhp);
1259 		local_bh_disable();
1260 		rhp->func(rhp);
1261 		local_bh_enable();
1262 	}
1263 
1264 	/*
1265 	 * Update counts, accelerate new callbacks, and if needed,
1266 	 * schedule another round of callback invocation.
1267 	 */
1268 	spin_lock_irq_rcu_node(sdp);
1269 	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
1270 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
1271 				       rcu_seq_snap(&ssp->srcu_gp_seq));
1272 	sdp->srcu_cblist_invoking = false;
1273 	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
1274 	spin_unlock_irq_rcu_node(sdp);
1275 	if (more)
1276 		srcu_schedule_cbs_sdp(sdp, 0);
1277 }
1278 
1279 /*
1280  * Finished one round of SRCU grace period.  Start another if there are
1281  * more SRCU callbacks queued, otherwise put SRCU into not-running state.
1282  */
1283 static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
1284 {
1285 	bool pushgp = true;
1286 
1287 	spin_lock_irq_rcu_node(ssp);
1288 	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1289 		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
1290 			/* All requests fulfilled, time to go idle. */
1291 			pushgp = false;
1292 		}
1293 	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
1294 		/* Outstanding request and no GP.  Start one. */
1295 		srcu_gp_start(ssp);
1296 	}
1297 	spin_unlock_irq_rcu_node(ssp);
1298 
1299 	if (pushgp)
1300 		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
1301 }
1302 
1303 /*
1304  * This is the work-queue function that handles SRCU grace periods.
1305  */
1306 static void process_srcu(struct work_struct *work)
1307 {
1308 	struct srcu_struct *ssp;
1309 
1310 	ssp = container_of(work, struct srcu_struct, work.work);
1311 
1312 	srcu_advance_state(ssp);
1313 	srcu_reschedule(ssp, srcu_get_delay(ssp));
1314 }
1315 
1316 void srcutorture_get_gp_data(enum rcutorture_type test_type,
1317 			     struct srcu_struct *ssp, int *flags,
1318 			     unsigned long *gp_seq)
1319 {
1320 	if (test_type != SRCU_FLAVOR)
1321 		return;
1322 	*flags = 0;
1323 	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
1324 }
1325 EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
1326 
1327 void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
1328 {
1329 	int cpu;
1330 	int idx;
1331 	unsigned long s0 = 0, s1 = 0;
1332 
1333 	idx = ssp->srcu_idx & 0x1;
1334 	pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
1335 		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
1336 	for_each_possible_cpu(cpu) {
1337 		unsigned long l0, l1;
1338 		unsigned long u0, u1;
1339 		long c0, c1;
1340 		struct srcu_data *sdp;
1341 
1342 		sdp = per_cpu_ptr(ssp->sda, cpu);
1343 		u0 = data_race(sdp->srcu_unlock_count[!idx]);
1344 		u1 = data_race(sdp->srcu_unlock_count[idx]);
1345 
1346 		/*
1347 		 * Make sure that a lock is always counted if the corresponding
1348 		 * unlock is counted.
1349 		 */
1350 		smp_rmb();
1351 
1352 		l0 = data_race(sdp->srcu_lock_count[!idx]);
1353 		l1 = data_race(sdp->srcu_lock_count[idx]);
1354 
1355 		c0 = l0 - u0;
1356 		c1 = l1 - u1;
1357 		pr_cont(" %d(%ld,%ld %c)",
1358 			cpu, c0, c1,
1359 			"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
1360 		s0 += c0;
1361 		s1 += c1;
1362 	}
1363 	pr_cont(" T(%ld,%ld)\n", s0, s1);
1364 }
1365 EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
1366 
1367 static int __init srcu_bootup_announce(void)
1368 {
1369 	pr_info("Hierarchical SRCU implementation.\n");
1370 	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
1371 		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
1372 	return 0;
1373 }
1374 early_initcall(srcu_bootup_announce);
1375 
1376 void __init srcu_init(void)
1377 {
1378 	struct srcu_struct *ssp;
1379 
1380 	srcu_init_done = true;
1381 	while (!list_empty(&srcu_boot_list)) {
1382 		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
1383 				      work.work.entry);
1384 		check_init_srcu_struct(ssp);
1385 		list_del_init(&ssp->work.work.entry);
1386 		queue_work(rcu_gp_wq, &ssp->work.work);
1387 	}
1388 }
1389 
1390 #ifdef CONFIG_MODULES
1391 
1392 /* Initialize any global-scope srcu_struct structures used by this module. */
1393 static int srcu_module_coming(struct module *mod)
1394 {
1395 	int i;
1396 	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1397 	int ret;
1398 
1399 	for (i = 0; i < mod->num_srcu_structs; i++) {
1400 		ret = init_srcu_struct(*(sspp++));
1401 		if (WARN_ON_ONCE(ret))
1402 			return ret;
1403 	}
1404 	return 0;
1405 }
1406 
1407 /* Clean up any global-scope srcu_struct structures used by this module. */
1408 static void srcu_module_going(struct module *mod)
1409 {
1410 	int i;
1411 	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1412 
1413 	for (i = 0; i < mod->num_srcu_structs; i++)
1414 		cleanup_srcu_struct(*(sspp++));
1415 }
1416 
1417 /* Handle one module, either coming or going. */
1418 static int srcu_module_notify(struct notifier_block *self,
1419 			      unsigned long val, void *data)
1420 {
1421 	struct module *mod = data;
1422 	int ret = 0;
1423 
1424 	switch (val) {
1425 	case MODULE_STATE_COMING:
1426 		ret = srcu_module_coming(mod);
1427 		break;
1428 	case MODULE_STATE_GOING:
1429 		srcu_module_going(mod);
1430 		break;
1431 	default:
1432 		break;
1433 	}
1434 	return ret;
1435 }
1436 
1437 static struct notifier_block srcu_module_nb = {
1438 	.notifier_call = srcu_module_notify,
1439 	.priority = 0,
1440 };
1441 
1442 static __init int init_srcu_module_notifier(void)
1443 {
1444 	int ret;
1445 
1446 	ret = register_module_notifier(&srcu_module_nb);
1447 	if (ret)
1448 		pr_warn("Failed to register srcu module notifier\n");
1449 	return ret;
1450 }
1451 late_initcall(init_srcu_module_notifier);
1452 
1453 #endif /* #ifdef CONFIG_MODULES */
1454