/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif


/*
 * Definition for rcupdate control block.  The batch counters start at
 * -300 rather than 0, presumably so that they wrap soon after boot and
 * any batch-number wraparound bugs surface early.
 */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.cur = -300,
	.completed = -300,
	.pending = -300,
	.lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
	.cpumask = CPU_BITS_NONE,
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.cur = -300,
	.completed = -300,
	.pending = -300,
	.lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
	.cpumask = CPU_BITS_NONE,
};

DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };

static int blimit = 10;		/* Maximum callbacks per softirq. */
static int qhimark = 10000;	/* If this many pending, ignore blimit. */
static int qlowmark = 100;	/* Once only this many pending, use blimit. */

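/*
 * How the three knobs above interact (see __call_rcu() and
 * rcu_do_batch() below): once a CPU's callback backlog exceeds qhimark,
 * batch limiting is switched off (blimit = INT_MAX) and a quiescent
 * state is forced; once the backlog drains to qlowmark or below, the
 * per-softirq invocation limit reverts to blimit.
 */
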
#ifdef CONFIG_SMP
static void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
	int cpu;
	unsigned long flags;

	set_need_resched();
	spin_lock_irqsave(&rcp->lock, flags);
	if (unlikely(!rcp->signaled)) {
		rcp->signaled = 1;
		/*
		 * Don't send an IPI to ourselves. With irqs disabled,
		 * rdp->cpu is the current cpu.
		 *
		 * cpu_online_mask is updated by _cpu_down()
		 * using __stop_machine(). Since we're in an irqs-disabled
		 * section, __stop_machine() is not executing, hence
		 * cpu_online_mask is stable.
		 *
		 * However, a cpu might have been offlined _just_ before
		 * we disabled irqs while entering here.
		 * And the rcu subsystem might not yet have handled the
		 * CPU_DEAD notification, leading to the offlined cpu's bit
		 * being set in rcp->cpumask.
		 *
		 * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent
		 * sending smp_send_reschedule() to an offlined CPU.
		 */
		for_each_cpu_and(cpu,
				  to_cpumask(rcp->cpumask), cpu_online_mask) {
			if (cpu != rdp->cpu)
				smp_send_reschedule(cpu);
		}
	}
	spin_unlock_irqrestore(&rcp->lock, flags);
}
#else
static inline void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
	set_need_resched();
}
#endif

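/*
 * Pending callbacks live on the singly-linked list rdp->nxtlist,
 * partitioned by three tail pointers (summarizing the struct rcu_data
 * comment in the header):
 *
 *	[nxtlist, *nxttail[0])		batch# <= rdp->batch - 2
 *	[*nxttail[0], *nxttail[1])	batch# == rdp->batch - 1
 *	[*nxttail[1], *nxttail[2])	batch# == rdp->batch
 *
 * rcu_batch_before()/rcu_batch_after() compare batch numbers with
 * wraparound-safe signed subtraction.
 */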
static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
		struct rcu_data *rdp)
{
	long batch;

	head->next = NULL;
	smp_mb(); /* Read of rcp->cur must happen after any change by caller. */

	/*
	 * Determine the batch number of this callback.
	 *
	 * Using ACCESS_ONCE to avoid the following error when gcc eliminates
	 * the local variable "batch" and emits code like this:
	 *	1) rdp->batch = rcp->cur + 1 # gets old value
	 *	......
	 *	2) rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
	 * then [*nxttail[0], *nxttail[1]) may contain callbacks
	 * whose batch# == rdp->batch; see the struct rcu_data comment.
	 */
	batch = ACCESS_ONCE(rcp->cur) + 1;

	if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
		/* process callbacks */
		rdp->nxttail[0] = rdp->nxttail[1];
		rdp->nxttail[1] = rdp->nxttail[2];
		if (rcu_batch_after(batch - 1, rdp->batch))
			rdp->nxttail[0] = rdp->nxttail[2];
	}

	rdp->batch = batch;
	*rdp->nxttail[2] = head;
	rdp->nxttail[2] = &head->next;

	if (unlikely(++rdp->qlen > qhimark)) {
		rdp->blimit = INT_MAX;
		force_quiescent_state(rdp, &rcu_ctrlblk);
	}
}

#ifdef CONFIG_RCU_CPU_STALL_DETECTOR

static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
{
	rcp->gp_start = jiffies;
	rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
}

static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
{
	int cpu;
	long delta;
	unsigned long flags;

	/* Only let one CPU complain about others per time interval. */

	spin_lock_irqsave(&rcp->lock, flags);
	delta = jiffies - rcp->jiffies_stall;
	if (delta < 2 || rcp->cur != rcp->completed) {
		spin_unlock_irqrestore(&rcp->lock, flags);
		return;
	}
	rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
	spin_unlock_irqrestore(&rcp->lock, flags);

	/* OK, time to rat on our buddy... */

	printk(KERN_ERR "INFO: RCU detected CPU stalls:");
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask)))
			printk(" %d", cpu);
	}
	printk(" (detected by %d, t=%ld jiffies)\n",
	       smp_processor_id(), (long)(jiffies - rcp->gp_start));
}

static void print_cpu_stall(struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
			smp_processor_id(), jiffies,
			jiffies - rcp->gp_start);
	dump_stack();
	spin_lock_irqsave(&rcp->lock, flags);
	if ((long)(jiffies - rcp->jiffies_stall) >= 0)
		rcp->jiffies_stall =
			jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
	spin_unlock_irqrestore(&rcp->lock, flags);
	set_need_resched();  /* kick ourselves to get things going. */
}

static void check_cpu_stall(struct rcu_ctrlblk *rcp)
{
	long delta;

	delta = jiffies - rcp->jiffies_stall;
	if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) &&
		delta >= 0) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(rcp);

	} else if (rcp->cur != rcp->completed && delta >= 2) {

		/* They had time to dump their own stack, so complain. */
		print_other_cpu_stall(rcp);
	}
}

#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
{
}

static inline void check_cpu_stall(struct rcu_ctrlblk *rcp)
{
}

#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	head->func = func;
	local_irq_save(flags);
	__call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
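
/*
 * Illustrative usage only (struct and function names here are
 * hypothetical, not part of this file): embed a struct rcu_head in the
 * RCU-protected structure and reclaim it from the callback, e.g.
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct foo, rcu));
 *	}
 *
 *	... after unlinking 'old' from all reader-visible structures:
 *	call_rcu(&old->rcu, foo_reclaim);
 */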

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock() if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh() if in process context. These may be nested.
 */
void call_rcu_bh(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	head->func = func;
	local_irq_save(flags);
	__call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
	return rcu_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU bottom-half batches processed thus far.
 * Useful for debug and statistics.
 */
long rcu_batches_completed_bh(void)
{
	return rcu_bh_ctrlblk.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/* Raises the softirq for processing RCU callbacks. */
static inline void raise_rcu_softirq(void)
{
	raise_softirq(RCU_SOFTIRQ);
}

/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_head *next, *list;
	int count = 0;

	list = rdp->donelist;
	while (list) {
		next = list->next;
		prefetch(next);
		list->func(list);
		list = next;
		if (++count >= rdp->blimit)
			break;
	}
	rdp->donelist = list;

	local_irq_save(flags);
	rdp->qlen -= count;
	local_irq_restore(flags);
	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
		rdp->blimit = blimit;

	if (!rdp->donelist)
		rdp->donetail = &rdp->donelist;
	else
		raise_rcu_softirq();	/* more callbacks remain; come back later */
}

/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus; they must pick it up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the beginning
 *   of the grace period. If so, the cpu is cleared from rcu_ctrlblk.cpumask.
 *   If the bitmap becomes empty, the grace period is completed.
 *   rcu_check_quiescent_state then calls rcu_start_batch(), via cpu_quiet(),
 *   to start the next grace period (if necessary).
 */
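
/*
 * Counter invariants (a summary of the code below): rcp->completed ==
 * rcp->cur means no grace period is in progress, while rcp->cur ==
 * rcp->completed + 1 means one is.  rcp->pending records the newest
 * batch any CPU has requested; rcu_start_batch() advances rcp->cur
 * toward it only after the previous grace period has completed.
 */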

/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
	if (rcp->cur != rcp->pending &&
			rcp->completed == rcp->cur) {
		rcp->cur++;
		record_gp_stall_check_time(rcp);

		/*
		 * A barrier is needed between incrementing rcp->cur and
		 * reading nohz_cpu_mask. Otherwise tickless idle CPUs can
		 * be included in rcp->cpumask, which will extend grace
		 * periods unnecessarily.
		 */
		smp_mb();
		cpumask_andnot(to_cpumask(rcp->cpumask),
			       cpu_online_mask, nohz_cpu_mask);

		rcp->signaled = 0;
	}
}

/*
 * A cpu went through a quiescent state since the beginning of the grace
 * period. Clear it from the cpu mask and complete the grace period if it was
 * the last cpu. Start another grace period if someone has further entries
 * pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
	cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask));
	if (cpumask_empty(to_cpumask(rcp->cpumask))) {
		/* batch completed! */
		rcp->completed = rcp->cur;
		rcu_start_batch(rcp);
	}
}

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so, and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	unsigned long flags;

	if (rdp->quiescbatch != rcp->cur) {
		/* start new grace period: */
		rdp->qs_pending = 1;
		rdp->passed_quiesc = 0;
		rdp->quiescbatch = rcp->cur;
		return;
	}

	/* Grace period already completed for this cpu?
	 * qs_pending is checked instead of the actual bitmap to avoid
	 * cacheline thrashing.
	 */
	if (!rdp->qs_pending)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If not, then exit and wait for the next call.
	 */
	if (!rdp->passed_quiesc)
		return;
	rdp->qs_pending = 0;

	spin_lock_irqsave(&rcp->lock, flags);
	/*
	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
	 * during cpu startup. Ignore the quiescent state.
	 */
	if (likely(rdp->quiescbatch == rcp->cur))
		cpu_quiet(rdp->cpu, rcp);

	spin_unlock_irqrestore(&rcp->lock, flags);
}


#ifdef CONFIG_HOTPLUG_CPU

/*
 * Warning! This is a helper for rcu_offline_cpu; do not use it elsewhere
 * without reviewing the locking requirements: the list it pulls from has
 * to belong to a cpu which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
				struct rcu_head **tail, long batch)
{
	unsigned long flags;

	if (list) {
		local_irq_save(flags);
		this_rdp->batch = batch;
		*this_rdp->nxttail[2] = list;
		this_rdp->nxttail[2] = tail;
		local_irq_restore(flags);
	}
}

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
				struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	unsigned long flags;

	/*
	 * If the cpu going offline owns the grace period,
	 * we can block indefinitely waiting for it, so flush
	 * it here.
	 */
	spin_lock_irqsave(&rcp->lock, flags);
	if (rcp->cur != rcp->completed)
		cpu_quiet(rdp->cpu, rcp);
	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
	spin_unlock(&rcp->lock);	/* irqs stay disabled for the qlen update */

	this_rdp->qlen += rdp->qlen;
	local_irq_restore(flags);
}

static void rcu_offline_cpu(int cpu)
{
	struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
	struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
					&per_cpu(rcu_data, cpu));
	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
					&per_cpu(rcu_bh_data, cpu));
	put_cpu_var(rcu_data);
	put_cpu_var(rcu_bh_data);
}

#else

static void rcu_offline_cpu(int cpu)
{
}

#endif

/*
 * This does the RCU processing work from softirq context.
 */
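/*
 * Roughly: (1) advance callbacks whose grace period has completed from
 * rdp->nxtlist to rdp->donelist, (2) if this CPU's newest batch has not
 * yet been scheduled, record it in rcp->pending and possibly start it,
 * (3) check for a quiescent state, and (4) invoke completed callbacks.
 */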
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	unsigned long flags;
	long completed_snap;

	if (rdp->nxtlist) {
		local_irq_save(flags);
		completed_snap = ACCESS_ONCE(rcp->completed);

		/*
		 * Move the other grace-period-completed entries to
		 * [rdp->nxtlist, *rdp->nxttail[0]) temporarily.
		 */
		if (!rcu_batch_before(completed_snap, rdp->batch))
			rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
		else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
			rdp->nxttail[0] = rdp->nxttail[1];

		/*
		 * The grace period for entries in
		 * [rdp->nxtlist, *rdp->nxttail[0]) has completed, so
		 * move these entries to the donelist.
		 */
		if (rdp->nxttail[0] != &rdp->nxtlist) {
			*rdp->donetail = rdp->nxtlist;
			rdp->donetail = rdp->nxttail[0];
			rdp->nxtlist = *rdp->nxttail[0];
			*rdp->donetail = NULL;

			if (rdp->nxttail[1] == rdp->nxttail[0])
				rdp->nxttail[1] = &rdp->nxtlist;
			if (rdp->nxttail[2] == rdp->nxttail[0])
				rdp->nxttail[2] = &rdp->nxtlist;
			rdp->nxttail[0] = &rdp->nxtlist;
		}

		local_irq_restore(flags);

		if (rcu_batch_after(rdp->batch, rcp->pending)) {
			unsigned long flags2;

			/* and start it/schedule start if it's a new batch */
			spin_lock_irqsave(&rcp->lock, flags2);
			if (rcu_batch_after(rdp->batch, rcp->pending)) {
				rcp->pending = rdp->batch;
				rcu_start_batch(rcp);
			}
			spin_unlock_irqrestore(&rcp->lock, flags2);
		}
	}

	rcu_check_quiescent_state(rcp, rdp);
	if (rdp->donelist)
		rcu_do_batch(rdp);
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
	/*
	 * Memory references from any prior RCU read-side critical sections
	 * executed by the interrupted code must be seen before any RCU
	 * grace-period manipulations below.
	 */

	smp_mb(); /* See above block comment. */

	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));

	/*
	 * Memory references from any later RCU read-side critical sections
	 * executed by the interrupted code must be seen after any RCU
	 * grace-period manipulations above.
	 */

	smp_mb(); /* See above block comment. */
}

static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	/* Check for CPU stalls, if enabled. */
	check_cpu_stall(rcp);

	if (rdp->nxtlist) {
		long completed_snap = ACCESS_ONCE(rcp->completed);

		/*
		 * This cpu has pending rcu entries and the grace period
		 * for them has completed.
		 */
		if (!rcu_batch_before(completed_snap, rdp->batch))
			return 1;
		if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
				rdp->nxttail[0] != rdp->nxttail[1])
			return 1;
		if (rdp->nxttail[0] != &rdp->nxtlist)
			return 1;

		/*
		 * This cpu has pending rcu entries and the new batch
		 * for them hasn't been started, nor has its start been
		 * scheduled.
		 */
		if (rcu_batch_after(rdp->batch, rcp->pending))
			return 1;
	}

	/* This cpu has finished callbacks to invoke. */
	if (rdp->donelist)
		return 1;

	/* The rcu core waits for a quiescent state from the cpu. */
	if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
		return 1;

	/* nothing to do */
	return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
int rcu_pending(int cpu)
{
	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);

	return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
}

/*
 * Top-level function driving RCU grace-period detection, normally
 * invoked from the scheduler-clock interrupt.  This function simply
 * increments counters that are read only from softirq by this same
 * CPU, so there are no memory barriers required.
 */
void rcu_check_callbacks(int cpu, int user)
{
	if (user ||
	    (idle_cpu(cpu) && rcu_scheduler_active &&
	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt.  In this case, the CPU is in
		 * a quiescent state, so count it.
		 *
		 * Also do a memory barrier.  This is needed to handle
		 * the case where writes from a preempt-disable section
		 * of code get reordered into schedule() by this CPU's
		 * write buffer.  The memory barrier makes sure that
		 * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
		 * by other CPUs to happen after any such write.
		 */

		smp_mb();  /* See above block comment. */
		rcu_qsctr_inc(cpu);
		rcu_bh_qsctr_inc(cpu);

	} else if (!in_softirq()) {

		/*
		 * Get here if this CPU did not take its interrupt from
		 * softirq, in other words, if it is not interrupting
		 * a rcu_bh read-side critical section.  This is therefore
		 * an rcu_bh quiescent state, so count it.  The memory
		 * barrier is needed for the same reason as the one above.
		 */

		smp_mb();  /* See above block comment. */
		rcu_bh_qsctr_inc(cpu);
	}
	raise_rcu_softirq();
}

static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
						struct rcu_data *rdp)
{
	unsigned long flags;

	spin_lock_irqsave(&rcp->lock, flags);
	memset(rdp, 0, sizeof(*rdp));
	rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
	rdp->donetail = &rdp->donelist;
	rdp->quiescbatch = rcp->completed;
	rdp->qs_pending = 0;
	rdp->cpu = cpu;
	rdp->blimit = blimit;
	spin_unlock_irqrestore(&rcp->lock, flags);
}

static void __cpuinit rcu_online_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}
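
/*
 * rcu_online_cpu() runs for each CPU as it comes up; open_softirq()
 * merely records the handler, so registering RCU_SOFTIRQ repeatedly
 * is harmless.
 */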

static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		rcu_online_cpu(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata rcu_nb = {
	.notifier_call	= rcu_cpu_notify,
};

/*
 * Initializes the RCU mechanism.  Assumed to be called early, that is,
 * before the local timer (SMP) or the jiffies timer (UP) is set up.
 * Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init __rcu_init(void)
{
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
	printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	/* Register notifier for non-boot CPUs */
	register_cpu_notifier(&rcu_nb);
}

module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);