/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *          http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/module.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rcu.h>

#include "rcu.h"

module_param(rcu_expedited, int, 0);

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
        current->rcu_read_lock_nesting++;
        barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting != 1) {
                --t->rcu_read_lock_nesting;
        } else {
                barrier();  /* critical section before exit code. */
                t->rcu_read_lock_nesting = INT_MIN;
#ifdef CONFIG_PROVE_RCU_DELAY
                udelay(10); /* Make preemption more probable. */
#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
                barrier();  /* assign before ->rcu_read_unlock_special load */
                if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                        rcu_read_unlock_special(t);
                barrier();  /* ->rcu_read_unlock_special load before assign */
                t->rcu_read_lock_nesting = 0;
        }
#ifdef CONFIG_PROVE_LOCKING
        {
                int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

                WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
        }
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
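
/*
 * For reference, a minimal reader built on these primitives looks like
 * the following (a sketch only; "gp" and "do_something_with()" are
 * illustrative names, not part of this file):
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 *
 * The nesting counter above is what makes such sections cheap to
 * enter and exit even when they nest.
 */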

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings,
 * as debug_check_no_locks_held() already does this if lockdep
 * is enabled.
 */
void exit_rcu(void)
{
        struct task_struct *t = current;

        if (likely(list_empty(&current->rcu_node_entry)))
                return;
        t->rcu_read_lock_nesting = 1;
        barrier();
        t->rcu_read_unlock_special = RCU_READ_UNLOCK_BLOCKED;
        __rcu_read_unlock();
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int debug_lockdep_rcu_enabled(void)
{
        return rcu_scheduler_active && debug_locks &&
               current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
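
/*
 * Typical consumers gate lockdep-RCU diagnostics on this predicate.
 * A paraphrased sketch of the pattern (not a verbatim copy of the
 * rcu_lockdep_assert() machinery in rcupdate.h):
 *
 *	if (debug_lockdep_rcu_enabled() && !(condition))
 *		lockdep_rcu_suspicious(__FILE__, __LINE__, "reason");
 *
 * This suppresses splats during early boot and within lockdep
 * recursion, where the checks would be unreliable.
 */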

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
        if (!debug_lockdep_rcu_enabled())
                return 1;
        if (rcu_is_cpu_idle())
                return 0;
        if (!rcu_lockdep_current_cpu_online())
                return 0;
        return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
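
/*
 * A minimal sketch of the intended use, assuming a function that must
 * be called within an RCU-bh read-side critical section ("walk_stats"
 * and "stats_list" are illustrative names, not part of this file):
 *
 *	static void walk_stats(void)
 *	{
 *		WARN_ON_ONCE(!rcu_read_lock_bh_held());
 *		list_for_each_entry_rcu(s, &stats_list, node)
 *			account(s);
 *	}
 */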

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}

void wait_rcu_gp(call_rcu_func_t crf)
{
        struct rcu_synchronize rcu;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        crf(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(wait_rcu_gp);
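
/*
 * The synchronous grace-period primitives are thin wrappers around
 * wait_rcu_gp(), each passing in its flavor's callback-registration
 * function.  A sketch of the usual pattern (the actual definitions
 * live in the flavor-specific implementation files):
 *
 *	void synchronize_rcu(void)
 *	{
 *		wait_rcu_gp(call_rcu);
 *	}
 */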

#ifdef CONFIG_PROVE_RCU
/*
 * wrapper function to avoid #include problems.
 */
int rcu_my_thread_group_empty(void)
{
        return thread_group_empty(current);
}
EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);
#endif /* #ifdef CONFIG_PROVE_RCU */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static inline void debug_init_rcu_head(struct rcu_head *head)
{
        debug_object_init(head, &rcuhead_debug_descr);
}

static inline void debug_rcu_head_free(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int rcuhead_fixup_init(void *addr, enum debug_obj_state state)
{
        struct rcu_head *head = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                /*
                 * Ensure that queued callbacks are all executed.
                 * If we detect that we are nested in a RCU read-side critical
                 * section, we should simply fail, otherwise we would deadlock.
                 * In !PREEMPT configurations, there is no way to tell if we are
                 * in a RCU read-side critical section or not, so we never
                 * attempt any fixup and just print a warning.
                 */
#ifndef CONFIG_PREEMPT
                WARN_ON_ONCE(1);
                return 0;
#endif
                if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
                    irqs_disabled()) {
                        WARN_ON_ONCE(1);
                        return 0;
                }
                rcu_barrier();
                rcu_barrier_sched();
                rcu_barrier_bh();
                debug_object_init(head, &rcuhead_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
        struct rcu_head *head = addr;

        switch (state) {

        case ODEBUG_STATE_NOTAVAILABLE:
                /*
                 * This is not really a fixup. We just make sure that it is
                 * tracked in the object tracker.
                 */
                debug_object_init(head, &rcuhead_debug_descr);
                debug_object_activate(head, &rcuhead_debug_descr);
                return 0;

        case ODEBUG_STATE_ACTIVE:
                /*
                 * Ensure that queued callbacks are all executed.
                 * If we detect that we are nested in a RCU read-side critical
                 * section, we should simply fail, otherwise we would deadlock.
                 * In !PREEMPT configurations, there is no way to tell if we are
                 * in a RCU read-side critical section or not, so we never
                 * attempt any fixup and just print a warning.
                 */
#ifndef CONFIG_PREEMPT
                WARN_ON_ONCE(1);
                return 0;
#endif
                if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
                    irqs_disabled()) {
                        WARN_ON_ONCE(1);
                        return 0;
                }
                rcu_barrier();
                rcu_barrier_sched();
                rcu_barrier_bh();
                debug_object_activate(head, &rcuhead_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
{
        struct rcu_head *head = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                /*
                 * Ensure that queued callbacks are all executed.
                 * If we detect that we are nested in a RCU read-side critical
                 * section, we should simply fail, otherwise we would deadlock.
                 * In !PREEMPT configurations, there is no way to tell if we are
                 * in a RCU read-side critical section or not, so we never
                 * attempt any fixup and just print a warning.
                 */
#ifndef CONFIG_PREEMPT
                WARN_ON_ONCE(1);
                return 0;
#endif
                if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
                    irqs_disabled()) {
                        WARN_ON_ONCE(1);
                        return 0;
                }
                rcu_barrier();
                rcu_barrier_sched();
                rcu_barrier_bh();
                debug_object_free(head, &rcuhead_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be destroyed
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
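
/*
 * wait_rcu_gp() above is the canonical user of this pair: it wraps an
 * on-stack rcu_head with init_rcu_head_on_stack() before handing it to
 * the callback-registration function, and calls
 * destroy_rcu_head_on_stack() once the callback has run.  Condensed:
 *
 *	struct rcu_head head;
 *
 *	init_rcu_head_on_stack(&head);
 *	call_rcu(&head, some_callback);
 *	... wait until some_callback() has been invoked ...
 *	destroy_rcu_head_on_stack(&head);
 */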

struct debug_obj_descr rcuhead_debug_descr = {
        .name = "rcu_head",
        .fixup_init = rcuhead_fixup_init,
        .fixup_activate = rcuhead_fixup_activate,
        .fixup_free = rcuhead_fixup_free,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old, unsigned long c)
{
        trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA        (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA        0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
        int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);

        /*
         * Limit check must be consistent with the Kconfig limits
         * for CONFIG_RCU_CPU_STALL_TIMEOUT.
         */
        if (till_stall_check < 3) {
                ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
                till_stall_check = 3;
        } else if (till_stall_check > 300) {
                ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
                till_stall_check = 300;
        }
        return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
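
/*
 * Worked example (values assumed for illustration): with
 * rcu_cpu_stall_timeout = 21 and HZ = 1000, the function returns
 * 21 * 1000 = 21000 jiffies, plus another 5 * HZ = 5000 jiffies of
 * slack when CONFIG_PROVE_RCU defines RCU_STALL_DELAY_DELTA.
 * Timeouts outside [3, 300] seconds are clamped and written back.
 */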

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
        rcu_cpu_stall_suppress = 1;
        return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
        .notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
        return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */