/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@vnet.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/atomic.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <asm/processor.h>
#include <linux/cpumask.h>

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
#define ulong2long(a)		(*(long *)(&(a)))
#define USHORT_CMP_GE(a, b)	(USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
#define USHORT_CMP_LT(a, b)	(USHRT_MAX / 2 < (unsigned short)((a) - (b)))
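
/*
 * The _CMP_ macros above compare free-running counters in a way that
 * tolerates wraparound: (a) - (b) is computed in unsigned arithmetic,
 * so ULONG_CMP_GE(a, b) reports "a is at or after b" whenever the two
 * values are within half the counter space of each other. A
 * hypothetical sketch of the idea (the variables are invented for
 * this example):
 *
 *	unsigned long a = ULONG_MAX;	// About to wrap.
 *	unsigned long b = a + 3;	// Wraps around to 2.
 *
 *	// Plain "b >= a" is false here, but the modular comparison
 *	// correctly reports that b is logically ahead of a:
 *	BUG_ON(!ULONG_CMP_GE(b, a));	// (b) - (a) == 3 <= ULONG_MAX / 2
 */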

/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
void rcu_barrier_tasks_rude(void);
void synchronize_rcu(void);

#ifdef CONFIG_PREEMPT_RCU

void __rcu_read_lock(void);
void __rcu_read_unlock(void);

/*
 * Defined as a macro as it is a very low level header included from
 * areas that don't even know about current. This gives the rcu_read_lock()
 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)

#else /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TINY_RCU
#define rcu_read_unlock_strict() do { } while (0)
#else
void rcu_read_unlock_strict(void);
#endif

static inline void __rcu_read_lock(void)
{
	preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
	preempt_enable();
	rcu_read_unlock_strict();
}

static inline int rcu_preempt_depth(void)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/* Internal to kernel */
void rcu_init(void);
extern int rcu_scheduler_active __read_mostly;
void rcu_sched_clock_irq(int user);
void rcu_report_dead(unsigned int cpu);
void rcutree_migrate_callbacks(int cpu);

#ifdef CONFIG_TASKS_RCU_GENERIC
void rcu_init_tasks_generic(void);
#else
static inline void rcu_init_tasks_generic(void) { }
#endif

#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
#else /* #ifdef CONFIG_RCU_STALL_COMMON */
static inline void rcu_sysrq_start(void) { }
static inline void rcu_sysrq_end(void) { }
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
#endif /* CONFIG_NO_HZ_FULL */

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
void rcu_nocb_flush_deferred_wakeup(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void) { }
static inline void rcu_nocb_flush_deferred_wakeup(void) { }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

/**
 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
 * @a: Code that RCU needs to pay attention to.
 *
 * RCU read-side critical sections are forbidden in the inner idle loop,
 * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
 * will happily ignore any such read-side critical sections. However,
 * things like powertop need tracepoints in the inner idle loop.
 *
 * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
 * will tell RCU that it needs to pay attention, invoke its argument
 * (in this example, calling the do_something_with_RCU() function),
 * and then tell RCU to go back to ignoring this CPU. It is permissible
 * to nest RCU_NONIDLE() wrappers, but not indefinitely (the limit is
 * on the order of a million or so, even on 32-bit systems). It is
 * not legal to block within RCU_NONIDLE(), nor is it permissible to
 * transfer control either into or out of RCU_NONIDLE()'s statement.
 */
#define RCU_NONIDLE(a) \
	do { \
		rcu_irq_enter_irqson(); \
		do { a; } while (0); \
		rcu_irq_exit_irqson(); \
	} while (0)
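
/*
 * For illustration only, a hypothetical tracepoint in the inner idle
 * loop could be wrapped as follows (trace_my_idle_event() is an
 * invented name, not an existing tracepoint):
 *
 *	RCU_NONIDLE(trace_my_idle_event(cpu));
 *
 * RCU treats the wrapped statement as running on a non-idle CPU, so
 * any RCU read-side critical sections within it are properly tracked.
 */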

/*
 * Note a quasi-voluntary context switch for RCU-tasks's benefit.
 * This is a macro rather than an inline function to avoid #include hell.
 */
#ifdef CONFIG_TASKS_RCU_GENERIC

# ifdef CONFIG_TASKS_RCU
# define rcu_tasks_classic_qs(t, preempt) \
	do { \
		if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \
			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
	} while (0)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
# else
# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
# define call_rcu_tasks call_rcu
# define synchronize_rcu_tasks synchronize_rcu
# endif

# ifdef CONFIG_TASKS_TRACE_RCU
# define rcu_tasks_trace_qs(t) \
	do { \
		if (!likely(READ_ONCE((t)->trc_reader_checked)) && \
		    !unlikely(READ_ONCE((t)->trc_reader_nesting))) { \
			smp_store_release(&(t)->trc_reader_checked, true); \
			smp_mb(); /* Readers partitioned by store. */ \
		} \
	} while (0)
# else
# define rcu_tasks_trace_qs(t) do { } while (0)
# endif

#define rcu_tasks_qs(t, preempt) \
do { \
	rcu_tasks_classic_qs((t), (preempt)); \
	rcu_tasks_trace_qs((t)); \
} while (0)

# ifdef CONFIG_TASKS_RUDE_RCU
void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks_rude(void);
# endif

#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
#define rcu_tasks_qs(t, preempt) do { } while (0)
#define rcu_note_voluntary_context_switch(t) do { } while (0)
#define call_rcu_tasks call_rcu
#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */

/**
 * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
 *
 * This macro resembles cond_resched(), except that it is defined to
 * report potential quiescent states to RCU-tasks even if the cond_resched()
 * machinery were to be shut off, as some advocate for PREEMPTION kernels.
 */
#define cond_resched_tasks_rcu_qs() \
do { \
	rcu_tasks_qs(current, false); \
	cond_resched(); \
} while (0)

/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */

#if defined(CONFIG_TREE_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

/*
 * The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls
 * are needed for dynamic initialization and destruction of rcu_head
 * on the stack, and init_rcu_head()/destroy_rcu_head() are needed for
 * dynamic initialization and destruction of statically allocated rcu_head
 * structures. However, rcu_head structures allocated dynamically in the
 * heap don't need any initialization.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head);
void destroy_rcu_head(struct rcu_head *head);
void init_rcu_head_on_stack(struct rcu_head *head);
void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head(struct rcu_head *head) { }
static inline void destroy_rcu_head(struct rcu_head *head) { }
static inline void init_rcu_head_on_stack(struct rcu_head *head) { }
static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { }
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void) { return true; }
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
	lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
{
	lock_release(map, _THIS_IP_);
}

extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);
int rcu_read_lock_sched_held(void);
int rcu_read_lock_any_held(void);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a) do { } while (0)
# define rcu_lock_release(a) do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

static inline int rcu_read_lock_sched_held(void)
{
	return !preemptible();
}

static inline int rcu_read_lock_any_held(void)
{
	return !preemptible();
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

/**
 * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
 * @c: condition to check
 * @s: informative message
 */
#define RCU_LOCKDEP_WARN(c, s) \
	do { \
		static bool __section(".data.unlikely") __warned; \
		if ((c) && debug_lockdep_rcu_enabled() && !__warned) { \
			__warned = true; \
			lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
		} \
	} while (0)

#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
			 "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void) { }
#endif /* #else #ifdef CONFIG_PROVE_RCU */

#define rcu_sleep_check() \
	do { \
		rcu_preempt_sleep_check(); \
		RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
				 "Illegal context switch in RCU-bh read-side critical section"); \
		RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
				 "Illegal context switch in RCU-sched read-side critical section"); \
	} while (0)

#else /* #ifdef CONFIG_PROVE_RCU */

#define RCU_LOCKDEP_WARN(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/*
 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
 * and rcu_assign_pointer(). Some of these could be folded into their
 * callers, but they are left separate in order to ease introduction of
 * multiple pointer markings to match different RCU implementations
 * (e.g., __srcu), should this make sense in the future.
 */

#ifdef __CHECKER__
#define rcu_check_sparse(p, space) \
	((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_check_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */

#define __rcu_access_pointer(p, space) \
({ \
	typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
	rcu_check_sparse(p, space); \
	((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_check(p, c, space) \
({ \
	/* Dependency order vs. p above. */ \
	typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
	rcu_check_sparse(p, space); \
	((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
	rcu_check_sparse(p, space); \
	((typeof(*p) __force __kernel *)(p)); \
})
#define rcu_dereference_raw(p) \
({ \
	/* Dependency order vs. p above. */ \
	typeof(p) ________p1 = READ_ONCE(p); \
	((typeof(*p) __force __kernel *)(________p1)); \
})

/**
 * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
 * @v: The value to statically initialize with.
 */
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)

/**
 * rcu_assign_pointer() - assign to RCU-protected pointer
 * @p: pointer to assign to
 * @v: value to assign (publish)
 *
 * Assigns the specified value to the specified RCU-protected
 * pointer, ensuring that any concurrent RCU readers will see
 * any prior initialization.
 *
 * Inserts memory barriers on architectures that require them
 * (which is most of them), and also prevents the compiler from
 * reordering the code that initializes the structure after the pointer
 * assignment. More importantly, this call documents which pointers
 * will be dereferenced by RCU read-side code.
 *
 * In some special cases, you may use RCU_INIT_POINTER() instead
 * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
 * to the fact that it does not constrain either the CPU or the compiler.
 * That said, using RCU_INIT_POINTER() when you should have used
 * rcu_assign_pointer() is a very bad thing that results in
 * impossible-to-diagnose memory corruption. So please be careful.
 * See the RCU_INIT_POINTER() comment header for details.
 *
 * Note that rcu_assign_pointer() evaluates each of its arguments only
 * once, appearances notwithstanding. One of the "extra" evaluations
 * is in typeof() and the other is visible only to sparse (__CHECKER__),
 * neither of which actually executes the argument. As with most cpp
 * macros, this execute-arguments-only-once property is important, so
 * please be careful when making changes to rcu_assign_pointer() and the
 * other macros that it invokes.
 */
#define rcu_assign_pointer(p, v) \
do { \
	uintptr_t _r_a_p__v = (uintptr_t)(v); \
	rcu_check_sparse(p, __rcu); \
\
	if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
		WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
	else \
		smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
} while (0)
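
/*
 * A minimal publication sketch, assuming invented names (gp, my_lock
 * and struct foo) that are not part of this header:
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	p->a = 1;			// Initialize before publishing.
 *	spin_lock(&my_lock);		// Writers must still exclude
 *	rcu_assign_pointer(gp, p);	// each other; readers need only
 *	spin_unlock(&my_lock);		// rcu_dereference() under
 *					// rcu_read_lock().
 */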

/**
 * rcu_replace_pointer() - replace an RCU pointer, returning its old value
 * @rcu_ptr: RCU pointer, whose old value is returned
 * @ptr: regular pointer
 * @c: the lockdep conditions under which the dereference will take place
 *
 * Perform a replacement, where @rcu_ptr is an RCU-annotated
 * pointer and @c is the lockdep argument that is passed to the
 * rcu_dereference_protected() call used to read that pointer. The old
 * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr.
 */
#define rcu_replace_pointer(rcu_ptr, ptr, c) \
({ \
	typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \
	rcu_assign_pointer((rcu_ptr), (ptr)); \
	__tmp; \
})
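
/*
 * For illustration, a hypothetical swap-and-free under an update-side
 * lock (gp, new and my_lock are invented names for this sketch):
 *
 *	struct foo *old;
 *
 *	spin_lock(&my_lock);
 *	old = rcu_replace_pointer(gp, new, lockdep_is_held(&my_lock));
 *	spin_unlock(&my_lock);
 *	synchronize_rcu();	// Wait for pre-existing readers.
 *	kfree(old);		// Now safe to free the old version.
 */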

/**
 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
 * @p: The pointer to read
 *
 * Return the value of the specified RCU-protected pointer, but omit the
 * lockdep checks for being in an RCU read-side critical section. This is
 * useful when the value of this pointer is accessed, but the pointer is
 * not dereferenced, for example, when testing an RCU-protected pointer
 * against NULL. Although rcu_access_pointer() may also be used in cases
 * where update-side locks prevent the value of the pointer from changing,
 * you should instead use rcu_dereference_protected() for this use case.
 *
 * It is also permissible to use rcu_access_pointer() when read-side
 * access to the pointer was removed at least one grace period ago, as
 * is the case in the context of the RCU callback that is freeing up
 * the data, or after a synchronize_rcu() returns. This can be useful
 * when tearing down multi-linked structures after a grace period
 * has elapsed.
 */
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
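
/*
 * Typical use is a NULL test with no dereference, for example (gp is
 * an invented __rcu pointer for this sketch):
 *
 *	if (!rcu_access_pointer(gp))
 *		return -ENOENT;	// Nothing published; no reader section needed.
 */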

/**
 * rcu_dereference_check() - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Do an rcu_dereference(), but check that the conditions under which the
 * dereference will take place are correct. Typically the conditions
 * indicate the various locking conditions that should be held at that
 * point. The check should return true if the conditions are satisfied.
 * An implicit check for being in an RCU read-side critical section
 * (rcu_read_lock()) is included.
 *
 * For example:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
 *
 * could be used to indicate to lockdep that foo->bar may only be dereferenced
 * if either rcu_read_lock() is held, or that the lock required to replace
 * the bar struct at foo->bar is held.
 *
 * Note that the list of conditions may also include indications of when a lock
 * need not be held, for example during initialisation or destruction of the
 * target struct:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
 *					      atomic_read(&foo->usage) == 0);
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), prevents the compiler from refetching
 * (and from merging fetches), and, more importantly, documents exactly
 * which pointers are protected by RCU and checks that the pointer is
 * annotated as __rcu.
 */
#define rcu_dereference_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)

/**
 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-bh counterpart to rcu_dereference_check().
 */
#define rcu_dereference_bh_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-sched counterpart to rcu_dereference_check().
 */
#define rcu_dereference_sched_check(p, c) \
	__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
				__rcu)

/*
 * The tracing infrastructure traces RCU (we want that), but unfortunately
 * some of the RCU checks cause tracing to lock up the system.
 *
 * The no-tracing version of rcu_dereference_raw() must not call
 * rcu_read_lock_held().
 */
#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu)

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * the READ_ONCE(). This is useful in cases where update-side locks
 * prevent the value of the pointer from changing. Please note that this
 * primitive does *not* prevent the compiler from repeating this reference
 * or combining it with other references, so it should not be used without
 * protection of appropriate locks.
 *
 * This function is only for update-side use. Using this function
 * when protected only by rcu_read_lock() will result in infrequent
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
	__rcu_dereference_protected((p), (c), __rcu)
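
/*
 * A minimal update-side sketch, assuming invented names (gp, my_lock,
 * key) that are not part of this header:
 *
 *	spin_lock(&my_lock);
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	if (p && p->a == key)
 *		...;	// Safe: my_lock prevents gp from changing here.
 *	spin_unlock(&my_lock);
 */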


/**
 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * This is a simple wrapper around rcu_dereference_check().
 */
#define rcu_dereference(p) rcu_dereference_check(p, 0)
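
/*
 * The canonical read-side pattern, with gp and do_something_with()
 * invented for this sketch:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->a);	// Must not block here.
 *	rcu_read_unlock();
 *
 * The pointer obtained from rcu_dereference() must not be used after
 * the enclosing rcu_read_unlock().
 */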

/**
 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)

/**
 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)

/**
 * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
 * @p: The pointer to hand off
 *
 * This is simply an identity function, but it documents where a pointer
 * is handed off from RCU to some other synchronization mechanism, for
 * example, reference counting or locking. In C11, it would map to
 * kill_dependency(). It could be used as follows::
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	long_lived = is_long_lived(p);
 *	if (long_lived) {
 *		if (!atomic_inc_not_zero(p->refcnt))
 *			long_lived = false;
 *		else
 *			p = rcu_pointer_handoff(p);
 *	}
 *	rcu_read_unlock();
 */
#define rcu_pointer_handoff(p) (p)

/**
 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections. Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with new RCU read-side critical sections. One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters a RCU read-side critical section, (5) the RCU
 * callback is invoked. This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested. Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * You can avoid reading and understanding the next paragraph by
 * following this rule: don't put anything in an rcu_read_lock() RCU
 * read-side critical section that would block in a !PREEMPTION kernel.
 * But if you want the full story, read on!
 *
 * In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU),
 * it is illegal to block while in an RCU read-side critical section.
 * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION
 * kernel builds, RCU read-side critical sections may be preempted,
 * but explicit blocking is illegal. Finally, in preemptible RCU
 * implementations in real-time (with -rt patchset) kernel builds, RCU
 * read-side critical sections may be preempted and they may also block, but
 * only when acquiring spinlocks that are subject to priority inheritance.
 */
static __always_inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_lock_acquire(&rcu_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock() used illegally while idle");
}

/*
 * So where is rcu_write_lock()? It does not exist, as there is no
 * way for writers to lock out RCU readers. This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other. The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well. RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
 * In most situations, rcu_read_unlock() is immune from deadlock.
 * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
 * is responsible for deboosting, which it does via rt_mutex_unlock().
 * Unfortunately, this function acquires the scheduler's runqueue and
 * priority-inheritance spinlocks. This means that deadlock could result
 * if the caller of rcu_read_unlock() already holds one of these locks or
 * any lock that is ever acquired while holding them.
 *
 * That said, RCU readers are never priority boosted unless they were
 * preempted. Therefore, one way to avoid deadlock is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with one of
 * rt_mutex_unlock()'s locks held. Such preemption can be avoided in
 * a number of ways, for example, by invoking preempt_disable() before
 * the critical section's outermost rcu_read_lock().
 *
 * Given that the set of locks acquired by rt_mutex_unlock() might change
 * at any time, a somewhat more future-proofed approach is to make sure
 * that preemption never happens within any RCU read-side critical
 * section whose outermost rcu_read_unlock() is called with irqs disabled.
 * This approach relies on the fact that rt_mutex_unlock() currently only
 * acquires irq-disabled locks.
 *
 * The second of these two approaches is best in most situations,
 * however, the first approach can also be useful, at least to those
 * developers willing to keep abreast of the set of locks acquired by
 * rt_mutex_unlock().
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock() used illegally while idle");
	__release(RCU);
	__rcu_read_unlock();
	rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}

/**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
 * This is the equivalent of rcu_read_lock(), but also disables softirqs.
 * Note that anything else that disables softirqs can also serve as
 * an RCU read-side critical section.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
 * was invoked from some other task.
 */
static inline void rcu_read_lock_bh(void)
{
	local_bh_disable();
	__acquire(RCU_BH);
	rcu_lock_acquire(&rcu_bh_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_bh() used illegally while idle");
}

/**
 * rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_bh() used illegally while idle");
	rcu_lock_release(&rcu_bh_lock_map);
	__release(RCU_BH);
	local_bh_enable();
}

/**
 * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
 *
 * This is the equivalent of rcu_read_lock(), but disables preemption.
 * Read-side critical sections can also be introduced by anything else
 * that disables preemption, including local_irq_disable() and friends.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_sched() from process context if the matching
 * rcu_read_lock_sched() was invoked from an NMI handler.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_lock_acquire(&rcu_sched_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_sched() used illegally while idle");
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/**
 * rcu_read_unlock_sched() - marks the end of a RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_sched() used illegally while idle");
	rcu_lock_release(&rcu_sched_lock_map);
	__release(RCU_SCHED);
	preempt_enable();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}

/**
 * RCU_INIT_POINTER() - initialize an RCU protected pointer
 * @p: The pointer to be initialized.
 * @v: The value to initialize the pointer to.
 *
 * Initialize an RCU-protected pointer in special cases where readers
 * do not need ordering constraints on the CPU or the compiler. These
 * special cases are:
 *
 * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
 * 2.	The caller has taken whatever steps are required to prevent
 *	RCU readers from concurrently accessing this pointer *or*
 * 3.	The referenced data structure has already been exposed to
 *	readers either at compile time or via rcu_assign_pointer() *and*
 *
 *	a.	You have not made *any* reader-visible changes to
 *		this structure since then *or*
 *	b.	It is OK for readers accessing this structure from its
 *		new location to see the old state of the structure. (For
 *		example, the changes were to statistical counters or to
 *		other state where exact synchronization is not required.)
 *
 * Failure to follow these rules governing use of RCU_INIT_POINTER() will
 * result in impossible-to-diagnose memory corruption. That is, the
 * structures will look OK in crash dumps, but any concurrent RCU readers
 * might see pre-initialized values of the referenced data structure. So
 * please be very careful how you use RCU_INIT_POINTER()!!!
 *
 * If you are creating an RCU-protected linked structure that is accessed
 * by a single external-to-structure RCU-protected pointer, then you may
 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 * pointers, but you must use rcu_assign_pointer() to initialize the
 * external-to-structure pointer *after* you have completely initialized
 * the reader-accessible portions of the linked structure.
 *
 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
 * ordering guarantees for either the CPU or the compiler.
 */
#define RCU_INIT_POINTER(p, v) \
	do { \
		rcu_check_sparse(p, __rcu); \
		WRITE_ONCE(p, RCU_INITIALIZER(v)); \
	} while (0)
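
/*
 * For example, NULLing out an RCU-protected pointer (special case 1
 * above) needs no ordering, so the cheaper initializer suffices; gp
 * and my_lock are invented names for this sketch:
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	RCU_INIT_POINTER(gp, NULL);	// Readers now see NULL.
 *	spin_unlock(&my_lock);
 */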

/**
 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
 * @p: The pointer to be initialized.
 * @v: The value to initialize the pointer to.
 *
 * GCC-style initialization for an RCU-protected pointer in a structure field.
 */
#define RCU_POINTER_INITIALIZER(p, v) \
		.p = RCU_INITIALIZER(v)

/*
 * Does the specified offset indicate that the corresponding rcu_head
 * structure can be handled by kvfree_rcu()?
 */
#define __is_kvfree_rcu_offset(offset) ((offset) < 4096)

/*
 * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
 */
#define __kvfree_rcu(head, offset) \
	do { \
		BUILD_BUG_ON(!__is_kvfree_rcu_offset(offset)); \
		kvfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
	} while (0)

/**
 * kfree_rcu() - kfree an object after a grace period.
 * @ptr: pointer to kfree
 * @rhf: the name of the struct rcu_head within the type of @ptr.
 *
 * Many RCU callback functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
 * when they are used in a kernel module, that module must invoke the
 * high-latency rcu_barrier() function at module-unload time.
 *
 * The kfree_rcu() function handles this issue. Rather than encoding a
 * function address in the embedded rcu_head structure, kfree_rcu() instead
 * encodes the offset of the rcu_head structure within the base structure.
 * Because the functions are not allowed in the low-order 4096 bytes of
 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
 * be generated in __kvfree_rcu(). If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
 * Note that the allowable offset might decrease in the future, for example,
 * to allow something like kmem_cache_free_rcu().
 *
 * The BUILD_BUG_ON check must not involve any function calls, hence the
 * checks are done in macros here.
 */
#define kfree_rcu(ptr, rhf) \
do { \
	typeof (ptr) ___p = (ptr); \
\
	if (___p) \
		__kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \
} while (0)
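
/*
 * A minimal sketch of the intended use, with struct foo invented for
 * this example; the rcu_head must live within the structure being freed:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;	// Offset must be below 4096.
 *	};
 *
 *	// Remove p from all reader-visible paths first, then:
 *	kfree_rcu(p, rcu);	// Frees p after a grace period elapses.
 */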

/**
 * kvfree_rcu() - kvfree an object after a grace period.
 *
 * This macro takes one or two arguments, depending on whether or not
 * the object being freed is head-less. If the object contains an
 * rcu_head, the semantics are unchanged from before:
 *
 *	kvfree_rcu(ptr, rhf);
 *
 * where @ptr is the pointer to be passed to kvfree() and @rhf is the
 * name of the rcu_head structure within the type of @ptr.
 *
 * In the head-less variant, only one argument is passed, namely the
 * pointer that is to be freed after a grace period. The semantics are
 * then
 *
 *	kvfree_rcu(ptr);
 *
 * where @ptr is the pointer to be passed to kvfree().
 *
 * Please note that the head-less way of freeing is permitted only from
 * contexts that are allowed to sleep, that is, contexts where a
 * might_sleep() annotation would be legal. Otherwise, please embed an
 * rcu_head structure within the type of @ptr and use the two-argument
 * form.
 */
#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \
	kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__)

#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME
#define kvfree_rcu_arg_2(ptr, rhf) kfree_rcu(ptr, rhf)
#define kvfree_rcu_arg_1(ptr) \
do { \
	typeof(ptr) ___p = (ptr); \
\
	if (___p) \
		kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \
} while (0)

/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair acts as a full barrier. This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE
#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
#else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
#define smp_mb__after_unlock_lock()	do { } while (0)
#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
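
/*
 * Illustrative placement (my_lock and other_lock are invented names):
 *
 *	spin_unlock(&other_lock);
 *	spin_lock(&my_lock);
 *	smp_mb__after_unlock_lock();	// Upgrade UNLOCK+LOCK to full barrier.
 *
 * On strongly ordered architectures this compiles to nothing; on weakly
 * ordered ones (CONFIG_ARCH_WEAK_RELEASE_ACQUIRE) it emits smp_mb().
 */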


/* Has the specified rcu_head structure been handed to call_rcu()? */

/**
 * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
 * @rhp: The rcu_head structure to initialize.
 *
 * If you intend to invoke rcu_head_after_call_rcu() to test whether a
 * given rcu_head structure has already been passed to call_rcu(), then
 * you must also invoke this rcu_head_init() function on it just after
 * allocating that structure. Calls to this function must not race with
 * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation.
 */
static inline void rcu_head_init(struct rcu_head *rhp)
{
	rhp->func = (rcu_callback_t)~0L;
}

/**
 * rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()?
 * @rhp: The rcu_head structure to test.
 * @f: The function passed to call_rcu() along with @rhp.
 *
 * Returns @true if the @rhp has been passed to call_rcu() with @f,
 * and @false otherwise. Emits a warning in any other case, including
 * the case where @rhp has already been invoked after a grace period.
 * Calls to this function must not race with callback invocation. One way
 * to avoid such races is to enclose the call to rcu_head_after_call_rcu()
 * in an RCU read-side critical section that includes a read-side fetch
 * of the pointer to the structure containing @rhp.
 */
static inline bool
rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
{
	rcu_callback_t func = READ_ONCE(rhp->func);

	if (func == f)
		return true;
	WARN_ON_ONCE(func != (rcu_callback_t)~0L);
	return false;
}
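
/*
 * Putting the two helpers together; struct foo and my_cb are invented
 * names for this sketch, and the caller must prevent races with
 * callback invocation as described above:
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	rcu_head_init(&p->rcu);		// Mark as not yet queued.
 *	...
 *	call_rcu(&p->rcu, my_cb);	// Hand off to RCU.
 *	...
 *	if (rcu_head_after_call_rcu(&p->rcu, my_cb))
 *		...;	// Already queued; do not queue it twice.
 */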

/* kernel/ksysfs.c definitions */
extern int rcu_expedited;
extern int rcu_normal;

#endif /* __LINUX_RCUPDATE_H */
