1 /*
2  * Read-Copy Update mechanism for mutual exclusion
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, you can access it online at
16  * http://www.gnu.org/licenses/gpl-2.0.html.
17  *
18  * Copyright IBM Corporation, 2001
19  *
20  * Author: Dipankar Sarma <dipankar@in.ibm.com>
21  *
22  * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
23  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
24  * Papers:
25  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
26  * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
27  *
28  * For detailed explanation of Read-Copy Update mechanism see -
29  *		http://lse.sourceforge.net/locking/rcupdate.html
30  *
31  */
32 
33 #ifndef __LINUX_RCUPDATE_H
34 #define __LINUX_RCUPDATE_H
35 
36 #include <linux/types.h>
37 #include <linux/cache.h>
38 #include <linux/spinlock.h>
39 #include <linux/threads.h>
40 #include <linux/cpumask.h>
41 #include <linux/seqlock.h>
42 #include <linux/lockdep.h>
43 #include <linux/completion.h>
44 #include <linux/debugobjects.h>
45 #include <linux/bug.h>
46 #include <linux/compiler.h>
47 #include <asm/barrier.h>
48 
49 extern int rcu_expedited; /* for sysctl */
50 
51 enum rcutorture_type {
52 	RCU_FLAVOR,
53 	RCU_BH_FLAVOR,
54 	RCU_SCHED_FLAVOR,
55 	RCU_TASKS_FLAVOR,
56 	SRCU_FLAVOR,
57 	INVALID_RCU_FLAVOR
58 };
59 
60 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
61 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
62 			    unsigned long *gpnum, unsigned long *completed);
63 void rcutorture_record_test_transition(void);
64 void rcutorture_record_progress(unsigned long vernum);
65 void do_trace_rcu_torture_read(const char *rcutorturename,
66 			       struct rcu_head *rhp,
67 			       unsigned long secs,
68 			       unsigned long c_old,
69 			       unsigned long c);
70 #else
71 static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
72 					  int *flags,
73 					  unsigned long *gpnum,
74 					  unsigned long *completed)
75 {
76 	*flags = 0;
77 	*gpnum = 0;
78 	*completed = 0;
79 }
80 static inline void rcutorture_record_test_transition(void)
81 {
82 }
83 static inline void rcutorture_record_progress(unsigned long vernum)
84 {
85 }
86 #ifdef CONFIG_RCU_TRACE
87 void do_trace_rcu_torture_read(const char *rcutorturename,
88 			       struct rcu_head *rhp,
89 			       unsigned long secs,
90 			       unsigned long c_old,
91 			       unsigned long c);
92 #else
93 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
94 	do { } while (0)
95 #endif
96 #endif
97 
98 #define UINT_CMP_GE(a, b)	(UINT_MAX / 2 >= (a) - (b))
99 #define UINT_CMP_LT(a, b)	(UINT_MAX / 2 < (a) - (b))
100 #define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
101 #define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
102 #define ulong2long(a)		(*(long *)(&(a)))
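/*
 * Illustrative sketch (not part of the original header): these macros
 * provide wraparound-safe comparisons for free-running counters such as
 * grace-period numbers, where a plain "<" misbehaves once the counter
 * wraps.  The "gp_counter" and "gp_seen" names below are hypothetical:
 *
 *	unsigned long gp_seen = ACCESS_ONCE(gp_counter);
 *	...
 *	if (ULONG_CMP_LT(gp_seen, ACCESS_ONCE(gp_counter)))
 *		...	the counter has advanced past the snapshot
 */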
103 
104 /* Exported common interfaces */
105 
106 #ifdef CONFIG_PREEMPT_RCU
107 
108 /**
109  * call_rcu() - Queue an RCU callback for invocation after a grace period.
110  * @head: structure to be used for queueing the RCU updates.
111  * @func: actual callback function to be invoked after the grace period
112  *
113  * The callback function will be invoked some time after a full grace
114  * period elapses, in other words after all pre-existing RCU read-side
115  * critical sections have completed.  However, the callback function
116  * might well execute concurrently with RCU read-side critical sections
117  * that started after call_rcu() was invoked.  RCU read-side critical
118  * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
119  * and may be nested.
120  *
121  * Note that all CPUs must agree that the grace period extended beyond
122  * all pre-existing RCU read-side critical sections.  On systems with more
123  * than one CPU, this means that when "func()" is invoked, each CPU is
124  * guaranteed to have executed a full memory barrier since the end of its
125  * last RCU read-side critical section whose beginning preceded the call
126  * to call_rcu().  It also means that each CPU executing an RCU read-side
127  * critical section that continues beyond the start of "func()" must have
128  * executed a memory barrier after the call_rcu() but before the beginning
129  * of that RCU read-side critical section.  Note that these guarantees
130  * include CPUs that are offline, idle, or executing in user mode, as
131  * well as CPUs that are executing in the kernel.
132  *
133  * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
134  * resulting RCU callback function "func()", then both CPU A and CPU B are
135  * guaranteed to execute a full memory barrier during the time interval
136  * between the call to call_rcu() and the invocation of "func()" -- even
137  * if CPU A and CPU B are the same CPU (but again only if the system has
138  * more than one CPU).
139  */
140 void call_rcu(struct rcu_head *head,
141 	      void (*func)(struct rcu_head *head));
142 
143 #else /* #ifdef CONFIG_PREEMPT_RCU */
144 
145 /* In classic RCU, call_rcu() is just call_rcu_sched(). */
146 #define	call_rcu	call_rcu_sched
147 
148 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
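/*
 * Usage sketch for call_rcu() (illustrative only; "struct foo",
 * foo_reclaim(), and old_fp are hypothetical and not defined here):
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *fp = container_of(head, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	After unlinking old_fp from all reader-visible paths under the
 *	update-side lock:
 *
 *	call_rcu(&old_fp->rcu, foo_reclaim);
 */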
149 
150 /**
151  * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
152  * @head: structure to be used for queueing the RCU updates.
153  * @func: actual callback function to be invoked after the grace period
154  *
155  * The callback function will be invoked some time after a full grace
156  * period elapses, in other words after all currently executing RCU
157  * read-side critical sections have completed. call_rcu_bh() assumes
158  * that the read-side critical sections end on completion of a softirq
159  * handler. This means that read-side critical sections in process
160  * context must not be interrupted by softirqs. This interface is to be
161  * used when most of the read-side critical sections are in softirq context.
162  * RCU read-side critical sections are delimited by:
163  *  - rcu_read_lock() and  rcu_read_unlock(), if in interrupt context.
164  *  OR
165  *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
166  *  These may be nested.
167  *
168  * See the description of call_rcu() for more detailed information on
169  * memory ordering guarantees.
170  */
171 void call_rcu_bh(struct rcu_head *head,
172 		 void (*func)(struct rcu_head *head));
173 
174 /**
175  * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
176  * @head: structure to be used for queueing the RCU updates.
177  * @func: actual callback function to be invoked after the grace period
178  *
179  * The callback function will be invoked some time after a full grace
180  * period elapses, in other words after all currently executing RCU
181  * read-side critical sections have completed. call_rcu_sched() assumes
182  * that the read-side critical sections end on enabling of preemption
183  * or on voluntary preemption.
184  * RCU read-side critical sections are delimited by:
185  *  - rcu_read_lock_sched() and  rcu_read_unlock_sched(),
186  *  OR
187  *  anything that disables preemption.
188  *  These may be nested.
189  *
190  * See the description of call_rcu() for more detailed information on
191  * memory ordering guarantees.
192  */
193 void call_rcu_sched(struct rcu_head *head,
194 		    void (*func)(struct rcu_head *rcu));
195 
196 void synchronize_sched(void);
197 
198 /**
199  * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
200  * @head: structure to be used for queueing the RCU updates.
201  * @func: actual callback function to be invoked after the grace period
202  *
203  * The callback function will be invoked some time after a full grace
204  * period elapses, in other words after all currently executing RCU
205  * read-side critical sections have completed. call_rcu_tasks() assumes
206  * that the read-side critical sections end at a voluntary context
207  * switch (not a preemption!), entry into idle, or transition to usermode
208  * execution.  As such, there are no read-side primitives analogous to
209  * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
210  * to determine that all tasks have passed through a safe state, not so
211  * much for data-structure synchronization.
212  *
213  * See the description of call_rcu() for more detailed information on
214  * memory ordering guarantees.
215  */
216 void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
217 void synchronize_rcu_tasks(void);
218 void rcu_barrier_tasks(void);
219 
220 #ifdef CONFIG_PREEMPT_RCU
221 
222 void __rcu_read_lock(void);
223 void __rcu_read_unlock(void);
224 void rcu_read_unlock_special(struct task_struct *t);
225 void synchronize_rcu(void);
226 
227 /*
228  * Defined as a macro as it is a very low level header included from
229  * areas that don't even know about current.  This gives the rcu_read_lock()
230  * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
231  * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
232  */
233 #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
234 
235 #else /* #ifdef CONFIG_PREEMPT_RCU */
236 
237 static inline void __rcu_read_lock(void)
238 {
239 	preempt_disable();
240 }
241 
242 static inline void __rcu_read_unlock(void)
243 {
244 	preempt_enable();
245 }
246 
247 static inline void synchronize_rcu(void)
248 {
249 	synchronize_sched();
250 }
251 
252 static inline int rcu_preempt_depth(void)
253 {
254 	return 0;
255 }
256 
257 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
258 
259 /* Internal to kernel */
260 void rcu_init(void);
261 void rcu_sched_qs(void);
262 void rcu_bh_qs(void);
263 void rcu_check_callbacks(int cpu, int user);
264 struct notifier_block;
265 void rcu_idle_enter(void);
266 void rcu_idle_exit(void);
267 void rcu_irq_enter(void);
268 void rcu_irq_exit(void);
269 
270 #ifdef CONFIG_RCU_STALL_COMMON
271 void rcu_sysrq_start(void);
272 void rcu_sysrq_end(void);
273 #else /* #ifdef CONFIG_RCU_STALL_COMMON */
274 static inline void rcu_sysrq_start(void)
275 {
276 }
277 static inline void rcu_sysrq_end(void)
278 {
279 }
280 #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
281 
282 #ifdef CONFIG_RCU_USER_QS
283 void rcu_user_enter(void);
284 void rcu_user_exit(void);
285 #else
286 static inline void rcu_user_enter(void) { }
287 static inline void rcu_user_exit(void) { }
288 static inline void rcu_user_hooks_switch(struct task_struct *prev,
289 					 struct task_struct *next) { }
290 #endif /* CONFIG_RCU_USER_QS */
291 
292 #ifdef CONFIG_RCU_NOCB_CPU
293 void rcu_init_nohz(void);
294 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
295 static inline void rcu_init_nohz(void)
296 {
297 }
298 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
299 
300 /**
301  * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
302  * @a: Code that RCU needs to pay attention to.
303  *
304  * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
305  * in the inner idle loop, that is, between the rcu_idle_enter() and
306  * the rcu_idle_exit() -- RCU will happily ignore any such read-side
307  * critical sections.  However, things like powertop need tracepoints
308  * in the inner idle loop.
309  *
310  * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
311  * will tell RCU that it needs to pay attention, invoke its argument
312  * (in this example, a call to the do_something_with_RCU() function),
313  * and then tell RCU to go back to ignoring this CPU.  It is permissible
314  * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
315  * quite limited.  If deeper nesting is required, it will be necessary
316  * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
317  */
318 #define RCU_NONIDLE(a) \
319 	do { \
320 		rcu_irq_enter(); \
321 		do { a; } while (0); \
322 		rcu_irq_exit(); \
323 	} while (0)
324 
325 /*
326  * Note a voluntary context switch for RCU-tasks benefit.  This is a
327  * macro rather than an inline function to avoid #include hell.
328  */
329 #ifdef CONFIG_TASKS_RCU
330 #define TASKS_RCU(x) x
331 extern struct srcu_struct tasks_rcu_exit_srcu;
332 #define rcu_note_voluntary_context_switch(t) \
333 	do { \
334 		if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
335 			ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
336 	} while (0)
337 #else /* #ifdef CONFIG_TASKS_RCU */
338 #define TASKS_RCU(x) do { } while (0)
339 #define rcu_note_voluntary_context_switch(t)	do { } while (0)
340 #endif /* #else #ifdef CONFIG_TASKS_RCU */
341 
342 /**
343  * cond_resched_rcu_qs - Report potential quiescent states to RCU
344  *
345  * This macro resembles cond_resched(), except that it is defined to
346  * report potential quiescent states to RCU-tasks even if the cond_resched()
347  * machinery were to be shut off, as some advocate for PREEMPT kernels.
348  */
349 #define cond_resched_rcu_qs() \
350 do { \
351 	rcu_note_voluntary_context_switch(current); \
352 	cond_resched(); \
353 } while (0)
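/*
 * Usage sketch (illustrative): a long-running loop, for example in a
 * kthread, can use cond_resched_rcu_qs() so that neither normal RCU nor
 * RCU-tasks grace periods are held up.  The work function below is
 * hypothetical:
 *
 *	while (!kthread_should_stop()) {
 *		do_one_unit_of_work();
 *		cond_resched_rcu_qs();
 *	}
 */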
354 
355 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
356 bool __rcu_is_watching(void);
357 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
358 
359 /*
360  * Infrastructure to implement the synchronize_() primitives in
361  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
362  */
363 
364 typedef void call_rcu_func_t(struct rcu_head *head,
365 			     void (*func)(struct rcu_head *head));
366 void wait_rcu_gp(call_rcu_func_t crf);
367 
368 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
369 #include <linux/rcutree.h>
370 #elif defined(CONFIG_TINY_RCU)
371 #include <linux/rcutiny.h>
372 #else
373 #error "Unknown RCU implementation specified to kernel configuration"
374 #endif
375 
376 /*
377  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
378  * initialization and destruction of rcu_head on the stack. rcu_head structures
379  * allocated dynamically in the heap or defined statically don't need any
380  * initialization.
381  */
382 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
383 void init_rcu_head(struct rcu_head *head);
384 void destroy_rcu_head(struct rcu_head *head);
385 void init_rcu_head_on_stack(struct rcu_head *head);
386 void destroy_rcu_head_on_stack(struct rcu_head *head);
387 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
388 static inline void init_rcu_head(struct rcu_head *head)
389 {
390 }
391 
392 static inline void destroy_rcu_head(struct rcu_head *head)
393 {
394 }
395 
396 static inline void init_rcu_head_on_stack(struct rcu_head *head)
397 {
398 }
399 
400 static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
401 {
402 }
403 #endif	/* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
404 
405 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
406 bool rcu_lockdep_current_cpu_online(void);
407 #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
408 static inline bool rcu_lockdep_current_cpu_online(void)
409 {
410 	return true;
411 }
412 #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
413 
414 #ifdef CONFIG_DEBUG_LOCK_ALLOC
415 
416 static inline void rcu_lock_acquire(struct lockdep_map *map)
417 {
418 	lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
419 }
420 
421 static inline void rcu_lock_release(struct lockdep_map *map)
422 {
423 	lock_release(map, 1, _THIS_IP_);
424 }
425 
426 extern struct lockdep_map rcu_lock_map;
427 extern struct lockdep_map rcu_bh_lock_map;
428 extern struct lockdep_map rcu_sched_lock_map;
429 extern struct lockdep_map rcu_callback_map;
430 int debug_lockdep_rcu_enabled(void);
431 
432 int rcu_read_lock_held(void);
433 int rcu_read_lock_bh_held(void);
434 
435 /**
436  * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
437  *
438  * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
439  * RCU-sched read-side critical section.  In absence of
440  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
441  * critical section unless it can prove otherwise.  Note that disabling
442  * of preemption (including disabling irqs) counts as an RCU-sched
443  * read-side critical section.  This is useful for debug checks in functions
444  * that require that they be called within an RCU-sched read-side
445  * critical section.
446  *
447  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
448  * and while lockdep is disabled.
449  *
450  * Note that if the CPU is in the idle loop from an RCU point of
451  * view (i.e., we are in the section between rcu_idle_enter() and
452  * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
453  * did an rcu_read_lock().  The reason for this is that RCU ignores CPUs
454  * that are in such a section, considering these as in extended quiescent
455  * state, so such a CPU is effectively never in an RCU read-side critical
456  * section regardless of what RCU primitives it invokes.  This state of
457  * affairs is required --- we need to keep an RCU-free window in idle
458  * where the CPU may possibly enter into low power mode. This way an
459  * extended quiescent state can be reported to CPUs that have started a
460  * grace period. Otherwise we would delay any grace period for as long
461  * as we run in the idle task.
462  *
463  * Similarly, we avoid claiming an SRCU read lock held if the current
464  * CPU is offline.
465  */
466 #ifdef CONFIG_PREEMPT_COUNT
467 static inline int rcu_read_lock_sched_held(void)
468 {
469 	int lockdep_opinion = 0;
470 
471 	if (!debug_lockdep_rcu_enabled())
472 		return 1;
473 	if (!rcu_is_watching())
474 		return 0;
475 	if (!rcu_lockdep_current_cpu_online())
476 		return 0;
477 	if (debug_locks)
478 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
479 	return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
480 }
481 #else /* #ifdef CONFIG_PREEMPT_COUNT */
482 static inline int rcu_read_lock_sched_held(void)
483 {
484 	return 1;
485 }
486 #endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
487 
488 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
489 
490 # define rcu_lock_acquire(a)		do { } while (0)
491 # define rcu_lock_release(a)		do { } while (0)
492 
493 static inline int rcu_read_lock_held(void)
494 {
495 	return 1;
496 }
497 
498 static inline int rcu_read_lock_bh_held(void)
499 {
500 	return 1;
501 }
502 
503 #ifdef CONFIG_PREEMPT_COUNT
504 static inline int rcu_read_lock_sched_held(void)
505 {
506 	return preempt_count() != 0 || irqs_disabled();
507 }
508 #else /* #ifdef CONFIG_PREEMPT_COUNT */
509 static inline int rcu_read_lock_sched_held(void)
510 {
511 	return 1;
512 }
513 #endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
514 
515 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
516 
517 #ifdef CONFIG_PROVE_RCU
518 
519 /**
520  * rcu_lockdep_assert - emit lockdep splat if specified condition not met
521  * @c: condition to check
522  * @s: informative message
523  */
524 #define rcu_lockdep_assert(c, s)					\
525 	do {								\
526 		static bool __section(.data.unlikely) __warned;		\
527 		if (debug_lockdep_rcu_enabled() && !__warned && !(c)) {	\
528 			__warned = true;				\
529 			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
530 		}							\
531 	} while (0)
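/*
 * Usage sketch (illustrative): a function that must be called from
 * within an RCU read-side critical section can check that requirement
 * with rcu_lockdep_assert().  foo_lookup() and foo_table[] below are
 * hypothetical:
 *
 *	static struct foo *foo_lookup(int key)
 *	{
 *		rcu_lockdep_assert(rcu_read_lock_held(),
 *				   "foo_lookup() needs rcu_read_lock()");
 *		return rcu_dereference(foo_table[key]);
 *	}
 */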
532 
533 #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
534 static inline void rcu_preempt_sleep_check(void)
535 {
536 	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
537 			   "Illegal context switch in RCU read-side critical section");
538 }
539 #else /* #ifdef CONFIG_PROVE_RCU */
540 static inline void rcu_preempt_sleep_check(void)
541 {
542 }
543 #endif /* #else #ifdef CONFIG_PROVE_RCU */
544 
545 #define rcu_sleep_check()						\
546 	do {								\
547 		rcu_preempt_sleep_check();				\
548 		rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),	\
549 				   "Illegal context switch in RCU-bh read-side critical section"); \
550 		rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),	\
551 				   "Illegal context switch in RCU-sched read-side critical section"); \
552 	} while (0)
553 
554 #else /* #ifdef CONFIG_PROVE_RCU */
555 
556 #define rcu_lockdep_assert(c, s) do { } while (0)
557 #define rcu_sleep_check() do { } while (0)
558 
559 #endif /* #else #ifdef CONFIG_PROVE_RCU */
560 
561 /*
562  * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
563  * and rcu_assign_pointer().  Some of these could be folded into their
564  * callers, but they are left separate in order to ease introduction of
565  * multiple flavors of pointers to match the multiple flavors of RCU
566  * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
567  * the future.
568  */
569 
570 #ifdef __CHECKER__
571 #define rcu_dereference_sparse(p, space) \
572 	((void)(((typeof(*p) space *)p) == p))
573 #else /* #ifdef __CHECKER__ */
574 #define rcu_dereference_sparse(p, space)
575 #endif /* #else #ifdef __CHECKER__ */
576 
577 #define __rcu_access_pointer(p, space) \
578 ({ \
579 	typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
580 	rcu_dereference_sparse(p, space); \
581 	((typeof(*p) __force __kernel *)(_________p1)); \
582 })
583 #define __rcu_dereference_check(p, c, space) \
584 ({ \
585 	typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
586 	rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
587 	rcu_dereference_sparse(p, space); \
588 	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
589 	((typeof(*p) __force __kernel *)(_________p1)); \
590 })
591 #define __rcu_dereference_protected(p, c, space) \
592 ({ \
593 	rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \
594 	rcu_dereference_sparse(p, space); \
595 	((typeof(*p) __force __kernel *)(p)); \
596 })
597 
598 #define __rcu_access_index(p, space) \
599 ({ \
600 	typeof(p) _________p1 = ACCESS_ONCE(p); \
601 	rcu_dereference_sparse(p, space); \
602 	(_________p1); \
603 })
604 #define __rcu_dereference_index_check(p, c) \
605 ({ \
606 	typeof(p) _________p1 = ACCESS_ONCE(p); \
607 	rcu_lockdep_assert(c, \
608 			   "suspicious rcu_dereference_index_check() usage"); \
609 	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
610 	(_________p1); \
611 })
612 
613 /**
614  * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
615  * @v: The value to statically initialize with.
616  */
617 #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
618 
619 /**
620  * rcu_assign_pointer() - assign to RCU-protected pointer
621  * @p: pointer to assign to
622  * @v: value to assign (publish)
623  *
624  * Assigns the specified value to the specified RCU-protected
625  * pointer, ensuring that any concurrent RCU readers will see
626  * any prior initialization.
627  *
628  * Inserts memory barriers on architectures that require them
629  * (which is most of them), and also prevents the compiler from
630  * reordering the code that initializes the structure after the pointer
631  * assignment.  More importantly, this call documents which pointers
632  * will be dereferenced by RCU read-side code.
633  *
634  * In some special cases, you may use RCU_INIT_POINTER() instead
635  * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
636  * to the fact that it does not constrain either the CPU or the compiler.
637  * That said, using RCU_INIT_POINTER() when you should have used
638  * rcu_assign_pointer() is a very bad thing that results in
639  * impossible-to-diagnose memory corruption.  So please be careful.
640  * See the RCU_INIT_POINTER() comment header for details.
641  *
642  * Note that rcu_assign_pointer() evaluates each of its arguments only
643  * once, appearances notwithstanding.  One of the "extra" evaluations
644  * is in typeof() and the other visible only to sparse (__CHECKER__),
645  * neither of which actually execute the argument.  As with most cpp
646  * macros, this execute-arguments-only-once property is important, so
647  * please be careful when making changes to rcu_assign_pointer() and the
648  * other macros that it invokes.
649  */
650 #define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v))
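/*
 * Usage sketch (illustrative; "gp", "struct foo", and the field names
 * are hypothetical): fully initialize the new structure first, then
 * publish it with rcu_assign_pointer() so that concurrent readers can
 * see only initialized values:
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	p->a = 1;
 *	p->b = 2;
 *	rcu_assign_pointer(gp, p);
 */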
651 
652 /**
653  * rcu_access_pointer() - fetch RCU pointer with no dereferencing
654  * @p: The pointer to read
655  *
656  * Return the value of the specified RCU-protected pointer, but omit the
657  * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
658  * when the value of this pointer is accessed, but the pointer is not
659  * dereferenced, for example, when testing an RCU-protected pointer against
660  * NULL.  Although rcu_access_pointer() may also be used in cases where
661  * update-side locks prevent the value of the pointer from changing, you
662  * should instead use rcu_dereference_protected() for this use case.
663  *
664  * It is also permissible to use rcu_access_pointer() when read-side
665  * access to the pointer was removed at least one grace period ago, as
666  * is the case in the context of the RCU callback that is freeing up
667  * the data, or after a synchronize_rcu() returns.  This can be useful
668  * when tearing down multi-linked structures after a grace period
669  * has elapsed.
670  */
671 #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)
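/*
 * Usage sketch (illustrative; "gp" is a hypothetical __rcu pointer):
 * testing against NULL does not dereference the pointer, so the
 * lighter-weight rcu_access_pointer() suffices:
 *
 *	if (rcu_access_pointer(gp) == NULL)
 *		return -ENOENT;
 */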
672 
673 /**
674  * rcu_dereference_check() - rcu_dereference with debug checking
675  * @p: The pointer to read, prior to dereferencing
676  * @c: The conditions under which the dereference will take place
677  *
678  * Do an rcu_dereference(), but check that the conditions under which the
679  * dereference will take place are correct.  Typically the conditions
680  * indicate the various locking conditions that should be held at that
681  * point.  The check should return true if the conditions are satisfied.
682  * An implicit check for being in an RCU read-side critical section
683  * (rcu_read_lock()) is included.
684  *
685  * For example:
686  *
687  *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
688  *
689  * could be used to indicate to lockdep that foo->bar may only be dereferenced
690  * if either rcu_read_lock() is held, or that the lock required to replace
691  * the bar struct at foo->bar is held.
692  *
693  * Note that the list of conditions may also include indications of when a lock
694  * need not be held, for example during initialisation or destruction of the
695  * target struct:
696  *
697  *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
698  *					      atomic_read(&foo->usage) == 0);
699  *
700  * Inserts memory barriers on architectures that require them
701  * (currently only the Alpha), prevents the compiler from refetching
702  * (and from merging fetches), and, more importantly, documents exactly
703  * which pointers are protected by RCU and checks that the pointer is
704  * annotated as __rcu.
705  */
706 #define rcu_dereference_check(p, c) \
707 	__rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
708 
709 /**
710  * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
711  * @p: The pointer to read, prior to dereferencing
712  * @c: The conditions under which the dereference will take place
713  *
714  * This is the RCU-bh counterpart to rcu_dereference_check().
715  */
716 #define rcu_dereference_bh_check(p, c) \
717 	__rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
718 
719 /**
720  * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
721  * @p: The pointer to read, prior to dereferencing
722  * @c: The conditions under which the dereference will take place
723  *
724  * This is the RCU-sched counterpart to rcu_dereference_check().
725  */
726 #define rcu_dereference_sched_check(p, c) \
727 	__rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
728 				__rcu)
729 
730 #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
731 
732 /*
733  * The tracing infrastructure traces RCU (we want that), but unfortunately
734  * some of the RCU checks causes tracing to lock up the system.
735  *
736  * The tracing version of rcu_dereference_raw() must not call
737  * rcu_read_lock_held().
738  */
739 #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
740 
741 /**
742  * rcu_access_index() - fetch RCU index with no dereferencing
743  * @p: The index to read
744  *
745  * Return the value of the specified RCU-protected index, but omit the
746  * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
747  * when the value of this index is accessed, but the index is not
748  * dereferenced, for example, when testing an RCU-protected index against
749  * -1.  Although rcu_access_index() may also be used in cases where
750  * update-side locks prevent the value of the index from changing, you
751  * should instead use rcu_dereference_index_protected() for this use case.
752  */
753 #define rcu_access_index(p) __rcu_access_index((p), __rcu)
754 
755 /**
756  * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
757  * @p: The pointer to read, prior to dereferencing
758  * @c: The conditions under which the dereference will take place
759  *
760  * Similar to rcu_dereference_check(), but omits the sparse checking.
761  * This allows rcu_dereference_index_check() to be used on integers,
762  * which can then be used as array indices.  Attempting to use
763  * rcu_dereference_check() on an integer will give compiler warnings
764  * because the sparse address-space mechanism relies on dereferencing
765  * the RCU-protected pointer.  Dereferencing integers is not something
766  * that even gcc will put up with.
767  *
768  * Note that this function does not implicitly check for RCU read-side
769  * critical sections.  If this function gains lots of uses, it might
770  * make sense to provide versions for each flavor of RCU, but it does
771  * not make sense as of early 2010.
772  */
773 #define rcu_dereference_index_check(p, c) \
774 	__rcu_dereference_index_check((p), (c))
775 
776 /**
777  * rcu_dereference_protected() - fetch RCU pointer when updates prevented
778  * @p: The pointer to read, prior to dereferencing
779  * @c: The conditions under which the dereference will take place
780  *
781  * Return the value of the specified RCU-protected pointer, but omit
782  * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
783  * is useful in cases where update-side locks prevent the value of the
784  * pointer from changing.  Please note that this primitive does -not-
785  * prevent the compiler from repeating this reference or combining it
786  * with other references, so it should not be used without protection
787  * of appropriate locks.
788  *
789  * This function is only for update-side use.  Using this function
790  * when protected only by rcu_read_lock() will result in infrequent
791  * but very ugly failures.
792  */
793 #define rcu_dereference_protected(p, c) \
794 	__rcu_dereference_protected((p), (c), __rcu)
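/*
 * Usage sketch (illustrative; "gp" and "gp_lock" are hypothetical): an
 * updater holding the lock that excludes other updaters may fetch the
 * pointer without read-side ordering:
 *
 *	spin_lock(&gp_lock);
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	...
 *	spin_unlock(&gp_lock);
 */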
795 
796 
797 /**
798  * rcu_dereference() - fetch RCU-protected pointer for dereferencing
799  * @p: The pointer to read, prior to dereferencing
800  *
801  * This is a simple wrapper around rcu_dereference_check().
802  */
803 #define rcu_dereference(p) rcu_dereference_check(p, 0)
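/*
 * Usage sketch (illustrative; "gp" and "struct foo" are hypothetical):
 * fetch the pointer once into a local variable and use only that local
 * copy, since repeated rcu_dereference() calls may return different
 * values if an update happens in the meantime:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 */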
804 
805 /**
806  * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
807  * @p: The pointer to read, prior to dereferencing
808  *
809  * Makes rcu_dereference_check() do the dirty work.
810  */
811 #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)
812 
813 /**
814  * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
815  * @p: The pointer to read, prior to dereferencing
816  *
817  * Makes rcu_dereference_check() do the dirty work.
818  */
819 #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)
820 
821 /**
822  * rcu_read_lock() - mark the beginning of an RCU read-side critical section
823  *
824  * When synchronize_rcu() is invoked on one CPU while other CPUs
825  * are within RCU read-side critical sections, then the
826  * synchronize_rcu() is guaranteed to block until after all the other
827  * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
828  * on one CPU while other CPUs are within RCU read-side critical
829  * sections, invocation of the corresponding RCU callback is deferred
830  * until after all the other CPUs exit their critical sections.
831  *
832  * Note, however, that RCU callbacks are permitted to run concurrently
833  * with new RCU read-side critical sections.  One way that this can happen
834  * is via the following sequence of events: (1) CPU 0 enters an RCU
835  * read-side critical section, (2) CPU 1 invokes call_rcu() to register
836  * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
837  * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
838  * callback is invoked.  This is legal, because the RCU read-side critical
839  * section that was running concurrently with the call_rcu() (and which
840  * therefore might be referencing something that the corresponding RCU
841  * callback would free up) has completed before the corresponding
842  * RCU callback is invoked.
843  *
844  * RCU read-side critical sections may be nested.  Any deferred actions
845  * will be deferred until the outermost RCU read-side critical section
846  * completes.
847  *
848  * You can avoid reading and understanding the next paragraph by
849  * following this rule: don't put anything in an rcu_read_lock() RCU
850  * read-side critical section that would block in a !PREEMPT kernel.
851  * But if you want the full story, read on!
852  *
853  * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
854  * it is illegal to block while in an RCU read-side critical section.
855  * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT
856  * kernel builds, RCU read-side critical sections may be preempted,
857  * but explicit blocking is illegal.  Finally, in preemptible RCU
858  * implementations in real-time (with -rt patchset) kernel builds, RCU
859  * read-side critical sections may be preempted and they may also block, but
860  * only when acquiring spinlocks that are subject to priority inheritance.
861  */
862 static inline void rcu_read_lock(void)
863 {
864 	__rcu_read_lock();
865 	__acquire(RCU);
866 	rcu_lock_acquire(&rcu_lock_map);
867 	rcu_lockdep_assert(rcu_is_watching(),
868 			   "rcu_read_lock() used illegally while idle");
869 }
870 
871 /*
872  * So where is rcu_write_lock()?  It does not exist, as there is no
873  * way for writers to lock out RCU readers.  This is a feature, not
874  * a bug -- this property is what provides RCU's performance benefits.
875  * Of course, writers must coordinate with each other.  The normal
876  * spinlock primitives work well for this, but any other technique may be
877  * used as well.  RCU does not care how the writers keep out of each
878  * others' way, as long as they do so.
879  */
880 
881 /**
882  * rcu_read_unlock() - marks the end of an RCU read-side critical section.
883  *
884  * In most situations, rcu_read_unlock() is immune from deadlock.
885  * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
886  * is responsible for deboosting, which it does via rt_mutex_unlock().
887  * Unfortunately, this function acquires the scheduler's runqueue and
888  * priority-inheritance spinlocks.  This means that deadlock could result
889  * if the caller of rcu_read_unlock() already holds one of these locks or
890  * any lock that is ever acquired while holding them.
891  *
892  * That said, RCU readers are never priority boosted unless they were
893  * preempted.  Therefore, one way to avoid deadlock is to make sure
894  * that preemption never happens within any RCU read-side critical
895  * section whose outermost rcu_read_unlock() is called with one of
896  * rt_mutex_unlock()'s locks held.  Such preemption can be avoided in
897  * a number of ways, for example, by invoking preempt_disable() before
898  * the critical section's outermost rcu_read_lock().
899  *
900  * Given that the set of locks acquired by rt_mutex_unlock() might change
901  * at any time, a somewhat more future-proofed approach is to make sure
902  * that preemption never happens within any RCU read-side critical
903  * section whose outermost rcu_read_unlock() is called with irqs disabled.
904  * This approach relies on the fact that rt_mutex_unlock() currently only
905  * acquires irq-disabled locks.
906  *
907  * The second of these two approaches is best in most situations,
908  * however, the first approach can also be useful, at least to those
909  * developers willing to keep abreast of the set of locks acquired by
910  * rt_mutex_unlock().
911  *
912  * See rcu_read_lock() for more information.
913  */
914 static inline void rcu_read_unlock(void)
915 {
916 	rcu_lockdep_assert(rcu_is_watching(),
917 			   "rcu_read_unlock() used illegally while idle");
918 	rcu_lock_release(&rcu_lock_map);
919 	__release(RCU);
920 	__rcu_read_unlock();
921 }
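/*
 * Read-side usage sketch (illustrative; "foo_list", "struct foo", and
 * the fields are hypothetical, and list_for_each_entry_rcu() comes from
 * <linux/rculist.h>): every use of an RCU-protected pointer must sit
 * between rcu_read_lock() and rcu_read_unlock():
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(p, &foo_list, list) {
 *		if (p->key == key) {
 *			res = p->value;
 *			break;
 *		}
 *	}
 *	rcu_read_unlock();
 */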
922 
923 /**
924  * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
925  *
926  * This is the equivalent of rcu_read_lock(), but to be used when updates
927  * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
928  * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
929  * softirq handler to be a quiescent state, a process-context RCU read-side
930  * critical section must be protected by disabling softirqs. Read-side
931  * critical sections in interrupt context can use just rcu_read_lock(),
932  * though this should at least be commented to avoid confusing people
933  * reading the code.
934  *
935  * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
936  * must occur in the same context, for example, it is illegal to invoke
937  * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
938  * was invoked from some other task.
939  */
940 static inline void rcu_read_lock_bh(void)
941 {
942 	local_bh_disable();
943 	__acquire(RCU_BH);
944 	rcu_lock_acquire(&rcu_bh_lock_map);
945 	rcu_lockdep_assert(rcu_is_watching(),
946 			   "rcu_read_lock_bh() used illegally while idle");
947 }
948 
949 /*
950  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
951  *
952  * See rcu_read_lock_bh() for more information.
953  */
954 static inline void rcu_read_unlock_bh(void)
955 {
956 	rcu_lockdep_assert(rcu_is_watching(),
957 			   "rcu_read_unlock_bh() used illegally while idle");
958 	rcu_lock_release(&rcu_bh_lock_map);
959 	__release(RCU_BH);
960 	local_bh_enable();
961 }
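/*
 * Usage sketch (illustrative; "gp" is a hypothetical pointer updated
 * via call_rcu_bh()): a process-context reader uses the _bh variants so
 * that softirq-based grace periods cannot complete while the data is
 * being accessed:
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(gp);
 *	if (p)
 *		handle(p);
 *	rcu_read_unlock_bh();
 */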
962 
963 /**
964  * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
965  *
966  * This is the equivalent of rcu_read_lock(), but to be used when updates
967  * are being done using call_rcu_sched() or synchronize_rcu_sched().
968  * Read-side critical sections can also be introduced by anything that
969  * disables preemption, including local_irq_disable() and friends.
970  *
971  * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
972  * must occur in the same context, for example, it is illegal to invoke
973  * rcu_read_unlock_sched() from process context if the matching
974  * rcu_read_lock_sched() was invoked from an NMI handler.
975  */
976 static inline void rcu_read_lock_sched(void)
977 {
978 	preempt_disable();
979 	__acquire(RCU_SCHED);
980 	rcu_lock_acquire(&rcu_sched_lock_map);
981 	rcu_lockdep_assert(rcu_is_watching(),
982 			   "rcu_read_lock_sched() used illegally while idle");
983 }
984 
985 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
986 static inline notrace void rcu_read_lock_sched_notrace(void)
987 {
988 	preempt_disable_notrace();
989 	__acquire(RCU_SCHED);
990 }
991 
992 /*
993  * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
994  *
995  * See rcu_read_lock_sched for more information.
996  */
997 static inline void rcu_read_unlock_sched(void)
998 {
999 	rcu_lockdep_assert(rcu_is_watching(),
1000 			   "rcu_read_unlock_sched() used illegally while idle");
1001 	rcu_lock_release(&rcu_sched_lock_map);
1002 	__release(RCU_SCHED);
1003 	preempt_enable();
1004 }
1005 
1006 /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
1007 static inline notrace void rcu_read_unlock_sched_notrace(void)
1008 {
1009 	__release(RCU_SCHED);
1010 	preempt_enable_notrace();
1011 }
1012 
1013 /**
1014  * RCU_INIT_POINTER() - initialize an RCU protected pointer
1015  *
1016  * Initialize an RCU-protected pointer in special cases where readers
1017  * do not need ordering constraints on the CPU or the compiler.  These
1018  * special cases are:
1019  *
1020  * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
1021  * 2.	The caller has taken whatever steps are required to prevent
1022  *	RCU readers from concurrently accessing this pointer -or-
1023  * 3.	The referenced data structure has already been exposed to
1024  *	readers either at compile time or via rcu_assign_pointer() -and-
1025  *	a.	You have not made -any- reader-visible changes to
1026  *		this structure since then -or-
1027  *	b.	It is OK for readers accessing this structure from its
1028  *		new location to see the old state of the structure.  (For
1029  *		example, the changes were to statistical counters or to
1030  *		other state where exact synchronization is not required.)
1031  *
1032  * Failure to follow these rules governing use of RCU_INIT_POINTER() will
1033  * result in impossible-to-diagnose memory corruption.  That is, the structures
1034  * will look OK in crash dumps, but any concurrent RCU readers might
1035  * see pre-initialized values of the referenced data structure.  So
1036  * please be very careful how you use RCU_INIT_POINTER()!!!
1037  *
1038  * If you are creating an RCU-protected linked structure that is accessed
1039  * by a single external-to-structure RCU-protected pointer, then you may
1040  * use RCU_INIT_POINTER() to initialize the internal RCU-protected
1041  * pointers, but you must use rcu_assign_pointer() to initialize the
1042  * external-to-structure pointer -after- you have completely initialized
1043  * the reader-accessible portions of the linked structure.
1044  *
1045  * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
1046  * ordering guarantees for either the CPU or the compiler.
1047  */
1048 #define RCU_INIT_POINTER(p, v) \
1049 	do { \
1050 		p = RCU_INITIALIZER(v); \
1051 	} while (0)
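/*
 * Usage sketch (illustrative; "gp", "gp_lock", and foo_reclaim() are
 * hypothetical): NULLing a pointer needs no ordering, so
 * RCU_INIT_POINTER() suffices where rcu_assign_pointer() would be
 * overkill:
 *
 *	spin_lock(&gp_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	RCU_INIT_POINTER(gp, NULL);
 *	spin_unlock(&gp_lock);
 *	if (old)
 *		call_rcu(&old->rcu, foo_reclaim);
 */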
1052 
1053 /**
1054  * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
1055  *
1056  * GCC-style initialization for an RCU-protected pointer in a structure field.
1057  */
1058 #define RCU_POINTER_INITIALIZER(p, v) \
1059 		.p = RCU_INITIALIZER(v)
1060 
1061 /*
1062  * Does the specified offset indicate that the corresponding rcu_head
1063  * structure can be handled by kfree_rcu()?
1064  */
1065 #define __is_kfree_rcu_offset(offset) ((offset) < 4096)
1066 
1067 /*
1068  * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
1069  */
1070 #define __kfree_rcu(head, offset) \
1071 	do { \
1072 		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
1073 		kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
1074 	} while (0)
1075 
1076 /**
1077  * kfree_rcu() - kfree an object after a grace period.
1078  * @ptr:	pointer to kfree
1079  * @rcu_head:	the name of the struct rcu_head within the type of @ptr.
1080  *
1081  * Many rcu callback functions just call kfree() on the base structure.
1082  * These functions are trivial, but their size adds up, and furthermore
1083  * when they are used in a kernel module, that module must invoke the
1084  * high-latency rcu_barrier() function at module-unload time.
1085  *
1086  * The kfree_rcu() function handles this issue.  Rather than encoding a
1087  * function address in the embedded rcu_head structure, kfree_rcu() instead
1088  * encodes the offset of the rcu_head structure within the base structure.
1089  * Because the functions are not allowed in the low-order 4096 bytes of
1090  * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
1091  * If the offset is larger than 4095 bytes, a compile-time error will
1092  * be generated in __kfree_rcu().  If this error is triggered, you can
1093  * either fall back to use of call_rcu() or rearrange the structure to
1094  * position the rcu_head structure into the first 4096 bytes.
1095  *
1096  * Note that the allowable offset might decrease in the future, for example,
1097  * to allow something like kmem_cache_free_rcu().
1098  *
1099  * The BUILD_BUG_ON check must not involve any function calls, hence the
1100  * checks are done in macros here.
1101  */
1102 #define kfree_rcu(ptr, rcu_head)					\
1103 	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
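/*
 * Usage sketch (illustrative; "struct foo" with an embedded rcu_head
 * named "rcu" and the old_fp pointer are hypothetical): instead of
 * writing a callback that only calls kfree(), the updater can simply
 * do:
 *
 *	kfree_rcu(old_fp, rcu);
 *
 * which queues the kfree() to run after a grace period, with no
 * separate callback function needed.
 */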
1104 
1105 #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
1106 static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
1107 {
1108 	*delta_jiffies = ULONG_MAX;
1109 	return 0;
1110 }
1111 #endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
1112 
1113 #if defined(CONFIG_RCU_NOCB_CPU_ALL)
1114 static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
1115 #elif defined(CONFIG_RCU_NOCB_CPU)
1116 bool rcu_is_nocb_cpu(int cpu);
1117 #else
1118 static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
1119 #endif
1120 
1121 
1122 /* Only for use by adaptive-ticks code. */
1123 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
1124 bool rcu_sys_is_idle(void);
1125 void rcu_sysidle_force_exit(void);
1126 #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
1127 
1128 static inline bool rcu_sys_is_idle(void)
1129 {
1130 	return false;
1131 }
1132 
1133 static inline void rcu_sysidle_force_exit(void)
1134 {
1135 }
1136 
1137 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
1138 
1139 
1140 #endif /* __LINUX_RCUPDATE_H */
1141