/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>

/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define DYNTICK_IRQ_NONIDLE	((LONG_MAX / 2) + 1)


/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}
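
/*
 * Worked example (illustrative only, not part of this header): with
 * RCU_SEQ_CTR_SHIFT == 2 and RCU_SEQ_STATE_MASK == 0x3, the sequence
 * value 0x1d decomposes as:
 *
 *	rcu_seq_ctr(0x1d)   == 0x7	(0x1d >> 2)
 *	rcu_seq_state(0x1d) == 0x1	(0x1d & 0x3)
 *
 * that is, counter 7 with a nonzero (in-progress) state.
 */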

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}
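
/*
 * Worked example (an illustrative sketch, not kernel code): starting
 * from a hypothetical local sequence counter holding 8 (counter 2,
 * state 0):
 *
 *	unsigned long seq = 8;
 *
 *	rcu_seq_start(&seq);	// seq == 9:  counter 2, state 1.
 *	// ... update-side operation ...
 *	rcu_seq_end(&seq);	// seq == 12: counter 3, state 0.
 */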

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time.  This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}
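
/*
 * Worked example for the arithmetic above (illustrative only): with
 * RCU_SEQ_STATE_MASK == 0x3, an idle *sp of 8 yields (8 + 7) & ~0x3 == 12,
 * the value *sp reaches after one full grace period (9 at rcu_seq_start(),
 * 12 at rcu_seq_end()).  An in-progress *sp of 9 instead yields
 * (9 + 7) & ~0x3 == 16, which additionally waits out the already-started
 * grace period.
 */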

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}
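
/*
 * Typical polling pattern (an illustrative sketch, not a kernel API;
 * seq is a hypothetical sequence counter):
 *
 *	unsigned long s = rcu_seq_snap(&seq);
 *
 *	while (!rcu_seq_done(&seq, s))
 *		cpu_relax();	// Wait for a full update-side operation.
 */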

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}
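
/*
 * Worked example (illustrative only): given old == 8 (counter 2, idle)
 * and new == 13 (counter 3, state 1), rcu_seq_completed_gp() returns
 * true because 8 < (13 & ~0x3) == 12, and rcu_seq_new_gp() returns
 * true because ((8 + 0x3) & ~0x3) == 8 < 13.
 */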

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either of new and old is not an exact grace period.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
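
/*
 * Worked example (illustrative only): new == 20 and old == 8, both
 * with all state bits zero, give rnd_diff == 20 - 8 + 0 == 12, so the
 * function returns ((12 - 0x3 - 1) >> 2) + 2 == 4.
 */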

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API. These are in rcupdate.h because they are used by all
 * RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern const struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

extern int rcu_cpu_stall_suppress_at_boot;

static inline bool rcu_stall_is_suppressed_at_boot(void)
{
	return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
}

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
int rcu_jiffies_till_stall_check(void);

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
}

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)
#else /* #ifdef CONFIG_RCU_STALL_COMMON */

static inline bool rcu_stall_is_suppressed(void)
{
	return rcu_stall_is_suppressed_at_boot();
}
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x) tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)
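
/*
 * Usage sketch (illustrative only; something_bad() is hypothetical):
 * because each callsite gets its own static ___rfd_beenhere, a given
 * callsite dumps the buffer at most once per boot:
 *
 *	if (WARN_ON_ONCE(something_bad()))
 *		rcu_ftrace_dump(DUMP_ALL);
 */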

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	for (i = 0; i < RCU_NUM_LVLS; i++)
		levelspread[i] = INT_MIN;
	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
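
/*
 * Worked example (illustrative only): with rcu_num_lvls == 2,
 * levelcnt[] == { 1, 4 }, nr_cpu_ids == 63, and !rcu_fanout_exact,
 * the balancing pass computes levelspread[1] == (63 + 4 - 1) / 4 == 16
 * and levelspread[0] == (4 + 1 - 1) / 1 == 4.
 */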

extern void rcu_init_geometry(void);

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define srcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	srcu_for_each_node_breadth_first(&rcu_state, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))
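
/*
 * Usage sketch (illustrative only; do_something_with() is
 * hypothetical): visit every possible CPU covered by each leaf
 * rcu_node structure:
 *
 *	struct rcu_node *rnp;
 *	int cpu;
 *
 *	rcu_for_each_leaf_node(rnp)
 *		for_each_leaf_node_possible_cpu(rnp, cpu)
 *			do_something_with(rnp, cpu);
 */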

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
	     (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and, most importantly, transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
 */
#define raw_spin_lock_rcu_node(p)					\
do {									\
	raw_spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_rcu_node(p)					\
do {									\
	lockdep_assert_irqs_disabled();					\
	raw_spin_unlock(&ACCESS_PRIVATE(p, lock));			\
} while (0)

#define raw_spin_lock_irq_rcu_node(p)					\
do {									\
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irq_rcu_node(p)					\
do {									\
	lockdep_assert_irqs_disabled();					\
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock));			\
} while (0)

#define raw_spin_lock_irqsave_rcu_node(p, flags)			\
do {									\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags)			\
do {									\
	lockdep_assert_irqs_disabled();					\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags);	\
} while (0)

#define raw_spin_trylock_rcu_node(p)					\
({									\
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock));	\
									\
	if (___locked)							\
		smp_mb__after_unlock_lock();				\
	___locked;							\
})

#define raw_lockdep_assert_held_rcu_node(p)				\
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
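
/*
 * Usage sketch (illustrative only): the wrappers pair exactly like
 * their raw counterparts, with the acquisition side supplying the
 * additional ordering described above:
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	// ... accesses protected by rnp->lock ...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */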

#endif /* #if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU) */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
#ifdef CONFIG_TASKS_RCU_GENERIC
void show_rcu_tasks_gp_kthreads(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void show_rcu_tasks_gp_kthreads(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_TASKS_FLAVOR,
	RCU_TASKS_RUDE_FLAVOR,
	RCU_TASKS_TRACING_FLAVOR,
	RCU_TRIVIAL_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
void rcu_gp_set_torture_wait(int duration);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
static inline void rcu_gp_set_torture_wait(int duration) { }
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline bool rcu_check_boost_fail(unsigned long gp_state, int *cpup) { return true; }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
#ifdef CONFIG_RCU_EXP_KTHREAD
extern struct kthread_worker *rcu_exp_gp_kworker;
extern struct kthread_worker *rcu_exp_par_gp_kworker;
#else /* !CONFIG_RCU_EXP_KTHREAD */
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* CONFIG_RCU_EXP_KTHREAD */
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
void rcu_bind_current_to_nocb(void);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
void show_rcu_tasks_classic_gp_kthread(void);
#else
static inline void show_rcu_tasks_classic_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
void show_rcu_tasks_rude_gp_kthread(void);
#else
static inline void show_rcu_tasks_rude_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
void show_rcu_tasks_trace_gp_kthread(void);
#else
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif

#endif /* __LINUX_RCU_H */