/*
 * Read-Copy Update mechanism for mutual exclusion (RT implementation)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author:  Paul McKenney <paulmck@us.ibm.com>
 *
 * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * 		Documentation/RCU
 *
 */

#ifndef __LINUX_RCUPREEMPT_H
#define __LINUX_RCUPREEMPT_H

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/ratelimit.h>	/* DEFINE_RATELIMIT_STATE() in the NO_HZ hooks below */

struct rcu_dyntick_sched {
	int dynticks;		/* Incremented on each nohz entry and exit; */
				/*  odd while the CPU is active, even in nohz. */
	int dynticks_snap;	/* Snapshot of dynticks for GP detection. */
	int sched_qs;		/* Count of sched quiescent states seen. */
	int sched_qs_snap;	/* Snapshot of sched_qs for GP detection. */
	int sched_dynticks_snap; /* Snapshot of dynticks for sched GP. */
};

DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);

static inline void rcu_qsctr_inc(int cpu)
{
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);

	rdssp->sched_qs++;
}
#define rcu_bh_qsctr_inc(cpu)

/*
 * Someone might want to pass call_rcu_bh as a function pointer.
 * So this needs to be just a rename, not a function-like macro
 * (no parentheses).
 */
#define call_rcu_bh	 	call_rcu

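/*
 * Illustrative sketch (example only, not part of the original header):
 * because call_rcu_bh is an object-like rename rather than a
 * function-like macro, callers can take its "address".  The variable
 * name below is hypothetical; a function-like macro would fail to
 * expand here, since no argument list follows.
 */
#if 0	/* example only */
void (*my_call_rcu)(struct rcu_head *head,
		    void (*func)(struct rcu_head *head)) = call_rcu_bh;
#endif
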
/**
 * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full
 * synchronize_sched()-style grace period elapses, in other words after
 * all currently executing preempt-disabled sections of code (including
 * hardirq handlers, NMI handlers, and local_irq_save() blocks) have
 * completed.
 */
extern void call_rcu_sched(struct rcu_head *head,
			   void (*func)(struct rcu_head *head));

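/*
 * Illustrative sketch (example only): the usual calling pattern embeds
 * a struct rcu_head in the caller's own structure and frees the
 * enclosing object from the callback once the grace period has
 * elapsed.  The struct, helper, and variable names are hypothetical.
 */
#if 0	/* example only */
struct my_data {
	int value;
	struct rcu_head rcu;
};

static void my_data_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct my_data, rcu));
}

/* After unlinking a struct my_data *p from all reader-visible paths: */
call_rcu_sched(&p->rcu, my_data_reclaim);
#endif
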
extern void __rcu_read_lock(void)	__acquires(RCU);
extern void __rcu_read_unlock(void)	__releases(RCU);
extern int rcu_pending(int cpu);
extern int rcu_needs_cpu(int cpu);

#define __rcu_read_lock_bh()	do { rcu_read_lock(); local_bh_disable(); } while (0)
#define __rcu_read_unlock_bh()	do { local_bh_enable(); rcu_read_unlock(); } while (0)

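/*
 * Illustrative sketch (example only): the do { } while (0) wrapping
 * above keeps these multi-statement macros safe in unbraced if/else,
 * where a bare { } block followed by ';' would terminate the if
 * statement early and orphan the else.
 */
#if 0	/* example only */
if (nested)
	__rcu_read_lock_bh();	/* expands to a single statement */
else
	rcu_read_lock();
#endif
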
extern void __synchronize_sched(void);

extern void __rcu_init(void);
extern void rcu_init_sched(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);

/*
 * Return the number of RCU batches processed thus far. Useful for
 * debugging and statistics. The _bh variant is identical to straight RCU.
 */
static inline long rcu_batches_completed_bh(void)
{
	return rcu_batches_completed();
}

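/*
 * Illustrative sketch (example only): batch counts can be sampled
 * before and after a grace period to check forward progress, as
 * stress/torture tests do.  The variable name is hypothetical.
 */
#if 0	/* example only */
long completed = rcu_batches_completed();

synchronize_rcu();	/* at least one full grace period elapses */
WARN_ON(rcu_batches_completed() == completed);
#endif
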
#ifdef CONFIG_RCU_TRACE
struct rcupreempt_trace;
extern long *rcupreempt_flipctr(int cpu);
extern long rcupreempt_data_completed(void);
extern int rcupreempt_flip_flag(int cpu);
extern int rcupreempt_mb_flag(int cpu);
extern char *rcupreempt_try_flip_state_name(void);
extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
#endif

struct softirq_action;

#ifdef CONFIG_NO_HZ

static inline void rcu_enter_nohz(void)
{
	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);

	smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */
	__get_cpu_var(rcu_dyntick_sched).dynticks++;
	WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
}

static inline void rcu_exit_nohz(void)
{
	static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);

	__get_cpu_var(rcu_dyntick_sched).dynticks++;
	smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
	WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
				&rs);
}

#else /* CONFIG_NO_HZ */
#define rcu_enter_nohz()	do { } while (0)
#define rcu_exit_nohz()		do { } while (0)
#endif /* CONFIG_NO_HZ */

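/*
 * Illustrative sketch (example only, not part of the original header):
 * a simplified view of how grace-period detection can use the dynticks
 * counter.  It snapshots the counter into dynticks_snap, then later
 * decides a CPU needs no explicit quiescent state if the CPU is
 * currently in dynticks-idle (even counter) or has transitioned since
 * the snapshot (counter changed).  The helper name is hypothetical.
 */
#if 0	/* example only */
static int example_cpu_needs_attention(int cpu)
{
	struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
	int curr = rdssp->dynticks;
	int snap = rdssp->dynticks_snap;

	/*
	 * Even counter: CPU is idle right now.  Changed counter: CPU
	 * passed through an idle transition.  Either way, it cannot be
	 * in a read-side critical section predating the snapshot.
	 */
	if ((curr & 0x1) == 0 || curr != snap)
		return 0;	/* CPU may be ignored for this GP. */
	return 1;		/* Must wait for a quiescent state. */
}
#endif
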
/*
 * A context switch is a grace period for rcupreempt synchronize_rcu()
 * only during early boot, before the scheduler has been initialized.
 * So, how the heck do we get a context switch?  Well, if the caller
 * invokes synchronize_rcu(), they are willing to accept a context
 * switch, so we simply pretend that one happened.
 *
 * After boot, there might be a blocked or preempted task in an RCU
 * read-side critical section, so we cannot then take the fastpath.
 */
static inline int rcu_blocking_is_gp(void)
{
	return num_online_cpus() == 1 && !rcu_scheduler_active;
}

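/*
 * Illustrative sketch (example only): how an update-side primitive can
 * use rcu_blocking_is_gp() to short-circuit early-boot calls.  The
 * function shown is a simplified, hypothetical stand-in for the real
 * synchronize_rcu() implementation in the .c file.
 */
#if 0	/* example only */
void example_synchronize_rcu(void)
{
	if (rcu_blocking_is_gp())
		return;	/* One CPU, scheduler not yet running: this
			 * blocking call is itself a grace period. */
	/* ... otherwise wait for a real grace period to elapse ... */
}
#endif
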
#endif /* __LINUX_RCUPREEMPT_H */