/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>

/*
 * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, this has not been tested, so there is probably some
 * bug somewhere.
 */
#define MAX_RCU_LVLS 3
#define RCU_FANOUT	      (CONFIG_RCU_FANOUT)
#define RCU_FANOUT_SQ	      (RCU_FANOUT * RCU_FANOUT)
#define RCU_FANOUT_CUBE	      (RCU_FANOUT_SQ * RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT
#  define NUM_RCU_LVLS	      1
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      (NR_CPUS)
#  define NUM_RCU_LVL_2	      0
#  define NUM_RCU_LVL_3	      0
#elif NR_CPUS <= RCU_FANOUT_SQ
#  define NUM_RCU_LVLS	      2
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
#  define NUM_RCU_LVL_2	      (NR_CPUS)
#  define NUM_RCU_LVL_3	      0
#elif NR_CPUS <= RCU_FANOUT_CUBE
#  define NUM_RCU_LVLS	      3
#  define NUM_RCU_LVL_0	      1
#  define NUM_RCU_LVL_1	      (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
#  define NUM_RCU_LVL_2	      (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
#  define NUM_RCU_LVL_3	      NR_CPUS
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT */

#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
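
/*
 * Worked example (illustrative only, not from the original source):
 * with NR_CPUS == 64 and CONFIG_RCU_FANOUT == 16, NR_CPUS fits within
 * RCU_FANOUT_SQ (256), so the hierarchy has two levels: one root node
 * (NUM_RCU_LVL_0 == 1) fanning out to NUM_RCU_LVL_1 == (64+15)/16 == 4
 * leaf nodes, each covering 16 CPUs.  RCU_SUM == 1 + 4 + 64 + 0 == 69,
 * so NUM_RCU_NODES == 69 - 64 == 5 rcu_node structures in total.
 */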

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	int dynticks_nesting;	/* Track nesting level, sort of. */
	int dynticks;		/* Even value for dynticks-idle, else odd. */
	int dynticks_nmi;	/* Even value for either dynticks-idle or */
				/*  not in nmi handler, else odd.  So this */
				/*  remains even for nmi from irq handler. */
};
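
/*
 * Illustrative sketch (an assumption about usage, not code from this
 * file): grace-period detection can snapshot ->dynticks into the
 * per-GP tracking field and recheck it later.  An even value, or any
 * change since the snapshot, shows the CPU passed through
 * dynticks-idle state and thus through a quiescent state:
 *
 *	curr = rdp->dynticks->dynticks;
 *	snap = rdp->dynticks_snap;
 *	if ((curr & 0x1) == 0 || curr != snap)
 *		return 1;	(CPU is or was dynticks-idle)
 */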

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	spinlock_t lock;
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
	unsigned long qsmaskinit;
				/* Per-GP initialization for qsmask. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	struct rcu_node *parent;
} ____cacheline_internodealigned_in_smp;
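
/*
 * Illustrative sketch (an assumption, not code from this file): when
 * the last CPU or group below a node has reported a quiescent state,
 * the node clears its own grpmask bit in its parent's qsmask, walking
 * toward the root; the grace period can end once the root's qsmask
 * reaches zero:
 *
 *	rnp->qsmask &= ~mask;
 *	while (rnp->qsmask == 0 && rnp->parent != NULL) {
 *		mask = rnp->grpmask;
 *		rnp = rnp->parent;
 *		rnp->qsmask &= ~mask;
 *	}
 */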

/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	long		completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	long		gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	long		passed_quiesc_completed;
					/* Value of completed at time of qs. */
	bool		passed_quiesc;	/* User-mode/idle loop etc. */
	bool		qs_pending;	/* Core waits for quiesc state. */
	bool		beenonline;	/* CPU online at least once. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to nxtlist, which is NULL.
	 *
	 * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries that batch # <= ->completed - 1: waiting for current GP
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries that batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 *
	 * An enqueue sketch follows the nxttail declaration below.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
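	/*
	 * Illustrative enqueue sketch (an assumption, not code from
	 * this file): a newly posted callback is appended to the
	 * RCU_NEXT segment by writing through its tail pointer and
	 * then advancing that pointer:
	 *
	 *	*rdp->nxttail[RCU_NEXT_TAIL] = head;
	 *	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
	 *	rdp->qlen++;
	 */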
	long		qlen;		/* # of queued callbacks */
	long		blimit;		/* Upper limit on a processed batch */

#ifdef CONFIG_NO_HZ
	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */
	int dynticks_nmi_snap;		/* Per-GP tracking for dynticks_nmi. */
#endif /* #ifdef CONFIG_NO_HZ */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
#ifdef CONFIG_NO_HZ
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
#endif /* #ifdef CONFIG_NO_HZ */
	unsigned long offline_fqs;	/* Kicked due to being offline. */
	unsigned long resched_ipi;	/* Sent a resched IPI. */

	/* 5) state to allow this CPU to force_quiescent_state on others */
	long n_rcu_pending;		/* rcu_pending() calls since boot. */
	long n_rcu_pending_force_qs;	/* when to force quiescent states. */

	int cpu;
};

/* Values for signaled field in struct rcu_state. */
#define RCU_GP_INIT		0	/* Grace period being initialized. */
#define RCU_SAVE_DYNTICK	1	/* Need to scan dyntick state. */
#define RCU_FORCE_QS		2	/* Need to force quiescent state. */
#ifdef CONFIG_NO_HZ
#define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK
#else /* #ifdef CONFIG_NO_HZ */
#define RCU_SIGNAL_INIT		RCU_FORCE_QS
#endif /* #else #ifdef CONFIG_NO_HZ */

#define RCU_JIFFIES_TILL_FORCE_QS	 3	/* for rsp->jiffies_force_qs */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
#define RCU_SECONDS_TILL_STALL_CHECK   (10 * HZ)  /* for rsp->jiffies_stall */
#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ)  /* for rsp->jiffies_stall */
#define RCU_STALL_RAT_DELAY		2	  /* Allow other CPUs time */
						  /*  to take at least one */
						  /*  scheduling clock irq */
						  /*  before ratting on them. */

#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
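
/*
 * Illustrative timing (an assumption about how the values above are
 * used): at grace-period start, rsp->jiffies_stall is set to roughly
 * jiffies + RCU_SECONDS_TILL_STALL_CHECK, so a stall is first
 * suspected about 10 seconds in; once a warning has been issued, the
 * recheck is pushed out by RCU_SECONDS_TILL_STALL_RECHECK, about
 * 30 seconds.
 */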

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
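/*
 * Layout sketch (illustrative, continuing the 64-CPU, fanout-16
 * example given earlier): ->node[0] is the root and ->node[1]
 * through ->node[4] are the four leaves, so ->level[0] == &->node[0]
 * and ->level[1] == &->node[1], with each leaf covering 16 CPUs.
 */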
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[NUM_RCU_LVLS];	/* Hierarchy levels. */
	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
	u8 levelspread[NUM_RCU_LVLS];		/* kids/node in each level. */
	struct rcu_data *rda[NR_CPUS];		/* array of rdp pointers. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	signaled ____cacheline_internodealigned_in_smp;
						/* Force QS state. */
	long	gpnum;				/* Current gp number. */
	long	completed;			/* # of last completed gp. */
	spinlock_t onofflock;			/* exclude on/offline and */
						/*  starting new GP. */
	spinlock_t fqslock;			/* Only one task forcing */
						/*  quiescent states. */
	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
#ifdef CONFIG_NO_HZ
	long dynticks_completed;		/* Value of completed @ snap. */
#endif /* #ifdef CONFIG_NO_HZ */
};

extern struct rcu_state rcu_state;
DECLARE_PER_CPU(struct rcu_data, rcu_data);

extern struct rcu_state rcu_bh_state;
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

/*
 * Increment the quiescent state counter.
 * The counter is somewhat degenerate: we do not need to know how many
 * quiescent states passed, just whether there was at least one since
 * the start of the grace period.  Thus it is really just a flag.
 */
static inline void rcu_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	rdp->passed_quiesc = 1;
	rdp->passed_quiesc_completed = rdp->completed;
}
static inline void rcu_bh_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
	rdp->passed_quiesc = 1;
	rdp->passed_quiesc_completed = rdp->completed;
}
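
/*
 * Illustrative usage (an assumption, not from this file): the
 * scheduler reports a quiescent state on every context switch, along
 * the lines of:
 *
 *	rcu_qsctr_inc(smp_processor_id());
 */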

extern int rcu_pending(int cpu);
extern int rcu_needs_cpu(int cpu);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire()	\
			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
#else
# define rcu_read_acquire()	do { } while (0)
# define rcu_read_release()	do { } while (0)
#endif

static inline void __rcu_read_lock(void)
{
	preempt_disable();
	__acquire(RCU);
	rcu_read_acquire();
}
static inline void __rcu_read_unlock(void)
{
	rcu_read_release();
	__release(RCU);
	preempt_enable();
}
static inline void __rcu_read_lock_bh(void)
{
	local_bh_disable();
	__acquire(RCU_BH);
	rcu_read_acquire();
}
static inline void __rcu_read_unlock_bh(void)
{
	rcu_read_release();
	__release(RCU_BH);
	local_bh_enable();
}
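
/*
 * Illustrative read-side usage (an assumption, not part of this
 * header; gp and do_something_with() are hypothetical): callers
 * normally use the rcu_read_lock()/rcu_read_unlock() wrappers, which
 * map onto the primitives above:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p);
 *	rcu_read_unlock();
 */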

#define __synchronize_sched() synchronize_rcu()

#define call_rcu_sched(head, func) call_rcu(head, func)

static inline void rcu_init_sched(void)
{
}

extern void __rcu_init(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);

extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);

#ifdef CONFIG_NO_HZ
void rcu_enter_nohz(void);
void rcu_exit_nohz(void);
#else /* CONFIG_NO_HZ */
static inline void rcu_enter_nohz(void)
{
}
static inline void rcu_exit_nohz(void)
{
}
#endif /* CONFIG_NO_HZ */

/*
 * A context switch is a grace period for rcutree: with only one CPU
 * online, blocking forces a context switch on that sole CPU, so
 * blocking itself suffices as a grace period.
 */
static inline int rcu_blocking_is_gp(void)
{
	return num_online_cpus() == 1;
}

#endif /* __LINUX_RCUTREE_H */