/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>

/**
 * struct rcu_head - callback structure for use with RCU
 * @next: next update request in a list
 * @func: actual update function to call after the grace period.
 */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *head);
};

/* Internal to kernel, but needed by rcupreempt.h. */
extern int rcu_scheduler_active;

#if defined(CONFIG_CLASSIC_RCU)
#include <linux/rcuclassic.h>
#elif defined(CONFIG_TREE_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_PREEMPT_RCU)
#include <linux/rcupreempt.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */

#define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
#define INIT_RCU_HEAD(ptr) do { \
	(ptr)->next = NULL; (ptr)->func = NULL; \
} while (0)

/**
 * rcu_read_lock - mark the beginning of an RCU read-side critical section.
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections. Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with RCU read-side critical sections. One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked. This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested. Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * It is illegal to block while in an RCU read-side critical section.
 */
#define rcu_read_lock() __rcu_read_lock()
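
/*
 * For example, a reader might safely access an RCU-protected global
 * pointer as follows. This is a minimal usage sketch, not part of
 * this header: "gp", "struct foo" and do_something_with() are
 * hypothetical names used only for illustration.
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p->a);
 *	rcu_read_unlock();
 *
 * The reader must not block between rcu_read_lock() and
 * rcu_read_unlock(), and must not use p after rcu_read_unlock()
 * returns, since a grace period may then complete at any time.
 */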
/**
 * rcu_read_unlock - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */

/*
 * So where is rcu_write_lock()? It does not exist, as there is no
 * way for writers to lock out RCU readers. This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other. The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well. RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */
#define rcu_read_unlock() __rcu_read_unlock()

/**
 * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
 *
 * This is the equivalent of rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks
 * consider completion of a softirq handler to be a quiescent state,
 * a process in an RCU read-side critical section must be protected by
 * disabling softirqs. Read-side critical sections in interrupt context
 * can use just rcu_read_lock().
 */
#define rcu_read_lock_bh() __rcu_read_lock_bh()

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
#define rcu_read_unlock_bh() __rcu_read_unlock_bh()

/**
 * rcu_read_lock_sched - mark the beginning of an RCU-classic critical section
 *
 * Should be used with either
 * - synchronize_sched()
 * or
 * - call_rcu_sched() and rcu_barrier_sched()
 * on the write-side to ensure proper synchronization.
 */
#define rcu_read_lock_sched() preempt_disable()
#define rcu_read_lock_sched_notrace() preempt_disable_notrace()

/*
 * rcu_read_unlock_sched - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
#define rcu_read_unlock_sched() preempt_enable()
#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */
#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			smp_wmb(); \
		(p) = (v); \
	})
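
/*
 * For example, an updater might publish a newly initialized structure
 * to the readers sketched above. Again a minimal sketch using the
 * hypothetical "gp" and "struct foo"; error handling and the
 * update-side lock are omitted:
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	p->a = 1;
 *	rcu_assign_pointer(gp, p);
 *
 * The smp_wmb() in rcu_assign_pointer() orders the initialization of
 * ->a before the assignment to gp, so any reader that observes the
 * new value of gp via rcu_dereference() also observes ->a == 1.
 */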
/* Infrastructure to implement the synchronize_() primitives. */

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

extern void wakeme_after_rcu(struct rcu_head *head);

/**
 * synchronize_sched - block until all CPUs have exited any non-preemptive
 * kernel code sequences.
 *
 * This means that all preempt_disable code sequences, including NMI and
 * hardware-interrupt handlers, in progress on entry will have completed
 * before this primitive returns. However, this does not guarantee that
 * softirq handlers will have completed, since in some kernels, these
 * handlers can run in process context, and can block.
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API. In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
#define synchronize_sched() __synchronize_sched()

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
extern void call_rcu(struct rcu_head *head,
		     void (*func)(struct rcu_head *head));

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 * These may be nested.
 */
extern void call_rcu_bh(struct rcu_head *head,
			void (*func)(struct rcu_head *head));
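
/*
 * For example, to replace and then free an element once a grace period
 * has elapsed, an updater can embed an rcu_head in the element and pass
 * a callback to call_rcu(). A minimal sketch, reusing the hypothetical
 * "gp" and "struct foo" from the examples above and assuming the
 * update-side lock that serializes writers is already held:
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	old = gp;
 *	rcu_assign_pointer(gp, new);
 *	call_rcu(&old->rcu, foo_reclaim);
 *
 * foo_reclaim() will not be invoked until all readers that might still
 * hold a reference to "old" have left their RCU read-side critical
 * sections.
 */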
/* Exported common interfaces */
extern void synchronize_rcu(void);
extern void rcu_barrier(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);

/* Internal to kernel */
extern void rcu_init(void);
extern void rcu_scheduler_starting(void);
extern int rcu_needs_cpu(int cpu);

#endif /* __LINUX_RCUPDATE_H */