/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you call percpu_ref_kill() _before_ dropping the initial
 * refcount.
 *
 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
 * than an atomic_t - this is because of the way shutdown works, see
 * percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, and then marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0. After it returns,
 * it's safe to drop the initial ref.
 *
 * USAGE:
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
 * the kioctx from the process's list of kioctxs - after that, there can't be
 * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop
 * the initial ref with percpu_ref_put().
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be dropped
 * once - percpu_ref_kill() does not provide that guarantee by itself (it
 * returns void and must be called exactly once), so the caller needs some
 * other mechanism to synchronize teardown.
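 *
 * As a rough sketch of the pattern (struct foo, foo_release() and the error
 * handling are hypothetical, illustrative names, not part of this API):
 *
 *        struct foo {
 *                struct percpu_ref ref;
 *        };
 *
 *        static void foo_release(struct percpu_ref *ref)
 *        {
 *                kfree(container_of(ref, struct foo, ref));
 *        }
 *
 * Setup takes the implicit initial ref:
 *
 *        if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL))
 *                return -ENOMEM;
 *
 * While the object is live, gets and puts are cheap percpu operations:
 *
 *        percpu_ref_get(&foo->ref);
 *        percpu_ref_put(&foo->ref);
 *
 * Teardown kills the ref (collapsing it to atomic mode) and then drops the
 * initial ref, which may end up invoking foo_release():
 *
 *        percpu_ref_kill(&foo->ref);
 *        percpu_ref_put(&foo->ref);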
 */

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
        __PERCPU_REF_ATOMIC = 1LU << 0, /* operating in atomic mode */
        __PERCPU_REF_DEAD = 1LU << 1,   /* (being) killed */
        __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

        __PERCPU_REF_FLAG_BITS = 2,
};

/* @flags for percpu_ref_init() */
enum {
        /*
         * Start w/ ref == 1 in atomic mode. Can be switched to percpu
         * operation using percpu_ref_switch_to_percpu(). If initialized
         * with this flag, the ref will stay in atomic mode until
         * percpu_ref_switch_to_percpu() is invoked on it.
         */
        PERCPU_REF_INIT_ATOMIC = 1 << 0,

        /*
         * Start dead w/ ref == 0 in atomic mode. Must be revived with
         * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
         */
        PERCPU_REF_INIT_DEAD = 1 << 1,
};
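
/*
 * For example (a hypothetical sketch, reusing the struct foo from the comment
 * at the top of this file): a ref that must not start counting until the
 * object is published can be initialized dead and revived later:
 *
 *        percpu_ref_init(&foo->ref, foo_release, PERCPU_REF_INIT_DEAD,
 *                        GFP_KERNEL);
 *        ...
 *        percpu_ref_reinit(&foo->ref);  (ref becomes 1, usable from here on)
 */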

struct percpu_ref {
        atomic_long_t           count;
        /*
         * The low bits of the pointer hold the __PERCPU_REF_* flags; if
         * __PERCPU_REF_ATOMIC is set, the ref is in atomic mode and get/put
         * manipulate the atomic count instead of the percpu counters.
         */
        unsigned long           percpu_count_ptr;
        percpu_ref_func_t       *release;
        percpu_ref_func_t       *confirm_switch;
        bool                    force_atomic:1;
        struct rcu_head         rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
                                 percpu_ref_func_t *release, unsigned int flags,
                                 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill);
void percpu_ref_reinit(struct percpu_ref *ref);
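
/*
 * percpu_ref_kill_and_confirm() lets the caller learn when the switch to
 * atomic mode is complete. A hypothetical sketch (foo_confirm_kill() and the
 * confirm_done completion are illustrative, not part of this API):
 *
 *        static void foo_confirm_kill(struct percpu_ref *ref)
 *        {
 *                struct foo *foo = container_of(ref, struct foo, ref);
 *
 *                complete(&foo->confirm_done);
 *        }
 *
 *        percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *        wait_for_completion(&foo->confirm_done);
 *        (from here on, percpu_ref_tryget_live() is guaranteed to fail)
 *        percpu_ref_put(&foo->ref);
 */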

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Puts @ref in non percpu mode, then does a call_rcu() before gathering up the
 * percpu counters and dropping the initial ref.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
        return percpu_ref_kill_and_confirm(ref, NULL);
}

/*
 * Internal helper. Don't use outside percpu-refcount proper. The
 * function doesn't return the pointer and let the caller test it for NULL
 * because doing so forces the compiler to generate two conditional
 * branches as it can't assume that @ref->percpu_count is not NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
                                   unsigned long __percpu **percpu_countp)
{
        unsigned long percpu_ptr;

        /*
         * The value of @ref->percpu_count_ptr is tested for
         * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
         * used as a pointer. If the compiler generates a separate fetch
         * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
         * between, contaminating the pointer value, meaning that
         * ACCESS_ONCE() is required when fetching it.
         *
         * Also, we need a data dependency barrier to be paired with
         * smp_store_release() in __percpu_ref_switch_to_percpu().
         *
         * Use lockless deref which contains both.
         */
        percpu_ptr = lockless_dereference(ref->percpu_count_ptr);

        /*
         * Theoretically, the following could test just ATOMIC; however,
         * then we'd have to mask off DEAD separately as DEAD may be
         * visible without ATOMIC if we race with percpu_ref_kill(). DEAD
         * implies ATOMIC anyway. Test them together.
         */
        if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
                return false;

        *percpu_countp = (unsigned long __percpu *)percpu_ptr;
        return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
        unsigned long __percpu *percpu_count;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count))
                this_cpu_add(*percpu_count, nr);
        else
                atomic_long_add(nr, &ref->count);

        rcu_read_unlock_sched();
}

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
        percpu_ref_get_many(ref, 1);
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;
        bool ret;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count)) {
                this_cpu_inc(*percpu_count);
                ret = true;
        } else {
                ret = atomic_long_inc_not_zero(&ref->count);
        }

        rcu_read_unlock_sched();

        return ret;
}
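
/*
 * Note that, unlike percpu_ref_tryget_live() below, this succeeds even after
 * percpu_ref_kill() as long as the count hasn't reached zero. A hypothetical
 * sketch (foo and do_something() are illustrative names, not part of this
 * API):
 *
 *        if (percpu_ref_tryget(&foo->ref)) {
 *                do_something(foo);  (works even while foo is being killed)
 *                percpu_ref_put(&foo->ref);
 *        }
 */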

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed. Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail. For such guarantee, percpu_ref_kill_and_confirm()
 * should be used. After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;
        bool ret = false;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count)) {
                this_cpu_inc(*percpu_count);
                ret = true;
        } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
                ret = atomic_long_inc_not_zero(&ref->count);
        }

        rcu_read_unlock_sched();

        return ret;
}
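
/*
 * A typical lookup path, sketched after the lookup_ioctx() usage described at
 * the top of this file (foo_tree and the radix tree lookup are hypothetical
 * details, not part of this API): new users only get a reference while the
 * object is still live, so shutdown can cut off new lookups by killing the
 * ref.
 *
 *        rcu_read_lock();
 *        foo = radix_tree_lookup(&foo_tree, id);
 *        if (foo && !percpu_ref_tryget_live(&foo->ref))
 *                foo = NULL;
 *        rcu_read_unlock();
 */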

/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if 0, call the release function (which was passed
 * to percpu_ref_init())
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
        unsigned long __percpu *percpu_count;

        rcu_read_lock_sched();

        if (__ref_is_percpu(ref, &percpu_count))
                this_cpu_sub(*percpu_count, nr);
        else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
                ref->release(ref);

        rcu_read_unlock_sched();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if 0, call the release function (which was passed
 * to percpu_ref_init())
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
        percpu_ref_put_many(ref, 1);
}

/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
        return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;

        if (__ref_is_percpu(ref, &percpu_count))
                return false;
        return !atomic_long_read(&ref->count);
}
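
/*
 * A common teardown pattern (a hypothetical sketch; foo->release_wq is the
 * caller's own wait queue, and the release callback here is assumed to only
 * do wake_up(&foo->release_wq) rather than freeing the object) is to drop the
 * initial ref and then wait for the count to drain to zero:
 *
 *        percpu_ref_kill(&foo->ref);
 *        percpu_ref_put(&foo->ref);
 *        wait_event(foo->release_wq, percpu_ref_is_zero(&foo->ref));
 *        percpu_ref_exit(&foo->ref);
 *        kfree(foo);
 */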

#endif