Lines matching "switch ... mode" in lib/percpu-refcount.c
#include <linux/percpu-refcount.h>
/*
 * In percpu mode we don't try to detect the ref hitting 0 - which means that
 * get/put can just increment or decrement the local counter.  The counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shutdown,
 * the percpu counters all sum to the correct value.
 *
 * The hard part is shutdown.  We can't detect the ref hitting 0 on every put -
 * this would require global synchronization and defeat the whole purpose of
 * using percpu refs.  Instead the user keeps track of the initial reference,
 * so as long as we convert to non percpu mode before the initial ref is
 * dropped everything works.
 *
 * Converting to non percpu mode is done with some RCUish stuff in
 * percpu_ref_kill(), plus a bias value so the atomic counter cannot hit 0
 * while the percpu counters are being summed.
 */
#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))
	/* in percpu_count_ptr(): mask the mode/dead flag bits off the stashed percpu pointer */
	(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
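/*
 * A minimal userspace sketch (not kernel code) of the wrapping argument made
 * in the comment above: because unsigned addition is modular, the per-"cpu"
 * counters may individually wrap, yet their sum still equals the true number
 * of gets minus puts.  The counter layout and CPU count here are invented for
 * illustration only.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long cpu_count[4] = { 0 };	/* stand-ins for the percpu counters */
	long true_refs = 0;

	/* many gets on cpu 0, matching puts on cpu 1: cpu 1 wraps "below zero" */
	for (int i = 0; i < 1000000; i++) {
		cpu_count[0]++;		/* "get" on cpu 0 */
		true_refs++;
		cpu_count[1]--;		/* "put" on cpu 1, wraps to a huge value */
		true_refs--;
	}
	cpu_count[2] += 3;		/* three outstanding gets on cpu 2 */
	true_refs += 3;

	unsigned long sum = 0;
	for (int cpu = 0; cpu < 4; cpu++)
		sum += cpu_count[cpu];

	/* the modular sum recovers the true reference count */
	assert((long)sum == true_refs);
	printf("sum = %ld, true refs = %ld\n", (long)sum, true_refs);
	return 0;
}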
/**
 * percpu_ref_init - initialize a percpu refcount
 *
 * Initializes @ref.  If @flags is zero, @ref starts in percpu mode with a
 * refcount of 1; see the PERCPU_REF_INIT_* flags for the other start-up modes.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
	/* in percpu_ref_init() */
	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
	else
		start_count += PERCPU_COUNT_BIAS;

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	else
		start_count++;

	atomic_long_set(&ref->count, start_count);

	ref->release = release;
	ref->confirm_switch = NULL;
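/*
 * A minimal usage sketch (assumptions, not taken from this file): a made-up
 * struct foo_device embeds a percpu_ref, initializes it in percpu mode with
 * percpu_ref_init(), and releases the object from the ->release() callback.
 * foo_device, foo_device_create() and foo_release() are hypothetical names.
 */
#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct foo_device {
	struct percpu_ref ref;
	/* ... other fields ... */
};

static void foo_release(struct percpu_ref *ref)
{
	struct foo_device *foo = container_of(ref, struct foo_device, ref);

	/* called once the refcount has hit 0; must not sleep */
	percpu_ref_exit(&foo->ref);
	kfree(foo);
}

static struct foo_device *foo_device_create(void)
{
	struct foo_device *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;

	/* flags == 0: start live, in percpu mode, with a refcount of 1 */
	if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL)) {
		kfree(foo);
		return NULL;
	}
	return foo;
}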
/**
 * percpu_ref_exit - undo percpu_ref_init()
 *
 * The usual places to invoke this function from are the @ref->release()
 * callback or in init failure path where percpu_ref_init() succeeded but
 * other parts of the initialization of the embedding object failed.
 */

	/* in percpu_ref_exit() */
	/* non-NULL confirm_switch indicates switching in progress */
	WARN_ON_ONCE(ref->confirm_switch);
	ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
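/*
 * Sketch of the other usual percpu_ref_exit() call site mentioned above: the
 * init failure path of an embedding object, where percpu_ref_init() succeeded
 * but a later initialization step failed.  This reuses the hypothetical
 * foo_device/foo_release from the previous sketch; foo_setup_queues() is an
 * invented stand-in for a later, possibly failing, step.
 */
static int foo_setup_queues(struct foo_device *foo)
{
	return 0;	/* stand-in; a real step could fail */
}

static struct foo_device *foo_device_create_full(void)
{
	struct foo_device *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;

	if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL))
		goto err_free;

	if (foo_setup_queues(foo))
		goto err_exit_ref;

	return foo;

err_exit_ref:
	percpu_ref_exit(&foo->ref);	/* undo percpu_ref_init() on failure */
err_free:
	kfree(foo);
	return NULL;
}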
	/* in percpu_ref_call_confirm_rcu() */
	ref->confirm_switch(ref);
	ref->confirm_switch = NULL;
	/* in percpu_ref_switch_to_atomic_rcu(): @count is the sum of the percpu counters */
	pr_debug("global %ld percpu %ld",
		 atomic_long_read(&ref->count), (long)count);

	/*
	 * The percpu counters must be summed _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts is consistent.  The bias is subtracted here,
	 * together with the sum; we need the bias value to prevent &ref->count
	 * from reaching 0 before the percpu counts have been added in.
	 */
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);

	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
		  "percpu ref (%pf) <= 0 (%ld) after switching to atomic",
		  ref->release, atomic_long_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
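/*
 * A userspace sketch (not kernel code) of why the bias matters: while in
 * percpu mode the atomic count is seeded with PERCPU_COUNT_BIAS, so it stays
 * positive until the percpu sum (which may be "negative" modulo the word
 * size) is folded in.  All numbers below are invented for illustration.
 */
#include <assert.h>
#include <stdio.h>

#define BIAS (1UL << (sizeof(unsigned long) * 8 - 1))

int main(void)
{
	/* percpu mode: atomic count holds the bias plus the initial ref */
	unsigned long atomic_count = BIAS + 1;

	/* percpu deltas: many puts on one cpu, matching gets elsewhere */
	unsigned long percpu[2] = { 500, -498UL };	/* net +2 outstanding refs */

	unsigned long sum = percpu[0] + percpu[1];

	/* fold the sum in and drop the bias in a single step, as above */
	atomic_count += sum - BIAS;

	assert((long)atomic_count == 3);	/* initial ref + 2 percpu refs */
	printf("refcount after switch: %ld\n", (long)atomic_count);
	return 0;
}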
	/* in __percpu_ref_switch_to_atomic() */
	if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
		/* already in atomic mode: just run @confirm_switch and return */
	}

	ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

	/* Non-NULL ->confirm_switch is used to indicate that switching is in progress */
	ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
	call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
	/* in __percpu_ref_switch_to_percpu() */
	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;		/* already in percpu mode */

	/* re-add the bias that the atomic switch subtracted */
	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);

	/*
	 * Restore per-cpu operation: zero the percpu counters, then clear the
	 * ATOMIC flag.  smp_store_release() is paired with READ_ONCE() in
	 * __ref_is_percpu() so the zeroing is visible to everyone who sees
	 * the flag cleared.
	 */
	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
	/* in __percpu_ref_switch_mode(): wait for any switch already in flight */
	wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
			    percpu_ref_switch_lock);

	/* DEAD or force_atomic refs go (or stay) atomic; otherwise go percpu */
	if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
		__percpu_ref_switch_to_atomic(ref, confirm_switch);
/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * Schedule switching of @ref to atomic mode.  All its percpu counts will
 * be collected to the main atomic counter.  On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked.  This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations.  Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */

	/* in percpu_ref_switch_to_atomic() */
	ref->force_atomic = true;
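/*
 * Usage sketch (hypothetical, not from this file): switch a ref to atomic
 * mode and get notified once every CPU is guaranteed to see atomic mode.
 * The struct foo_device from the earlier sketches and foo_quiesce() are made
 * up; a completion is only one possible way to consume @confirm_switch.
 */
#include <linux/completion.h>

static DECLARE_COMPLETION(foo_switch_done);

static void foo_confirm_switch(struct percpu_ref *ref)
{
	/* runs after an RCU grace period; must not block */
	complete(&foo_switch_done);
}

static void foo_quiesce(struct foo_device *foo)
{
	percpu_ref_switch_to_atomic(&foo->ref, foo_confirm_switch);
	wait_for_completion(&foo_switch_done);

	/* from here on, gets and puts hit the shared atomic counter */
}

/*
 * This is roughly what percpu_ref_switch_to_atomic_sync(), documented below,
 * does for you using the switch waitqueue instead of a completion.
 */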
/**
 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 *
 * Schedule switching the ref to atomic mode, and wait for the
 * switch to complete.  Caller must ensure that no other thread
 * will switch back to percpu mode.
 */

	/* in percpu_ref_switch_to_atomic_sync() */
	wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
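/*
 * Sketch of the synchronous variant (hypothetical helper, reusing the
 * foo_device from the earlier sketches): quiesce percpu operation before a
 * maintenance step.  foo_update_config() is an invented name; the point is
 * only the percpu_ref_switch_to_atomic_sync() call.
 */
static void foo_update_config(struct foo_device *foo)
{
	/* returns only once all CPUs are operating on the atomic counter */
	percpu_ref_switch_to_atomic_sync(&foo->ref);

	/* ... perform work that must not race with percpu gets/puts ... */
}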
/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode.  This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations.  A ref that has been killed stays in atomic mode until
 * it is reinitialized.
 *
 * This function may block if @ref is in the process of switching to atomic
 * mode.  If the caller ensures that @ref is not in the process of
 * switching to atomic mode, this function can be called from any context.
 */

	/* in percpu_ref_switch_to_percpu() */
	ref->force_atomic = false;
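/*
 * Continuation of the quiesce sketch above (hypothetical name): once the
 * maintenance step is done, drop force-atomic mode again so gets and puts go
 * back to cheap percpu increments and decrements.
 */
static void foo_update_config_done(struct foo_device *foo)
{
	/*
	 * Reverse of the earlier switch.  If the ref has been killed (DEAD),
	 * it stays in atomic mode and only returns to percpu operation after
	 * percpu_ref_reinit().
	 */
	percpu_ref_switch_to_percpu(&foo->ref);
}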
/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Drops the initial ref and marks @ref dead.  If @confirm_kill is not NULL
 * it is invoked, and may not block, once @ref is seen as dead on all CPUs.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 */

	/* in percpu_ref_kill_and_confirm() */
	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
		  "%s called more than once on %pf!", __func__, ref->release);

	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
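/*
 * Teardown sketch (hypothetical, reusing foo_device/foo_release from above):
 * drop the initial reference.  Once every outstanding get has been put,
 * foo_release() runs and frees the object.  percpu_ref_kill() is the
 * @confirm_kill == NULL wrapper around percpu_ref_kill_and_confirm().
 */
static void foo_device_destroy(struct foo_device *foo)
{
	/*
	 * Drop the initial reference taken by percpu_ref_init() and mark the
	 * ref dead: percpu_ref_tryget_live() now fails, and once the last
	 * outstanding reference is put, foo_release() runs and frees @foo.
	 */
	percpu_ref_kill(&foo->ref);
}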
/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init(), ignoring %PERCPU_REF_INIT_DEAD.  @ref must have been
 * initialized successfully and reached 0 but not exited.
 */

	/* in percpu_ref_reinit() */
	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
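/*
 * Freeze/unfreeze sketch (hypothetical names, setup code omitted): kill the
 * ref, wait until it has reached zero, do some exclusive work, then
 * percpu_ref_reinit() to bring it back to the post-init state.  Here the
 * ->release() callback signals a completion instead of freeing the object;
 * foo_gate and its helpers are invented for illustration.
 */
#include <linux/completion.h>

struct foo_gate {
	struct percpu_ref ref;
	struct completion released;	/* completed from the ->release() callback */
};

static void foo_gate_release(struct percpu_ref *ref)
{
	struct foo_gate *g = container_of(ref, struct foo_gate, ref);

	complete(&g->released);		/* the count reached zero */
}

static void foo_gate_freeze(struct foo_gate *g)
{
	percpu_ref_kill(&g->ref);		/* drop the initial ref, refuse new users */
	wait_for_completion(&g->released);	/* wait for in-flight users to drain */
}

static void foo_gate_unfreeze(struct foo_gate *g)
{
	reinit_completion(&g->released);
	/*
	 * Live again with a count of 1, as right after percpu_ref_init();
	 * the ref returns to percpu mode unless it is forced atomic.
	 */
	percpu_ref_reinit(&g->ref);
}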