// SPDX-License-Identifier: GPL-2.0-only

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
/*
 * In percpu mode we don't try to detect the ref hitting 0 - which means that
 * get/put can just increment or decrement the local counter.  The counter on a
 * particular cpu can (and will) wrap - this is fine; when we go to shutdown,
 * the percpu counters all sum to the correct value.  We can't detect the ref
 * hitting 0 on every put - this would require global synchronization and
 * defeat the whole purpose of using percpu refs, so a bias value keeps the
 * atomic counter from reaching 0 before all the percpu counts are added in.
 */

#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))
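/*
 * Illustrative sketch (not part of this file): in percpu mode the hot path
 * only touches the local counter and never checks for zero.  This is a
 * simplified approximation of the percpu_ref_get()/percpu_ref_put() fast
 * paths in include/linux/percpu-refcount.h; the helper names below are made
 * up for illustration only.
 */
#if 0	/* example only */
static inline void example_percpu_mode_get(unsigned long __percpu *percpu_count)
{
        /* per-cpu counters may wrap; only their sum has to be correct */
        this_cpu_inc(*percpu_count);
}

static inline void example_percpu_mode_put(unsigned long __percpu *percpu_count)
{
        this_cpu_dec(*percpu_count);
}
#endif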
static DEFINE_SPINLOCK(percpu_ref_switch_lock);
static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
        return (unsigned long __percpu *)
                (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}
/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when the refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
                    unsigned int flags, gfp_t gfp)
{
        size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
                             __alignof__(unsigned long));
        unsigned long start_count = 0;
        struct percpu_ref_data *data;

        ref->percpu_count_ptr = (unsigned long)
                __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
        if (!ref->percpu_count_ptr)
                return -ENOMEM;
        data = kzalloc(sizeof(*ref->data), gfp);
        if (!data) {
                free_percpu((void __percpu *)ref->percpu_count_ptr);
                ref->percpu_count_ptr = 0;
                return -ENOMEM;
        }
        data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
        data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;

        if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
                ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
                data->allow_reinit = true;
        } else {
                start_count += PERCPU_COUNT_BIAS;
        }

        if (flags & PERCPU_REF_INIT_DEAD)
                ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
        else
                start_count++;

        atomic_long_set(&data->count, start_count);
        data->release = release;
        data->confirm_switch = NULL;
        data->ref = ref;
        ref->data = data;
        return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
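/*
 * Illustrative usage sketch (not part of this file): a typical caller embeds
 * the percpu_ref in its own object and frees that object from the release
 * callback.  "struct my_obj", my_obj_release() and my_obj_create() are
 * hypothetical names used only for this example.
 */
#if 0	/* example only */
struct my_obj {
        struct percpu_ref ref;
        /* ... other fields ... */
};

static void my_obj_release(struct percpu_ref *ref)
{
        struct my_obj *obj = container_of(ref, struct my_obj, ref);

        /* must not sleep - may run from RCU callback context */
        kfree(obj);
}

static struct my_obj *my_obj_create(void)
{
        struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;
        if (percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL)) {
                kfree(obj);
                return NULL;
        }
        return obj;     /* starts live, in percpu mode, holding the initial ref */
}
#endif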
static void __percpu_ref_exit(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

        if (percpu_count) {
                /* non-NULL confirm_switch indicates switching in progress */
                WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
                free_percpu(percpu_count);
                ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
        }
}
/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * The caller must ensure that @ref is no longer in active use.  The usual
 * places to invoke this function from are the @ref->release() callback or an
 * init failure path where percpu_ref_init() succeeded but later setup of the
 * embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
        struct percpu_ref_data *data = ref->data;
        unsigned long flags;

        __percpu_ref_exit(ref);

        if (!data)
                return;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);
        ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
                __PERCPU_REF_FLAG_BITS;
        ref->data = NULL;
        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

        kfree(data);
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
        struct percpu_ref_data *data = container_of(rcu,
                        struct percpu_ref_data, rcu);
        struct percpu_ref *ref = data->ref;

        data->confirm_switch(ref);
        data->confirm_switch = NULL;
        wake_up_all(&percpu_ref_switch_waitq);

        if (!data->allow_reinit)
                __percpu_ref_exit(ref);

        /* drop ref from percpu_ref_switch_to_atomic() */
        percpu_ref_put(ref);
}
static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
        struct percpu_ref_data *data = container_of(rcu,
                        struct percpu_ref_data, rcu);
        struct percpu_ref *ref = data->ref;
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
        unsigned long count = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                count += *per_cpu_ptr(percpu_count, cpu);

        pr_debug("global %lu percpu %lu\n",
                 atomic_long_read(&data->count), count);
        /*
         * It's crucial that we sum the percpu counters _before_ adding the sum
         * to &ref->count; since gets could be happening on one cpu while puts
         * happen on another, adding a single cpu's count could cause
         * @ref->count to hit 0 before we've got a consistent value - but the
         * sum of all the counts will be consistent and correct.
         *
         * Subtracting the bias value then has to happen _after_ adding count to
         * &ref->count; we need the bias value to prevent &ref->count from
         * reaching 0 before we've got a consistent value.
         */
        atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);
        WARN_ONCE(atomic_long_read(&data->count) <= 0,
                  "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
                  data->release, atomic_long_read(&data->count));

        /* @ref is viewed as dead on all CPUs, send out switch confirmation */
        percpu_ref_call_confirm_rcu(rcu);
}
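/*
 * Worked example (illustration only, not from the original file): start in
 * percpu mode with one initial reference, so &data->count holds
 * PERCPU_COUNT_BIAS + 1.  Suppose cpu0 then does 3 gets (cpu0 counter = 3)
 * and cpu1 does 2 puts (cpu1 counter wraps to (unsigned long)-2); 2 live
 * references remain.  At the switch, count = 3 + (-2) = 1 modulo 2^BITS, and
 * atomic_long_add(count - PERCPU_COUNT_BIAS) leaves
 * (PERCPU_COUNT_BIAS + 1) + 1 - PERCPU_COUNT_BIAS = 2, the correct value,
 * while the bias keeps &data->count from ever touching 0 in between.
 */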
static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                          percpu_ref_func_t *confirm_switch)
{
        if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
                if (confirm_switch)
                        confirm_switch(ref);
                return;
        }

        /* switching from percpu to atomic */
        ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

        /*
         * Non-NULL ->confirm_switch is used to indicate that switching is
         * in progress.  Use noop one if unspecified.
         */
        ref->data->confirm_switch = confirm_switch ?:
                percpu_ref_noop_confirm_switch;

        percpu_ref_get(ref);    /* put after confirmation */
        call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
}
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
        int cpu;

        BUG_ON(!percpu_count);

        if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
                return;

        if (WARN_ON_ONCE(!ref->data->allow_reinit))
                return;

        atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);

        /*
         * Restore per-cpu operation.  smp_store_release() is paired
         * with READ_ONCE() in __ref_is_percpu() and guarantees that the
         * zeroing is visible to all percpu accesses which can see the
         * following __PERCPU_REF_ATOMIC clearing.
         */
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(percpu_count, cpu) = 0;

        smp_store_release(&ref->percpu_count_ptr,
                          ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}
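/*
 * Illustrative sketch (not part of this file): the reader side that the
 * smp_store_release() above pairs with.  A simplified approximation of
 * __ref_is_percpu() from include/linux/percpu-refcount.h - the real helper
 * also relies on the RCU-sched read side held by percpu_ref_get(); treat the
 * details below as assumptions.
 */
#if 0	/* example only */
static inline bool example_ref_is_percpu(struct percpu_ref *ref,
                                         unsigned long __percpu **percpu_countp)
{
        unsigned long percpu_ptr;

        /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
        percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

        if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
                return false;   /* fall back to the atomic counter */

        *percpu_countp = (unsigned long __percpu *)percpu_ptr;
        return true;
}
#endif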
static void __percpu_ref_switch_mode(struct percpu_ref *ref,
                                     percpu_ref_func_t *confirm_switch)
{
        struct percpu_ref_data *data = ref->data;

        lockdep_assert_held(&percpu_ref_switch_lock);

        /* wait for any in-progress ATOMIC switch to finish first */
        wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
                            percpu_ref_switch_lock);

        if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
                __percpu_ref_switch_to_atomic(ref, confirm_switch);
        else
                __percpu_ref_switch_to_percpu(ref);
}
/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * Schedule switching of @ref to atomic mode.  All its percpu counts will be
 * collected into the atomic counter.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_switch)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        ref->data->force_atomic = true;
        __percpu_ref_switch_mode(ref, confirm_switch);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic);
/**
 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 *
 * Schedule switching the ref to atomic mode and wait for the switch to
 * complete.  Caller must be able to sleep.
 */
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
{
        percpu_ref_switch_to_atomic(ref, NULL);
        wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
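/*
 * Illustrative usage sketch (not part of this file): a subsystem that needs
 * a precise count - e.g. to quiesce outstanding users - can force atomic mode
 * synchronously first.  my_quiesce() and struct my_obj are hypothetical names
 * used only for this example.
 */
#if 0	/* example only */
static void my_quiesce(struct my_obj *obj)
{
        /* after this returns, gets/puts hit the atomic counter directly */
        percpu_ref_switch_to_atomic_sync(&obj->ref);

        /* ... drain or audit users, then resume fast-path operation ... */
        percpu_ref_switch_to_percpu(&obj->ref);
}
#endif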
/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        ref->data->force_atomic = false;
        __percpu_ref_switch_mode(ref, NULL);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback, called (and may not block)
 *                once @ref is seen as dead from all CPUs
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill)
{
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
                  "%s called more than once on %ps!", __func__,
                  ref->data->release);

        ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
        __percpu_ref_switch_mode(ref, confirm_kill);
        percpu_ref_put(ref);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
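/*
 * Illustrative usage sketch (not part of this file): typical shutdown drops
 * the initial reference with percpu_ref_kill(), which is
 * percpu_ref_kill_and_confirm(ref, NULL); once every other holder has put its
 * ref, the release callback runs.  my_obj_destroy() and struct my_obj are
 * hypothetical names used only for this example.
 */
#if 0	/* example only */
static void my_obj_destroy(struct my_obj *obj)
{
        /* no new percpu_ref_tryget_live() will succeed after this */
        percpu_ref_kill(&obj->ref);

        /*
         * The object is freed from my_obj_release() when the last reference
         * is put; callers that must wait for that typically pair the release
         * callback with a completion.
         */
}
#endif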
/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 */
bool percpu_ref_is_zero(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;
        unsigned long count, flags;

        if (__ref_is_percpu(ref, &percpu_count))
                return false;

        /* protect us from being destroyed */
        spin_lock_irqsave(&percpu_ref_switch_lock, flags);
        if (ref->data)
                count = atomic_long_read(&ref->data->count);
        else
                count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);

        return count == 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_is_zero);
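/*
 * Illustrative usage sketch (not part of this file): teardown code sometimes
 * kills the ref and then waits for it to hit zero before tearing down backing
 * structures.  my_wait_for_users() and struct my_obj are hypothetical, and
 * the polling loop is for illustration only - real users usually wait on a
 * completion fired from the release callback.
 */
#if 0	/* example only */
static void my_wait_for_users(struct my_obj *obj)
{
        percpu_ref_kill(&obj->ref);

        while (!percpu_ref_is_zero(&obj->ref))
                msleep(10);     /* needs <linux/delay.h> */
}
#endif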
/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init().  @ref must have been initialized successfully and
 * reached 0 but not exited.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
        WARN_ON_ONCE(!percpu_ref_is_zero(ref));

        percpu_ref_resurrect(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
/**
 * percpu_ref_resurrect - modify a percpu refcount from dead to live
 * @ref: percpu_ref to resurrect
 *
 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
 * called.  @ref must be dead but must not yet have exited.
 *
 * If @ref->release() frees @ref then the caller is responsible for
 * guaranteeing that @ref->release() does not get called while this
 * function is in progress.
 */
void percpu_ref_resurrect(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;
        unsigned long flags;

        spin_lock_irqsave(&percpu_ref_switch_lock, flags);

        WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
        WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));

        ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
        percpu_ref_get(ref);
        __percpu_ref_switch_mode(ref, NULL);

        spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
EXPORT_SYMBOL_GPL(percpu_ref_resurrect);