
Lines Matching +full:cpu +full:- +full:viewed

1 // SPDX-License-Identifier: GPL-2.0-only
8 #include <linux/percpu-refcount.h>
12 * don't try to detect the ref hitting 0 - which means that get/put can just
14 * particular cpu can (and will) wrap - this is fine, when we go to shutdown the
23 * the ref hitting 0 on every put - this would require global synchronization
36 #define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
44 (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD); in percpu_count_ptr()
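The fragments above capture the design: while the ref is in percpu mode, get/put only bump a per-CPU counter (which may individually wrap), and the bias constant keeps the shared atomic count from hitting zero prematurely. A minimal fast-path sketch under those assumptions; struct my_obj, do_work() and the error code are illustrative names, not taken from this file:

    #include <linux/errno.h>
    #include <linux/percpu-refcount.h>

    /* hypothetical object carrying a percpu_ref; reused by later sketches */
    struct my_obj {
        struct percpu_ref ref;
        /* ... payload ... */
    };

    static void do_work(struct my_obj *obj)
    {
        /* placeholder for whatever the reference protects */
    }

    static int my_obj_use(struct my_obj *obj)
    {
        /* cheap per-CPU increment while live; fails once the ref is killed */
        if (!percpu_ref_tryget_live(&obj->ref))
            return -ENODEV;
        do_work(obj);
        /* per-CPU decrement; the release callback only runs on the final put */
        percpu_ref_put(&obj->ref);
        return 0;
    }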
48 * percpu_ref_init - initialize a percpu refcount
59 * Note that @release must not sleep - it may potentially be called from RCU
70 ref->percpu_count_ptr = (unsigned long) in percpu_ref_init()
72 if (!ref->percpu_count_ptr) in percpu_ref_init()
73 return -ENOMEM; in percpu_ref_init()
75 data = kzalloc(sizeof(*ref->data), gfp); in percpu_ref_init()
77 free_percpu((void __percpu *)ref->percpu_count_ptr); in percpu_ref_init()
78 return -ENOMEM; in percpu_ref_init()
81 data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC; in percpu_ref_init()
82 data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT; in percpu_ref_init()
85 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC; in percpu_ref_init()
86 data->allow_reinit = true; in percpu_ref_init()
92 ref->percpu_count_ptr |= __PERCPU_REF_DEAD; in percpu_ref_init()
96 atomic_long_set(&data->count, start_count); in percpu_ref_init()
98 data->release = release; in percpu_ref_init()
99 data->confirm_switch = NULL; in percpu_ref_init()
100 data->ref = ref; in percpu_ref_init()
101 ref->data = data; in percpu_ref_init()
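A typical caller of percpu_ref_init(), continuing the hypothetical my_obj sketch above: the release callback runs once the count reaches zero and, per the percpu_ref_exit() comments just below, is one of the usual places to undo the init. All names here are illustrative:

    #include <linux/kernel.h>
    #include <linux/percpu-refcount.h>
    #include <linux/slab.h>

    static void my_obj_release(struct percpu_ref *ref)
    {
        struct my_obj *obj = container_of(ref, struct my_obj, ref);

        percpu_ref_exit(&obj->ref); /* usual place to undo percpu_ref_init() */
        kfree(obj);
    }

    static struct my_obj *my_obj_create(void)
    {
        struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
            return NULL;
        /* starts live, in percpu mode, holding the initial reference */
        if (percpu_ref_init(&obj->ref, my_obj_release, 0, GFP_KERNEL)) {
            kfree(obj);
            return NULL;
        }
        return obj;
    }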
111 /* non-NULL confirm_switch indicates switching in progress */ in __percpu_ref_exit()
112 WARN_ON_ONCE(ref->data && ref->data->confirm_switch); in __percpu_ref_exit()
114 ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD; in __percpu_ref_exit()
119 * percpu_ref_exit - undo percpu_ref_init()
124 * function from are the @ref->release() callback or in init failure path
130 struct percpu_ref_data *data = ref->data; in percpu_ref_exit()
139 ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) << in percpu_ref_exit()
141 ref->data = NULL; in percpu_ref_exit()
152 struct percpu_ref *ref = data->ref; in percpu_ref_call_confirm_rcu()
154 data->confirm_switch(ref); in percpu_ref_call_confirm_rcu()
155 data->confirm_switch = NULL; in percpu_ref_call_confirm_rcu()
158 if (!data->allow_reinit) in percpu_ref_call_confirm_rcu()
169 struct percpu_ref *ref = data->ref; in percpu_ref_switch_to_atomic_rcu()
172 int cpu; in percpu_ref_switch_to_atomic_rcu() local
174 for_each_possible_cpu(cpu) in percpu_ref_switch_to_atomic_rcu()
175 count += *per_cpu_ptr(percpu_count, cpu); in percpu_ref_switch_to_atomic_rcu()
178 atomic_long_read(&data->count), count); in percpu_ref_switch_to_atomic_rcu()
182 * to &ref->count; since gets could be happening on one cpu while puts in percpu_ref_switch_to_atomic_rcu()
183 * happen on another, adding a single cpu's count could cause in percpu_ref_switch_to_atomic_rcu()
184 * @ref->count to hit 0 before we've got a consistent value - but the in percpu_ref_switch_to_atomic_rcu()
188 * &ref->count; we need the bias value to prevent &ref->count from in percpu_ref_switch_to_atomic_rcu()
192 atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count); in percpu_ref_switch_to_atomic_rcu()
194 WARN_ONCE(atomic_long_read(&data->count) <= 0, in percpu_ref_switch_to_atomic_rcu()
196 data->release, atomic_long_read(&data->count)); in percpu_ref_switch_to_atomic_rcu()
198 /* @ref is viewed as dead on all CPUs, send out switch confirmation */ in percpu_ref_switch_to_atomic_rcu()
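The arithmetic above can be modelled in isolation: the atomic counter is seeded at init time with the initial reference plus PERCPU_COUNT_BIAS, so folding in the (possibly transiently negative) sum of per-CPU deltas cannot drive it to zero early, and adding sum - PERCPU_COUNT_BIAS at switch time removes the bias again. A standalone model of that arithmetic (ordinary userspace C, not kernel code):

    #include <assert.h>
    #include <limits.h>

    #define BITS_PER_LONG     (sizeof(long) * CHAR_BIT)
    #define PERCPU_COUNT_BIAS (1UL << (BITS_PER_LONG - 1))

    int main(void)
    {
        /* seeded at init: one initial reference plus the bias */
        unsigned long atomic_count = 1 + PERCPU_COUNT_BIAS;
        /* per-CPU deltas: net gets on one CPU, net puts on another; each may
         * "underflow" individually, unsigned wrap-around keeps the sum right */
        unsigned long percpu[2] = { 5UL, -3UL };
        unsigned long sum = percpu[0] + percpu[1];

        /* what the switch-to-atomic path does with the summed count */
        atomic_count += sum - PERCPU_COUNT_BIAS;
        assert(atomic_count == 3); /* initial ref + 5 gets - 3 puts */
        return 0;
    }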
209 if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) { in __percpu_ref_switch_to_atomic()
216 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC; in __percpu_ref_switch_to_atomic()
219 * Non-NULL ->confirm_switch is used to indicate that switching is in __percpu_ref_switch_to_atomic()
222 ref->data->confirm_switch = confirm_switch ?: in __percpu_ref_switch_to_atomic()
226 call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu); in __percpu_ref_switch_to_atomic()
232 int cpu; in __percpu_ref_switch_to_percpu() local
236 if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) in __percpu_ref_switch_to_percpu()
239 if (WARN_ON_ONCE(!ref->data->allow_reinit)) in __percpu_ref_switch_to_percpu()
242 atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count); in __percpu_ref_switch_to_percpu()
245 * Restore per-cpu operation. smp_store_release() is paired in __percpu_ref_switch_to_percpu()
250 for_each_possible_cpu(cpu) in __percpu_ref_switch_to_percpu()
251 *per_cpu_ptr(percpu_count, cpu) = 0; in __percpu_ref_switch_to_percpu()
253 smp_store_release(&ref->percpu_count_ptr, in __percpu_ref_switch_to_percpu()
254 ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC); in __percpu_ref_switch_to_percpu()
260 struct percpu_ref_data *data = ref->data; in __percpu_ref_switch_mode()
269 wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch, in __percpu_ref_switch_mode()
272 if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD)) in __percpu_ref_switch_mode()
279 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
305 ref->data->force_atomic = true; in percpu_ref_switch_to_atomic()
313 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
323 wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch); in percpu_ref_switch_to_atomic_sync()
328 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
332 * To re-use an expired ref, use percpu_ref_reinit().
351 ref->data->force_atomic = false; in percpu_ref_switch_to_percpu()
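A sketch of driving these mode switches from a caller, again using the hypothetical my_obj: force atomic mode around a phase that needs an exact count, then return to per-CPU counting:

    static void my_obj_quiesce(struct my_obj *obj)
    {
        /* returns only after the per-CPU counts have been folded into the
         * atomic counter and all CPUs observe atomic mode */
        percpu_ref_switch_to_atomic_sync(&obj->ref);

        /* ... gets/puts now hit the shared atomic counter ... */

        /* resume cheap per-CPU counting; this stays in atomic mode if the
         * ref has been killed, until it is reinitialized or resurrected */
        percpu_ref_switch_to_percpu(&obj->ref);
    }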
359 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
382 WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD, in percpu_ref_kill_and_confirm()
384 ref->data->release); in percpu_ref_kill_and_confirm()
386 ref->percpu_count_ptr |= __PERCPU_REF_DEAD; in percpu_ref_kill_and_confirm()
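A shutdown sketch for the hypothetical my_obj: percpu_ref_kill_and_confirm() marks the ref dead, switches it to atomic mode, and drops the initial reference; the confirm callback fires after an RCU grace period, once no CPU can take a new percpu-mode reference:

    static void my_obj_confirm_kill(struct percpu_ref *ref)
    {
        /* called from RCU callback context after a grace period: every CPU
         * now sees the ref as dead, so percpu_ref_tryget_live() fails */
    }

    static void my_obj_shutdown(struct my_obj *obj)
    {
        /* drops the initial reference taken in my_obj_create(); the release
         * callback runs once all remaining references are put */
        percpu_ref_kill_and_confirm(&obj->ref, my_obj_confirm_kill);
    }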
395 * percpu_ref_is_zero - test whether a percpu refcount reached zero
412 if (ref->data) in percpu_ref_is_zero()
413 count = atomic_long_read(&ref->data->count); in percpu_ref_is_zero()
415 count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS; in percpu_ref_is_zero()
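percpu_ref_is_zero() is commonly paired with a kill to wait for a usage counter to drain, with the release callback waking the waiter. A standalone sketch of that pattern (all names are illustrative):

    #include <linux/percpu-refcount.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_drain_waitq);
    static struct percpu_ref my_usage;

    /* passed as the release callback to percpu_ref_init() */
    static void my_usage_release(struct percpu_ref *ref)
    {
        wake_up_all(&my_drain_waitq);   /* the last reference was just put */
    }

    static void my_drain(void)
    {
        percpu_ref_kill(&my_usage);     /* kill_and_confirm() with no callback */
        wait_event(my_drain_waitq, percpu_ref_is_zero(&my_usage));
        /* no references remain; safe to tear down and percpu_ref_exit() */
    }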
423 * percpu_ref_reinit - re-initialize a percpu refcount
424 * @ref: percpu_ref to re-initialize
426 * Re-initialize @ref so that it's in the same state as when it finished
442 * percpu_ref_resurrect - modify a percpu refcount from dead to live
448 * If @ref->release() frees @ref then the caller is responsible for
449 * guaranteeing that @ref->release() does not get called while this
462 WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)); in percpu_ref_resurrect()
465 ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD; in percpu_ref_resurrect()
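A re-use sketch for the hypothetical my_obj: a ref that has been killed but not yet fully drained can be flipped back to live with percpu_ref_resurrect(); a fully drained one would instead go through percpu_ref_reinit(), provided its per-CPU counters were not already freed:

    static void my_obj_cancel_shutdown(struct my_obj *obj)
    {
        /* the caller must guarantee the release callback cannot run
         * concurrently, e.g. by still holding a reference of its own */
        percpu_ref_resurrect(&obj->ref);
    }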