
Lines Matching +full:cpu +full:- +full:viewed (the matches below come from lib/percpu-refcount.c in the Linux kernel; the leading number on each line is its line number in that file, and the trailing "in function()" tag names the enclosing function).

6 #include <linux/percpu-refcount.h>
10 * don't try to detect the ref hitting 0 - which means that get/put can just
12 * particular cpu can (and will) wrap - this is fine, when we go to shutdown the
21 * the ref hitting 0 on every put - this would require global synchronization
34 #define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
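The comment fragments above describe the central trick: individual per-cpu counters may freely wrap because only their sum has to be meaningful, and PERCPU_COUNT_BIAS keeps the shared atomic counter well away from zero while per-cpu deltas are still outstanding. A toy userspace sketch of the wrap-around property (illustration only, not kernel code):

#include <stdio.h>

int main(void)
{
        /* Two "per-cpu" counters; unsigned arithmetic wraps modulo 2^BITS_PER_LONG. */
        unsigned long cpu0 = 0, cpu1 = 0;

        cpu0 += 5;      /* five gets happened to run on CPU 0 */
        cpu1 -= 3;      /* three puts ran on CPU 1 and wrapped below zero */

        /* The sum is still the true net delta: 5 - 3 = 2. */
        printf("%lu\n", cpu0 + cpu1);
        return 0;
}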
42 (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD); in percpu_count_ptr()
46 * percpu_ref_init - initialize a percpu refcount
56 * Note that @release must not sleep - it may potentially be called from RCU
66 ref->percpu_count_ptr = (unsigned long) in percpu_ref_init()
68 if (!ref->percpu_count_ptr) in percpu_ref_init()
69 return -ENOMEM; in percpu_ref_init()
71 ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC; in percpu_ref_init()
74 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC; in percpu_ref_init()
79 ref->percpu_count_ptr |= __PERCPU_REF_DEAD; in percpu_ref_init()
83 atomic_long_set(&ref->count, start_count); in percpu_ref_init()
85 ref->release = release; in percpu_ref_init()
86 ref->confirm_switch = NULL; in percpu_ref_init()
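For orientation, here is a minimal usage sketch of the percpu_ref_init() path excerpted above. struct foo, foo_release() and foo_create() are hypothetical names; only the percpu_ref_* calls, the flags and the container_of() pattern come from this API:

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct foo {
        struct percpu_ref ref;
        /* ... payload ... */
};

/* Runs once the last reference is dropped; must not sleep. */
static void foo_release(struct percpu_ref *ref)
{
        struct foo *foo = container_of(ref, struct foo, ref);

        percpu_ref_exit(&foo->ref);     /* free the percpu counter */
        kfree(foo);
}

static struct foo *foo_create(void)
{
        struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

        if (!foo)
                return NULL;

        /* flags == 0: start live, in percpu mode, holding the initial ref. */
        if (percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL)) {
                kfree(foo);
                return NULL;
        }
        return foo;
}

Shutdown of such an object goes through percpu_ref_kill(), sketched further below.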
92 * percpu_ref_exit - undo percpu_ref_init()
97 * function from are the @ref->release() callback or in init failure path
106 /* non-NULL confirm_switch indicates switching in progress */ in percpu_ref_exit()
107 WARN_ON_ONCE(ref->confirm_switch); in percpu_ref_exit()
109 ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD; in percpu_ref_exit()
118 ref->confirm_switch(ref); in percpu_ref_call_confirm_rcu()
119 ref->confirm_switch = NULL; in percpu_ref_call_confirm_rcu()
131 int cpu; in percpu_ref_switch_to_atomic_rcu() local
133 for_each_possible_cpu(cpu) in percpu_ref_switch_to_atomic_rcu()
134 count += *per_cpu_ptr(percpu_count, cpu); in percpu_ref_switch_to_atomic_rcu()
137 atomic_long_read(&ref->count), (long)count); in percpu_ref_switch_to_atomic_rcu()
141 * to &ref->count; since gets could be happening on one cpu while puts in percpu_ref_switch_to_atomic_rcu()
142 * happen on another, adding a single cpu's count could cause in percpu_ref_switch_to_atomic_rcu()
143 * @ref->count to hit 0 before we've got a consistent value - but the in percpu_ref_switch_to_atomic_rcu()
147 * &ref->count; we need the bias value to prevent &ref->count from in percpu_ref_switch_to_atomic_rcu()
151 atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count); in percpu_ref_switch_to_atomic_rcu()
153 WARN_ONCE(atomic_long_read(&ref->count) <= 0, in percpu_ref_switch_to_atomic_rcu()
155 ref->release, atomic_long_read(&ref->count)); in percpu_ref_switch_to_atomic_rcu()
157 /* @ref is viewed as dead on all CPUs, send out switch confirmation */ in percpu_ref_switch_to_atomic_rcu()
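To make the fold in percpu_ref_switch_to_atomic_rcu() concrete with made-up numbers: suppose the ref was initialized live in percpu mode, so ref->count started at PERCPU_COUNT_BIAS + 1, and the gets and puts issued since then leave the per-cpu counters summing to +2 (modulo the word size, so individual counters may have wrapped). The single atomic_long_add() of (2 - PERCPU_COUNT_BIAS) then leaves ref->count == 3, the true number of live references. Because the full per-cpu sum is added and the bias removed in that one operation, ref->count never transiently dips to zero during the switch, which is exactly what the bias exists to prevent.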
168 if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) { in __percpu_ref_switch_to_atomic()
175 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC; in __percpu_ref_switch_to_atomic()
178 * Non-NULL ->confirm_switch is used to indicate that switching is in __percpu_ref_switch_to_atomic()
181 ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch; in __percpu_ref_switch_to_atomic()
184 call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu); in __percpu_ref_switch_to_atomic()
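__percpu_ref_switch_to_atomic() sets __PERCPU_REF_ATOMIC and then defers the fold with call_rcu_sched(): the per-cpu counters may only be summed once every CPU is guaranteed to have seen the flag. Why an RCU-sched grace period is sufficient can be seen from the get-side fast path, roughly paraphrased below from linux/percpu-refcount.h (simplified; exact details vary by kernel version):

static inline void percpu_ref_get(struct percpu_ref *ref)
{
        unsigned long __percpu *percpu_count;

        rcu_read_lock_sched();                  /* no preemption across the update */

        if (__ref_is_percpu(ref, &percpu_count))
                this_cpu_inc(*percpu_count);    /* percpu mode: bump the local counter */
        else
                atomic_long_inc(&ref->count);   /* atomic mode: shared counter */

        rcu_read_unlock_sched();
}

Once a sched-RCU grace period has elapsed, no CPU can still be inside such a critical section holding a stale percpu-mode view, so percpu_ref_switch_to_atomic_rcu() can safely sum the per-cpu counters.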
190 int cpu; in __percpu_ref_switch_to_percpu() local
194 if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) in __percpu_ref_switch_to_percpu()
197 atomic_long_add(PERCPU_COUNT_BIAS, &ref->count); in __percpu_ref_switch_to_percpu()
200 * Restore per-cpu operation. smp_store_release() is paired in __percpu_ref_switch_to_percpu()
205 for_each_possible_cpu(cpu) in __percpu_ref_switch_to_percpu()
206 *per_cpu_ptr(percpu_count, cpu) = 0; in __percpu_ref_switch_to_percpu()
208 smp_store_release(&ref->percpu_count_ptr, in __percpu_ref_switch_to_percpu()
209 ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC); in __percpu_ref_switch_to_percpu()
222 wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch, in __percpu_ref_switch_mode()
225 if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD)) in __percpu_ref_switch_mode()
232 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
258 ref->force_atomic = true; in percpu_ref_switch_to_atomic()
266 * percpu_ref_switch_to_atomic_sync - switch a percpu_ref to atomic mode
276 wait_event(percpu_ref_switch_waitq, !ref->confirm_switch); in percpu_ref_switch_to_atomic_sync()
281 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
285 * To re-use an expired ref, use percpu_ref_reinit().
304 ref->force_atomic = false; in percpu_ref_switch_to_percpu()
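A sketch of how the two public switching calls are typically paired, in the style of a quiesce/resume sequence; the foo naming continues the hypothetical example above and all surrounding driver details are omitted:

/* Enter a slow path that needs exact reference counting. */
static void foo_quiesce(struct foo *foo)
{
        /*
         * Fold the per-cpu counters into the shared atomic counter and
         * wait for the switch to finish; from here on every get/put
         * hits ref->count directly.
         */
        percpu_ref_switch_to_atomic_sync(&foo->ref);
}

/* Leave the slow path: gets/puts go back to cheap per-cpu increments. */
static void foo_resume(struct foo *foo)
{
        percpu_ref_switch_to_percpu(&foo->ref);
}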
312 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
335 WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD, in percpu_ref_kill_and_confirm()
336 "%s called more than once on %pf!", __func__, ref->release); in percpu_ref_kill_and_confirm()
338 ref->percpu_count_ptr |= __PERCPU_REF_DEAD; in percpu_ref_kill_and_confirm()
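percpu_ref_kill_and_confirm() (and its percpu_ref_kill() wrapper, which passes a NULL @confirm_kill) is the usual shutdown entry point: it drops the initial reference taken by percpu_ref_init() and marks the ref dying, after which percpu_ref_tryget_live() callers are refused once the switch to atomic mode has propagated. Continuing the hypothetical foo example, the release callback from the earlier sketch frees the object once every outstanding reference has been put:

static void foo_destroy(struct foo *foo)
{
        /*
         * Mark @foo dying and drop the initial reference.  New users
         * going through percpu_ref_tryget_live() start failing once the
         * kill has propagated; existing references stay valid until
         * they are put.
         */
        percpu_ref_kill(&foo->ref);

        /*
         * foo_release() runs (and frees @foo) when the last outstanding
         * percpu_ref_put() brings the count to zero -- possibly right
         * here, possibly much later.  @foo must not be touched after
         * this point.
         */
}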
347 * percpu_ref_reinit - re-initialize a percpu refcount
348 * @ref: percpu_ref to re-initialize
350 * Re-initialize @ref so that it's in the same state as when it finished
365 ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD; in percpu_ref_reinit()
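Finally, percpu_ref_reinit() supports a kill/reinit cycle instead of one-shot teardown. A hedged sketch of the pattern the kernel-doc describes (the ref must have been initialized, reached zero, and not been exited); struct bar and its helpers are hypothetical, and bar's setup code is assumed to have called percpu_ref_init() and init_completion() already:

#include <linux/percpu-refcount.h>
#include <linux/completion.h>

struct bar {
        struct percpu_ref       ref;
        struct completion       drained;        /* signalled by the release callback */
};

static void bar_release(struct percpu_ref *ref)
{
        struct bar *bar = container_of(ref, struct bar, ref);

        complete(&bar->drained);        /* refcount reached zero; do not free or exit */
}

/* Quiesce all users of @bar, then bring the refcount back to life. */
static void bar_cycle(struct bar *bar)
{
        reinit_completion(&bar->drained);
        percpu_ref_kill(&bar->ref);             /* drop the initial ref        */
        wait_for_completion(&bar->drained);     /* wait for all users to leave */

        /* ... @bar is now unused; reconfigure it here ... */

        percpu_ref_reinit(&bar->ref);           /* live again, back in percpu mode */
}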