Lines Matching full:ref

10  * don't try to detect the ref hitting 0 - which means that get/put can just
21 * the ref hitting 0 on every put - this would require global synchronization
25 * the ref can't hit 0 before the user drops the initial ref, so as long as we
26 * convert to non percpu mode before the initial ref is dropped everything
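
The design notes above rely on the get/put fast path being a plain per-cpu operation. As a rough sketch of what a get looks like while the ref is in percpu mode (paraphrasing the inline helpers in include/linux/percpu-refcount.h, not quoting them):

    static inline void percpu_ref_get_sketch(struct percpu_ref *ref)
    {
        unsigned long __percpu *percpu_count;

        rcu_read_lock_sched();
        if (__ref_is_percpu(ref, &percpu_count))
            this_cpu_inc(*percpu_count);    /* percpu mode: no shared cacheline */
        else
            atomic_long_inc(&ref->count);   /* atomic/dead mode fallback */
        rcu_read_unlock_sched();
    }

put is symmetric, except that in atomic mode it calls ref->release() when the count reaches zero.
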
39 static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref) in percpu_count_ptr() argument
42 (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD); in percpu_count_ptr()
47 * @ref: percpu_ref to initialize
52 * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a
53 * refcount of 1; analogous to atomic_long_set(ref, 1). See the
59 int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release, in percpu_ref_init() argument
66 ref->percpu_count_ptr = (unsigned long) in percpu_ref_init()
68 if (!ref->percpu_count_ptr) in percpu_ref_init()
71 ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC; in percpu_ref_init()
74 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC; in percpu_ref_init()
79 ref->percpu_count_ptr |= __PERCPU_REF_DEAD; in percpu_ref_init()
83 atomic_long_set(&ref->count, start_count); in percpu_ref_init()
85 ref->release = release; in percpu_ref_init()
86 ref->confirm_switch = NULL; in percpu_ref_init()
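
Taken together, the fragments above describe the usual initialization pattern. A minimal sketch, assuming a hypothetical my_object that embeds the ref (my_object, my_object_create and my_object_release are illustrative names, not from this file; the release callback is sketched below next to percpu_ref_exit()):

    struct my_object {
        struct percpu_ref ref;
        /* ... payload ... */
    };

    static struct my_object *my_object_create(void)
    {
        struct my_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
            return NULL;

        /* flags == 0: start in percpu mode with a refcount of 1 */
        if (percpu_ref_init(&obj->ref, my_object_release, 0, GFP_KERNEL)) {
            kfree(obj);
            return NULL;
        }
        return obj;
    }
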
93 * @ref: percpu_ref to exit
95 * This function exits @ref. The caller is responsible for ensuring that
96 * @ref is no longer in active use. The usual places to invoke this
97 * function from are the @ref->release() callback or in init failure path
101 void percpu_ref_exit(struct percpu_ref *ref) in percpu_ref_exit() argument
103 unsigned long __percpu *percpu_count = percpu_count_ptr(ref); in percpu_ref_exit()
107 WARN_ON_ONCE(ref->confirm_switch); in percpu_ref_exit()
109 ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD; in percpu_ref_exit()
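
As the comment notes, percpu_ref_exit() is normally called from the release callback or on an init-failure path. Continuing the hypothetical example from above:

    static void my_object_release(struct percpu_ref *ref)
    {
        struct my_object *obj = container_of(ref, struct my_object, ref);

        percpu_ref_exit(&obj->ref);    /* frees the percpu counter */
        kfree(obj);
    }
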
116 struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu); in percpu_ref_call_confirm_rcu() local
118 ref->confirm_switch(ref); in percpu_ref_call_confirm_rcu()
119 ref->confirm_switch = NULL; in percpu_ref_call_confirm_rcu()
122 /* drop ref from percpu_ref_switch_to_atomic() */ in percpu_ref_call_confirm_rcu()
123 percpu_ref_put(ref); in percpu_ref_call_confirm_rcu()
128 struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu); in percpu_ref_switch_to_atomic_rcu() local
129 unsigned long __percpu *percpu_count = percpu_count_ptr(ref); in percpu_ref_switch_to_atomic_rcu()
137 atomic_long_read(&ref->count), (long)count); in percpu_ref_switch_to_atomic_rcu()
141 * to &ref->count; since gets could be happening on one cpu while puts in percpu_ref_switch_to_atomic_rcu()
143 * @ref->count to hit 0 before we've got a consistent value - but the in percpu_ref_switch_to_atomic_rcu()
147 * &ref->count; we need the bias value to prevent &ref->count from in percpu_ref_switch_to_atomic_rcu()
151 atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count); in percpu_ref_switch_to_atomic_rcu()
153 WARN_ONCE(atomic_long_read(&ref->count) <= 0, in percpu_ref_switch_to_atomic_rcu()
154 "percpu ref (%pf) <= 0 (%ld) after switching to atomic", in percpu_ref_switch_to_atomic_rcu()
155 ref->release, atomic_long_read(&ref->count)); in percpu_ref_switch_to_atomic_rcu()
157 /* @ref is viewed as dead on all CPUs, send out switch confirmation */ in percpu_ref_switch_to_atomic_rcu()
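
The matched lines only show part of the fold-in step; the summation loop itself does not contain "ref", so it is missing from this listing. A sketch of the whole operation, under the same names used above:

    unsigned long count = 0;
    int cpu;

    for_each_possible_cpu(cpu)
        count += *per_cpu_ptr(percpu_count, cpu);

    /* fold the percpu counts into ref->count and drop the bias */
    atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
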
161 static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref) in percpu_ref_noop_confirm_switch() argument
165 static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref, in __percpu_ref_switch_to_atomic() argument
168 if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) { in __percpu_ref_switch_to_atomic()
170 confirm_switch(ref); in __percpu_ref_switch_to_atomic()
175 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC; in __percpu_ref_switch_to_atomic()
181 ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch; in __percpu_ref_switch_to_atomic()
183 percpu_ref_get(ref); /* put after confirmation */ in __percpu_ref_switch_to_atomic()
184 call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu); in __percpu_ref_switch_to_atomic()
187 static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref) in __percpu_ref_switch_to_percpu() argument
189 unsigned long __percpu *percpu_count = percpu_count_ptr(ref); in __percpu_ref_switch_to_percpu()
194 if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) in __percpu_ref_switch_to_percpu()
197 atomic_long_add(PERCPU_COUNT_BIAS, &ref->count); in __percpu_ref_switch_to_percpu()
208 smp_store_release(&ref->percpu_count_ptr, in __percpu_ref_switch_to_percpu()
209 ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC); in __percpu_ref_switch_to_percpu()
212 static void __percpu_ref_switch_mode(struct percpu_ref *ref, in __percpu_ref_switch_mode() argument
222 wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch, in __percpu_ref_switch_mode()
225 if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD)) in __percpu_ref_switch_mode()
226 __percpu_ref_switch_to_atomic(ref, confirm_switch); in __percpu_ref_switch_mode()
228 __percpu_ref_switch_to_percpu(ref); in __percpu_ref_switch_mode()
233 * @ref: percpu_ref to switch to atomic mode
239 * Schedule switching of @ref to atomic mode. All its percpu counts will
244 * operations. Note that @ref will stay in atomic mode across kill/reinit
247 * This function may block if @ref is in the process of switching to atomic
248 * mode. If the caller ensures that @ref is not in the process of
251 void percpu_ref_switch_to_atomic(struct percpu_ref *ref, in percpu_ref_switch_to_atomic() argument
258 ref->force_atomic = true; in percpu_ref_switch_to_atomic()
259 __percpu_ref_switch_mode(ref, confirm_switch); in percpu_ref_switch_to_atomic()
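
A sketch of how a caller might use this, with a hypothetical confirmation callback (my_switch_done, my_quiesce and obj are illustrative, not from this file):

    static void my_switch_done(struct percpu_ref *ref)
    {
        /* runs once the switch is visible on all CPUs */
        pr_info("ref is now in atomic mode\n");
    }

    static void my_quiesce(struct my_object *obj)
    {
        percpu_ref_switch_to_atomic(&obj->ref, my_switch_done);
    }
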
267 * @ref: percpu_ref to switch to atomic mode
269 * Schedule switching the ref to atomic mode, and wait for the
273 void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref) in percpu_ref_switch_to_atomic_sync() argument
275 percpu_ref_switch_to_atomic(ref, NULL); in percpu_ref_switch_to_atomic_sync()
276 wait_event(percpu_ref_switch_waitq, !ref->confirm_switch); in percpu_ref_switch_to_atomic_sync()
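
A caller that needs the switch to have completed before proceeding can use the synchronous variant; a minimal sketch with the hypothetical obj from earlier:

    percpu_ref_switch_to_atomic_sync(&obj->ref);
    /* from here on, gets and puts hit the shared atomic counter */
    if (percpu_ref_is_zero(&obj->ref))
        pr_info("no outstanding references\n");
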
282 * @ref: percpu_ref to switch to percpu mode
285 * To re-use an expired ref, use percpu_ref_reinit().
287 * Switch @ref to percpu mode. This function may be invoked concurrently
290 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
294 * This function may block if @ref is in the process of switching to atomic
295 * mode. If the caller ensures that @ref is not in the process of
298 void percpu_ref_switch_to_percpu(struct percpu_ref *ref) in percpu_ref_switch_to_percpu() argument
304 ref->force_atomic = false; in percpu_ref_switch_to_percpu()
305 __percpu_ref_switch_mode(ref, NULL); in percpu_ref_switch_to_percpu()
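
A sketch of the reverse direction, again on the hypothetical obj:

    /* done with the atomic-mode work: go back to cheap percpu gets/puts */
    percpu_ref_switch_to_percpu(&obj->ref);
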
312 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
313 * @ref: percpu_ref to kill
318 * called after @ref is seen as dead from all CPUs at which point all
323 * but it may block if @confirm_kill is specified and @ref is in the
328 void percpu_ref_kill_and_confirm(struct percpu_ref *ref, in percpu_ref_kill_and_confirm() argument
335 WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD, in percpu_ref_kill_and_confirm()
336 "%s called more than once on %pf!", __func__, ref->release); in percpu_ref_kill_and_confirm()
338 ref->percpu_count_ptr |= __PERCPU_REF_DEAD; in percpu_ref_kill_and_confirm()
339 __percpu_ref_switch_mode(ref, confirm_kill); in percpu_ref_kill_and_confirm()
340 percpu_ref_put(ref); in percpu_ref_kill_and_confirm()
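
A sketch of a typical teardown using the hypothetical object again; the kill_done completion member and my_object_shutdown() are assumed for the example:

    static void my_confirm_kill(struct percpu_ref *ref)
    {
        struct my_object *obj = container_of(ref, struct my_object, ref);

        complete(&obj->kill_done);    /* assumes a struct completion in my_object */
    }

    static void my_object_shutdown(struct my_object *obj)
    {
        percpu_ref_kill_and_confirm(&obj->ref, my_confirm_kill);
        wait_for_completion(&obj->kill_done);
        /* percpu_ref_kill(&obj->ref) is the shorthand for a NULL confirm_kill */
    }
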
348 * @ref: percpu_ref to re-initialize
350 * Re-initialize @ref so that it's in the same state as when it finished
351 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
354 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
357 void percpu_ref_reinit(struct percpu_ref *ref) in percpu_ref_reinit() argument
363 WARN_ON_ONCE(!percpu_ref_is_zero(ref)); in percpu_ref_reinit()
365 ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD; in percpu_ref_reinit()
366 percpu_ref_get(ref); in percpu_ref_reinit()
367 __percpu_ref_switch_mode(ref, NULL); in percpu_ref_reinit()
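
A kill/reinit cycle on the hypothetical obj might look like the sketch below; it assumes the release callback does not free the object, since the ref has to reach zero before it can be re-armed:

    percpu_ref_kill(&obj->ref);
    /* ... drop the remaining references, wait for release ... */
    WARN_ON(!percpu_ref_is_zero(&obj->ref));
    percpu_ref_reinit(&obj->ref);    /* back in percpu mode with a count of 1 */
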