/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * Saturation semantics
 * ====================
 *
 * refcount_t differs from atomic_t in that the counter saturates at
 * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
 * counter and causing 'spurious' use-after-free issues. In order to avoid the
 * cost associated with introducing cmpxchg() loops into all of the saturating
 * operations, we temporarily allow the counter to take on an unchecked value
 * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
 * or overflow has occurred. Although this is racy when multiple threads
 * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
 * equidistant from 0 and INT_MAX we minimise the scope for error:
 *
 *                                  INT_MAX    REFCOUNT_SATURATED    UINT_MAX
 *   0                        (0x7fff_ffff)    (0xc000_0000)    (0xffff_ffff)
 *   +--------------------------------+----------------+----------------+
 *                                    <---------- bad value! ----------->
 *
 * (in a signed view of the world, the "bad value" range corresponds to
 * a negative counter value).
 *
 * As an example, consider a refcount_inc() operation that causes the counter
 * to overflow:
 *
 *	int old = atomic_fetch_add_relaxed(1, &r->refs);
 *	// old is INT_MAX, refcount now INT_MIN (0x8000_0000)
 *	if (old < 0)
 *		atomic_set(&r->refs, REFCOUNT_SATURATED);
 *
 * If another thread also performs a refcount_inc() operation between the two
 * atomic operations, then the count will continue to edge closer to 0. If it
 * reaches a value of 1 before /any/ of the threads reset it to the saturated
 * value, then a concurrent refcount_dec_and_test() may erroneously free the
 * underlying object. Given the precise timing details involved with the
 * round-robin scheduling of each thread manipulating the refcount and the need
 * to hit the race multiple times in succession, there doesn't appear to be a
 * practical avenue of attack even if using refcount_add() operations with
 * larger increments.
 *
 * Memory ordering
 * ===============
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before it; they also provide a control dependency,
 * which will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
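 *
 * As an illustrative sketch of how the above is relied upon (the object, its
 * "ref" member, obj_lookup() and obj_free() are hypothetical names, not part
 * of this API), a typical lockless get/put pairing looks like:
 *
 *	obj = obj_lookup(key);		// dependent load / lock orders the get
 *	if (obj && !refcount_inc_not_zero(&obj->ref))
 *		obj = NULL;		// object already on its way out
 *
 *	// ... use obj ...
 *
 *	if (obj && refcount_dec_and_test(&obj->ref))
 *		obj_free(obj);		// all prior accesses happen before this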
 *
 */

#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/spinlock_types.h>

struct mutex;

/**
 * typedef refcount_t - variant of atomic_t specialized for reference counts
 * @refs: atomic_t counter field
 *
 * The counter saturates at REFCOUNT_SATURATED and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free bugs.
 */
typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)
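/*
 * Example (illustrative only; "struct foo" and foo_alloc() are hypothetical
 * names, and kzalloc()/GFP_KERNEL need <linux/slab.h>): a refcount_t is
 * normally embedded in the object it protects and starts life at 1, held by
 * the creator.
 *
 *	struct foo {
 *		refcount_t ref;
 *	};
 *
 *	static struct foo *foo_alloc(void)
 *	{
 *		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *		if (f)
 *			refcount_set(&f->ref, 1);	// creator's reference
 *		return f;
 *	}
 *
 * Statically allocated objects can use REFCOUNT_INIT(1) instead.
 */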

enum refcount_saturation_type {
	REFCOUNT_ADD_NOT_ZERO_OVF,
	REFCOUNT_ADD_OVF,
	REFCOUNT_ADD_UAF,
	REFCOUNT_SUB_UAF,
	REFCOUNT_DEC_LEAK,
};

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);

/**
 * refcount_set - set a refcount's value
 * @r: the refcount
 * @n: value to which the refcount will be set
 */
static inline void refcount_set(refcount_t *r, int n)
{
	atomic_set(&r->refs, n);
}

/**
 * refcount_read - get a refcount's value
 * @r: the refcount
 *
 * Return: the refcount's value
 */
static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}
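/*
 * Example (illustrative; "obj" is a hypothetical object embedding a
 * refcount_t named "ref"): because the value can change concurrently,
 * refcount_read() is best suited to diagnostics and sanity checks rather
 * than lifetime decisions:
 *
 *	WARN_ON(refcount_read(&obj->ref) == 0);
 */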

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	int old = refcount_read(r);

	do {
		if (!old)
			break;
	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

	if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);

	return old;
}

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
static inline void refcount_add(int i, refcount_t *r)
{
	int old = atomic_fetch_add_relaxed(i, &r->refs);

	if (unlikely(!old))
		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
	else if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return refcount_add_not_zero(1, r);
}
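/*
 * Example (illustrative; the idr, the object and its "ref" member are
 * hypothetical, and the RCU/idr helpers come from <linux/rcupdate.h> and
 * <linux/idr.h>): a typical RCU-protected lookup only takes a reference if
 * the object is still live:
 *
 *	rcu_read_lock();
 *	obj = idr_find(&obj_idr, id);
 *	if (obj && !refcount_inc_not_zero(&obj->ref))
 *		obj = NULL;
 *	rcu_read_unlock();
 */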

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
static inline void refcount_inc(refcount_t *r)
{
	refcount_add(1, r);
}

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	int old = atomic_fetch_sub_release(i, &r->refs);

	if (old == i) {
		smp_acquire__after_ctrl_dep();
		return true;
	}

	if (unlikely(old < 0 || old - i < 0))
		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);

	return false;
}
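/*
 * Example (illustrative; "pool", its "ref" member and pool_destroy() are
 * hypothetical): a caller holding one reference per outstanding request can
 * drop them all in a single operation:
 *
 *	if (refcount_sub_and_test(nr_requests, &pool->ref))
 *		pool_destroy(pool);
 */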

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
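/*
 * Example (illustrative; "struct foo", its "ref" member and foo_free() are
 * hypothetical): the canonical put path frees the object only on the 1->0
 * transition, which the acquire ordering on success makes safe:
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_test(&f->ref))
 *			foo_free(f);
 *	}
 */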

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
static inline void refcount_dec(refcount_t *r)
{
	if (unlikely(atomic_fetch_sub_release(1, &r->refs) <= 1))
		refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}

extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
							spinlock_t *lock,
							unsigned long *flags);
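/*
 * Example (illustrative; the list, the spinlock and the names are
 * hypothetical): refcount_dec_and_lock() suits the pattern where dropping
 * the last reference must also unlink the object under a lock, so that a
 * concurrent lookup cannot find it between the decrement and the removal:
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_lock(&f->ref, &foo_list_lock)) {
 *			list_del(&f->node);
 *			spin_unlock(&foo_list_lock);
 *			kfree(f);
 *		}
 *	}
 */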
#endif /* _LINUX_REFCOUNT_H */