// SPDX-License-Identifier: GPL-2.0
/*
 * Out-of-line refcount functions.
 */

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>

#define REFCOUNT_WARN(str)	WARN_ONCE(1, "refcount_t: " str ".\n")

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t)
{
	refcount_set(r, REFCOUNT_SATURATED);

	switch (t) {
	case REFCOUNT_ADD_NOT_ZERO_OVF:
		REFCOUNT_WARN("saturated; leaking memory");
		break;
	case REFCOUNT_ADD_OVF:
		REFCOUNT_WARN("saturated; leaking memory");
		break;
	case REFCOUNT_ADD_UAF:
		REFCOUNT_WARN("addition on 0; use-after-free");
		break;
	case REFCOUNT_SUB_UAF:
		REFCOUNT_WARN("underflow; use-after-free");
		break;
	case REFCOUNT_DEC_LEAK:
		REFCOUNT_WARN("decrement hit 0; leaking memory");
		break;
	default:
		REFCOUNT_WARN("unknown saturation event!?");
	}
}
EXPORT_SYMBOL(refcount_warn_saturate);

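/*
 * Illustrative sketch, not part of the original file: the saturation scheme
 * pins a misbehaving counter at REFCOUNT_SATURATED and deliberately leaks
 * the object instead of risking a use-after-free. struct obj below is
 * hypothetical; if obj_get() ever overflows the counter, the increment path
 * calls refcount_warn_saturate() with REFCOUNT_ADD_OVF and the counter then
 * sticks at REFCOUNT_SATURATED.
 *
 *	struct obj {
 *		refcount_t ref;
 *	};
 *
 *	static void obj_get(struct obj *o)
 *	{
 *		refcount_inc(&o->ref);
 *	}
 */
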
/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * There is no atomic_t counterpart; it attempts a 1 -> 0 transition and
 * returns the success thereof.
 *
 * Like all decrement operations, it provides release memory order and
 * provides a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * rather than a generic cmpxchg, because that would allow implementing
 * unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);

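/*
 * Illustrative sketch, not part of the original file: using
 * refcount_dec_if_one() as a try-delete operator. The object is freed only
 * if the caller held the last reference; otherwise the count is left
 * untouched and the delete attempt fails. struct obj and obj_free() are
 * hypothetical.
 *
 *	static bool obj_try_delete(struct obj *o)
 *	{
 *		if (!refcount_dec_if_one(&o->ref))
 *			return false;
 *
 *		obj_free(o);
 *		return true;
 *	}
 */
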
/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * There is no atomic_t counterpart; it decrements unless the value is 1, in
 * which case it returns false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);

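/*
 * Illustrative sketch, not part of the original file: refcount_dec_not_one()
 * is the lockless fast path that the dec-and-lock helpers below build on.
 * Any reference other than the last can be dropped without the lock; only
 * the final 1 -> 0 transition must happen under the lock protecting lookup.
 * obj_lock, struct obj and obj_free() are hypothetical.
 *
 *	if (refcount_dec_not_one(&o->ref))
 *		return;
 *
 *	mutex_lock(&obj_lock);
 *	if (!refcount_dec_and_test(&o->ref)) {
 *		mutex_unlock(&obj_lock);
 *		return;
 *	}
 *	list_del(&o->node);
 *	mutex_unlock(&obj_lock);
 *	obj_free(o);
 */
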
/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);

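/*
 * Illustrative sketch, not part of the original file: typical use for an
 * object kept on a mutex-protected list. On a true return the mutex is
 * held, so the caller unlinks and unlocks before freeing. obj_list_lock,
 * struct obj and obj_free() are hypothetical.
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (!refcount_dec_and_mutex_lock(&o->ref, &obj_list_lock))
 *			return;
 *
 *		list_del(&o->node);
 *		mutex_unlock(&obj_list_lock);
 *		obj_free(o);
 *	}
 */
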
/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);

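/*
 * Illustrative sketch, not part of the original file: the same pattern with
 * a spinlock-protected lookup structure. hash_del() here stands in for
 * whatever unlink the caller needs; all names are hypothetical.
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (!refcount_dec_and_lock(&o->ref, &obj_hash_lock))
 *			return;
 *
 *		hash_del(&o->hnode);
 *		spin_unlock(&obj_hash_lock);
 *		obj_free(o);
 *	}
 */
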
/**
 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
 *                                 interrupts if able to decrement refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
 *
 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
 * with disabled interrupts.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
				   unsigned long *flags)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock_irqsave(lock, *flags);
	if (!refcount_dec_and_test(r)) {
		spin_unlock_irqrestore(lock, *flags);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);

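/*
 * Illustrative sketch, not part of the original file: the irqsave variant
 * suits objects whose lock is also taken from interrupt context. The saved
 * flags must be passed back to spin_unlock_irqrestore(). All names are
 * hypothetical.
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		unsigned long flags;
 *
 *		if (!refcount_dec_and_lock_irqsave(&o->ref, &obj_lock, &flags))
 *			return;
 *
 *		list_del(&o->node);
 *		spin_unlock_irqrestore(&obj_lock, flags);
 *		obj_free(o);
 *	}
 */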