/* SPDX-License-Identifier: GPL-2.0 */

/*
 * This file provides wrappers with sanitizer instrumentation for bit
 * operations.
 *
 * To use this functionality, an arch's bitops.h file needs to define each of
 * the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
 * arch___set_bit(), etc.).
 */
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_H

#include <linux/kasan-checks.h>
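
/*
 * Illustrative sketch (not part of this header): the naming contract the
 * comment above describes. An architecture supplies its own atomic
 * implementations under the arch_ prefix and then includes this file, e.g.:
 *
 *	static inline void arch_set_bit(long nr, volatile unsigned long *addr)
 *	{
 *		... atomically OR BIT_MASK(nr) into addr[BIT_WORD(nr)] ...
 *	}
 *
 *	#include <asm-generic/bitops-instrumented.h>
 *
 * The include path shown is an assumption based on this header's guard name.
 */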

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This is a relaxed atomic operation (no implied memory barriers).
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	arch_set_bit(nr, addr);
}
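
/*
 * Usage sketch (illustrative only, not part of this header): set_bit() is the
 * right choice when other CPUs may touch the same word concurrently. The
 * helper name and bit number below are hypothetical.
 */
static inline void example_mark_online(unsigned long *flags)
{
	/* Atomic RMW of the word holding bit 0; no ordering is implied. */
	set_bit(0, flags);
}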

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic. If it is called on the same
 * region of memory concurrently, the effect may be that only one operation
 * succeeds.
 */
static inline void __set_bit(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	arch___set_bit(nr, addr);
}
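
/*
 * Usage sketch (illustrative only): __set_bit() is appropriate while the
 * bitmap is not yet visible to other CPUs, for instance when populating a
 * freshly allocated structure, or while an external lock serializes all
 * writers. The helper below is hypothetical.
 */
static inline void example_init_bitmap(unsigned long *map, unsigned int last)
{
	unsigned int i;

	/* Non-atomic stores are fine: nothing else can observe @map yet. */
	for (i = 0; i <= last; i++)
		__set_bit(i, map);
}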

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * This is a relaxed atomic operation (no implied memory barriers).
 */
static inline void clear_bit(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	arch_clear_bit(nr, addr);
}

/**
 * __clear_bit - Clears a bit in memory
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic. If it is called on the same
 * region of memory concurrently, the effect may be that only one operation
 * succeeds.
 */
static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	arch___clear_bit(nr, addr);
}

/**
 * clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	arch_clear_bit_unlock(nr, addr);
}

/**
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * This is a non-atomic operation but implies a release barrier before the
 * memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 */
static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	arch___clear_bit_unlock(nr, addr);
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * This is a relaxed atomic operation (no implied memory barriers).
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	arch_change_bit(nr, addr);
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic. If it is called on the same
 * region of memory concurrently, the effect may be that only one operation
 * succeeds.
 */
static inline void __change_bit(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	arch___change_bit(nr, addr);
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is an atomic fully-ordered operation (implied full memory barrier).
 */
static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	return arch_test_and_set_bit(nr, addr);
}
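
/*
 * Usage sketch (illustrative only): the fully-ordered test_and_set_bit() lets
 * exactly one of several racing callers win, e.g. to run one-time setup. The
 * helper and bit number below are hypothetical.
 */
static inline bool example_claim_one_time_init(unsigned long *state)
{
	/* True only for the caller whose RMW flipped bit 0 from 0 to 1. */
	return !test_and_set_bit(0, state);
}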

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic. If two instances of this operation race, one
 * can appear to succeed but actually fail.
 */
static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	return arch___test_and_set_bit(nr, addr);
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics if
 * the returned value is 0.
 * It can be used to implement bit locks.
 */
static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	return arch_test_and_set_bit_lock(nr, addr);
}
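
/*
 * Usage sketch (illustrative only): a minimal bit lock built from
 * test_and_set_bit_lock() and clear_bit_unlock() above. Both helper names
 * are hypothetical.
 */
static inline bool example_bit_trylock(long nr, volatile unsigned long *addr)
{
	/* Acquire semantics apply only when the lock is actually taken. */
	return !test_and_set_bit_lock(nr, addr);
}

static inline void example_bit_unlock(long nr, volatile unsigned long *addr)
{
	/* Release semantics; pairs with the acquire in example_bit_trylock(). */
	clear_bit_unlock(nr, addr);
}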

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This is an atomic fully-ordered operation (implied full memory barrier).
 */
static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	return arch_test_and_clear_bit(nr, addr);
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic. If two instances of this operation race, one
 * can appear to succeed but actually fail.
 */
static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	return arch___test_and_clear_bit(nr, addr);
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This is an atomic fully-ordered operation (implied full memory barrier).
 */
static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	return arch_test_and_change_bit(nr, addr);
}

/**
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic. If two instances of this operation race, one
 * can appear to succeed but actually fail.
 */
static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	return arch___test_and_change_bit(nr, addr);
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline bool test_bit(long nr, const volatile unsigned long *addr)
{
	kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
	return arch_test_bit(nr, addr);
}
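
/*
 * Usage sketch (illustrative only): test_bit() is a plain read, so only a
 * KASAN read check is emitted, unlike the write checks in the modifying
 * operations above. The helper below is hypothetical.
 */
static inline bool example_is_online(const unsigned long *flags)
{
	/* Queries bit 0 of @flags without modifying it. */
	return test_bit(0, flags);
}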

#if defined(arch_clear_bit_unlock_is_negative_byte)
/**
 * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
 *                                     byte is negative, for unlock.
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 *
 * This is a bit of a one-trick-pony for the filemap code, which clears
 * PG_locked and tests PG_waiters.
 */
static inline bool
clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
	return arch_clear_bit_unlock_is_negative_byte(nr, addr);
}
/* Let everybody know we have it. */
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
#endif
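
/*
 * Usage sketch (illustrative only, hypothetical bit layout): unlock an object
 * and learn, in the same atomic operation, whether the "waiters" bit (bit 7,
 * the sign bit of the bottom byte) is set, so a wakeup can be skipped when it
 * is not. Guarded because the operation above is optional.
 */
#ifdef clear_bit_unlock_is_negative_byte
static inline bool example_unlock_and_test_waiters(unsigned long *word)
{
	/* Bit 0 is the hypothetical lock bit; bit 7 the hypothetical waiters bit. */
	return clear_bit_unlock_is_negative_byte(0, word);
}
#endif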

#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_H */