/* SPDX-License-Identifier: GPL-2.0 */
/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>

#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/cleanup.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname)			\
	.dep_map = {					\
		.name = #lockname,			\
		.wait_type_inner = LD_WAIT_SLEEP,	\
	},
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#ifndef CONFIG_PREEMPT_RT

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif
#include <linux/android_vendor.h>
/*
 * For an uncontended rwsem, count and owner are the only fields a task
 * needs to touch when acquiring the rwsem. So they are put next to each
 * other to increase the chance that they will share the same cacheline.
 *
 * In a contended rwsem, the owner is likely the most frequently accessed
 * field in the structure as the optimistic waiter that holds the osq lock
 * will spin on owner. For an embedded rwsem, other hot fields in the
 * containing structure should be moved further away from the rwsem to
 * reduce the chance that they will share the same cacheline, causing
 * cacheline bouncing problems.
 */
struct rw_semaphore {
	atomic_long_t count;
	/*
	 * Write owner or one of the read owners, as well as flags regarding
	 * the current state of the rwsem. Can be used as a speculative
	 * check to see if the write owner is running on the cpu.
	 */
	atomic_long_t owner;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
	raw_spinlock_t wait_lock;
	struct list_head wait_list;
#ifdef CONFIG_DEBUG_RWSEMS
	void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
	ANDROID_VENDOR_DATA(1);
	ANDROID_OEM_DATA_ARRAY(1, 2);
};
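
/*
 * Layout sketch for the advice above (the containing structure and its
 * fields are hypothetical, not part of this header): keep other hot
 * fields of an embedding structure off the cachelines holding
 * count/owner, e.g. by cacheline-aligning them:
 *
 *	struct my_object {
 *		struct rw_semaphore rwsem;
 *		atomic_t hot_refcount ____cacheline_aligned_in_smp;
 *	};
 */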

#define RWSEM_UNLOCKED_VALUE		0UL
#define RWSEM_WRITER_LOCKED		(1UL << 0)
#define __RWSEM_COUNT_INIT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return atomic_long_read(&sem->count) != RWSEM_UNLOCKED_VALUE;
}

static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(atomic_long_read(&sem->count) == RWSEM_UNLOCKED_VALUE);
}

static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!(atomic_long_read(&sem->count) & RWSEM_WRITER_LOCKED));
}

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_RWSEMS
# define __RWSEM_DEBUG_INIT(lockname)	.magic = &lockname,
#else
# define __RWSEM_DEBUG_INIT(lockname)
#endif

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname)	.osq = OSQ_LOCK_UNLOCKED,
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)				\
	{ __RWSEM_COUNT_INIT(name),				\
	  .owner = ATOMIC_LONG_INIT(0),				\
	  __RWSEM_OPT_INIT(name)				\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
	  __RWSEM_DEBUG_INIT(name)				\
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)					\
do {							\
	static struct lock_class_key __key;		\
							\
	__init_rwsem((sem), #sem, &__key);		\
} while (0)
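
/*
 * Dynamic initialization sketch (the device structure and function are
 * hypothetical): each init_rwsem() call site gets its own lockdep class
 * via the static key declared inside the macro.
 *
 *	struct my_dev {
 *		struct rw_semaphore lock;
 *	};
 *
 *	static void my_dev_init(struct my_dev *dev)
 *	{
 *		init_rwsem(&dev->lock);
 *	}
 */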

/*
 * This is the same regardless of which rwsem implementation is being
 * used. It is just a heuristic, meant to be called by somebody already
 * holding the rwsem to see if somebody of an incompatible type is
 * waiting for access to the lock.
 */
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return !list_empty(&sem->wait_list);
}
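
/*
 * Illustrative pattern for this heuristic (helper names are
 * hypothetical): a long-running reader polls for waiters and briefly
 * drops the lock so that queued writers can get in.
 *
 *	down_read(&sem);
 *	while (have_more_work()) {
 *		process_one_item();
 *		if (rwsem_is_contended(&sem)) {
 *			up_read(&sem);
 *			cond_resched();
 *			down_read(&sem);
 *		}
 *	}
 *	up_read(&sem);
 */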

#else /* !CONFIG_PREEMPT_RT */

#include <linux/rwbase_rt.h>

struct rw_semaphore {
	struct rwbase_rt rwbase;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
};

#define __RWSEM_INITIALIZER(name)				\
	{							\
		.rwbase = __RWBASE_INITIALIZER(name),		\
		__RWSEM_DEP_MAP_INIT(name)			\
	}

#define DECLARE_RWSEM(lockname) \
	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)

extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)					\
do {							\
	static struct lock_class_key __key;		\
							\
	__init_rwsem((sem), #sem, &__key);		\
} while (0)

static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
{
	return rw_base_is_locked(&sem->rwbase);
}

static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!rwsem_is_locked(sem));
}

static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
{
	WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
}

static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return rw_base_is_contended(&sem->rwbase);
}

#endif /* CONFIG_PREEMPT_RT */

/*
 * The functions below are the same for all rwsem implementations including
 * the RT specific variant.
 */

static inline void rwsem_assert_held(const struct rw_semaphore *sem)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		lockdep_assert_held(sem);
	else
		rwsem_assert_held_nolockdep(sem);
}

static inline void rwsem_assert_held_write(const struct rw_semaphore *sem)
{
	if (IS_ENABLED(CONFIG_LOCKDEP))
		lockdep_assert_held_write(sem);
	else
		rwsem_assert_held_write_nolockdep(sem);
}
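
/*
 * Typical use of the asserts (sketch; the helper and structure are
 * hypothetical): document and enforce that a function may only be
 * called with the rwsem write-held.
 *
 *	static void update_state(struct my_dev *dev)
 *	{
 *		rwsem_assert_held_write(&dev->lock);
 *		dev->state++;
 *	}
 */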

/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem);
extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
extern int __must_check down_read_killable(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);
extern int __must_check down_write_killable(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);
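
/*
 * Basic usage sketch (shared_data is hypothetical): readers can hold
 * the lock concurrently, a writer is exclusive, and all acquisitions
 * may sleep.
 *
 *	down_read(&sem);
 *	x = shared_data;
 *	up_read(&sem);
 *
 *	down_write(&sem);
 *	shared_data = x + 1;
 *	up_write(&sem);
 */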

DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)

DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
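
/*
 * Guard usage sketch (the function and structure are hypothetical),
 * based on the scope-exit guards from <linux/cleanup.h>: the matching
 * up_read()/up_write() runs automatically when the scope is left.
 *
 *	static int read_val(struct my_dev *dev)
 *	{
 *		guard(rwsem_read)(&dev->lock);
 *		return dev->val;
 *	}
 *
 *	scoped_guard(rwsem_write, &dev->lock)
 *		dev->val++;
 */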

/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem);
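
/*
 * Downgrade sketch (helper names are hypothetical): publish an update
 * under the write lock, then continue reading without allowing another
 * writer to slip in between.
 *
 *	down_write(&sem);
 *	install_new_data();
 *	downgrade_write(&sem);
 *	consume_new_data();
 *	up_read(&sem);
 */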

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/locking/lockdep-design.rst for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);

# define down_write_nest_lock(sem, nest_lock)			\
do {								\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
} while (0)
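
/*
 * Nesting annotation sketch (parent/child are hypothetical): two locks
 * of the same class taken in a fixed order, with the inner acquisition
 * annotated so lockdep does not flag it as recursion.
 *
 *	down_write(&parent->lock);
 *	down_write_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	up_write(&child->lock);
 *	up_write(&parent->lock);
 */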

/*
 * Take/release a lock where the task releasing it is not the task
 * that acquired it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
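
/*
 * Non-owner sketch (the split across contexts is illustrative): the
 * read lock is taken in one task and released from another, e.g. an
 * I/O completion path, so owner tracking must be bypassed.
 *
 *	down_read_non_owner(&sem);	// in the submitting task
 *	...
 *	up_read_non_owner(&sem);	// later, from a different context
 */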
#else
# define down_read_nested(sem, subclass)		down_read(sem)
# define down_read_killable_nested(sem, subclass)	down_read_killable(sem)
# define down_write_nest_lock(sem, nest_lock)		down_write(sem)
# define down_write_nested(sem, subclass)		down_write(sem)
# define down_write_killable_nested(sem, subclass)	down_write_killable(sem)
# define down_read_non_owner(sem)			down_read(sem)
# define up_read_non_owner(sem)				up_read(sem)
#endif

#endif /* _LINUX_RWSEM_H */