/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types_raw.h:
 *			  The raw types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. low-level
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types_raw.h:
 *			  The raw RT types and initializers
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __section(".spinlock.text")

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock)					\
do {									\
	static struct lock_class_key __key;				\
									\
	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);	\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *	  { X = 0;  Y = 0; }
 *
 *	  CPU0				CPU1
 *
 *	  WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	  spin_lock(S);			smp_mb();
 *	  smp_mb__after_spinlock();	r1 = READ_ONCE(X);
 *	  r0 = READ_ONCE(Y);
 *	  spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *  { X = 0;  Y = 0; }
 *
 *  CPU0		CPU1				CPU2
 *
 *  spin_lock(S);	spin_lock(S);			r1 = READ_ONCE(Y);
 *  WRITE_ONCE(X, 1);	smp_mb__after_spinlock();	smp_rmb();
 *  spin_unlock(S);	r0 = READ_ONCE(X);		r2 = READ_ONCE(X);
 *			WRITE_ONCE(Y, 1);
 *			spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 * Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	do { } while (0)
#endif
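
/*
 * Illustrative sketch, not part of the upstream header: litmus test (1)
 * above rewritten against the concrete spinlock API.  DEFINE_SPINLOCK(),
 * WRITE_ONCE() and READ_ONCE() are real kernel primitives; the lock and
 * variable names are used only for illustration:
 *
 *	static DEFINE_SPINLOCK(s);
 *	static int x, y;
 *
 *	CPU0				CPU1
 *
 *	WRITE_ONCE(x, 1);		WRITE_ONCE(y, 1);
 *	spin_lock(&s);			smp_mb();
 *	smp_mb__after_spinlock();	r1 = READ_ONCE(x);
 *	r0 = READ_ONCE(y);
 *	spin_unlock(&s);
 *
 * With the barrier in place the outcome (r0 == 0 && r1 == 0) is forbidden,
 * exactly as stated for property (1) above.
 */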

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
	mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&(lock)->raw_lock);

	if (ret)
		mmiowb_spin_lock();

	return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	mmiowb_spin_unlock();
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
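
/*
 * Illustrative note, not part of the upstream header: with
 * CONFIG_DEBUG_LOCK_ALLOC=n the comma expression above still evaluates
 * "subclass", so a caller like the hypothetical one below (struct foo and
 * the lock are made up for illustration) compiles cleanly under W=1 even
 * though the value is otherwise unused:
 *
 *	static DEFINE_RAW_SPINLOCK(child_lock);
 *
 *	void lock_child(struct foo *parent)
 *	{
 *		int subclass = parent->depth;
 *
 *		raw_spin_lock_nested(&child_lock, subclass);
 *		...
 *		raw_spin_unlock(&child_lock);
 *	}
 *
 * With CONFIG_DEBUG_LOCK_ALLOC=y the same call feeds "subclass" to lockdep
 * as the nesting level used for deadlock detection.
 */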

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
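
/*
 * Illustrative sketch, not part of the upstream header: the _irqsave
 * variants require "flags" to be a plain unsigned long (enforced by the
 * typecheck() above) passed by name, not by address.  The lock and data
 * below are hypothetical:
 *
 *	static DEFINE_RAW_SPINLOCK(fifo_lock);
 *	static unsigned int fifo_count;
 *
 *	void fifo_push(void)
 *	{
 *		unsigned long flags;
 *
 *		raw_spin_lock_irqsave(&fifo_lock, flags);
 *		fifo_count++;
 *		raw_spin_unlock_irqrestore(&fifo_lock, flags);
 *	}
 */
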
#ifndef CONFIG_PREEMPT_RT
/* Include rwlock functions for !RT */
#include <linux/rwlock.h>
#endif

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/* Non PREEMPT_RT kernel, map to raw spinlocks: */
#ifndef CONFIG_PREEMPT_RT

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#ifdef CONFIG_DEBUG_SPINLOCK

# define spin_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init(spinlock_check(lock),		\
			     #lock, &__key, LD_WAIT_CONFIG);	\
} while (0)

#else

# define spin_lock_init(_lock)			\
do {						\
	spinlock_check(_lock);			\
	*(_lock) = __SPIN_LOCK_UNLOCKED(_lock);	\
} while (0)

#endif
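
/*
 * Illustrative sketch, not part of the upstream header: typical
 * initialization of an embedded spinlock_t in a dynamically allocated
 * object.  "struct foo", foo_alloc() and the kzalloc() usage (which needs
 * <linux/slab.h>) are hypothetical:
 *
 *	struct foo {
 *		spinlock_t lock;
 *		int value;
 *	};
 *
 *	struct foo *foo_alloc(gfp_t gfp)
 *	{
 *		struct foo *f = kzalloc(sizeof(*f), gfp);
 *
 *		if (f)
 *			spin_lock_init(&f->lock);
 *		return f;
 *	}
 *
 * A statically allocated lock can instead use DEFINE_SPINLOCK(name), which
 * expands to the same unlocked initial state.
 */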

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied with other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}
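
/*
 * Illustrative note, not part of the upstream header: as the kerneldoc
 * above explains, the return value carries no ordering and is always 0 on
 * CONFIG_SMP=n non-debug builds, so patterns like the hypothetical one
 * below are broken:
 *
 *	if (!spin_is_locked(&obj->lock))	(racy, do not do this)
 *		spin_lock(&obj->lock);
 *
 * Code that wants to assert that the current context holds the lock is
 * usually better served by lockdep_assert_held(&obj->lock).
 */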

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

#else  /* !CONFIG_PREEMPT_RT */
# include <linux/spinlock_rt.h>
#endif /* CONFIG_PREEMPT_RT */

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
					unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
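
/*
 * Illustrative sketch, not part of the upstream header: the classic
 * refcount-drop pattern, where the lock is only taken when the final
 * reference goes away.  "struct obj", obj_list_lock, obj_destroy() and the
 * list handling are hypothetical:
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (!atomic_dec_and_lock(&o->refcount, &obj_list_lock))
 *			return;
 *
 *		list_del(&o->node);
 *		spin_unlock(&obj_list_lock);
 *		obj_destroy(o);
 *	}
 *
 * atomic_dec_and_lock() returns with the lock held only when the counter
 * reached zero, so the unlock is required on that path alone.
 */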

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
			     size_t max_size, unsigned int cpu_mult,
			     gfp_t gfp, const char *name,
			     struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
	({								     \
		static struct lock_class_key key;			     \
		int ret;						     \
									     \
		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
					       cpu_mult, gfp, #locks, &key); \
		ret;							     \
	})

void free_bucket_spinlocks(spinlock_t *locks);
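
/*
 * Illustrative sketch, not part of the upstream header: allocating an
 * array of hashed bucket locks and selecting one by masking a hash value.
 * The table size, the hash source and the error handling are hypothetical:
 *
 *	static spinlock_t *bucket_locks;
 *	static unsigned int bucket_mask;
 *
 *	static int table_init(void)
 *	{
 *		return alloc_bucket_spinlocks(&bucket_locks, &bucket_mask,
 *					      1024, 0, GFP_KERNEL);
 *	}
 *
 *	static spinlock_t *bucket_lock(u32 hash)
 *	{
 *		return &bucket_locks[hash & bucket_mask];
 *	}
 *
 *	static void table_exit(void)
 *	{
 *		free_bucket_spinlocks(bucket_locks);
 *	}
 */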

#endif /* __LINUX_SPINLOCK_H */