/* SPDX-License-Identifier: GPL-2.0
 *
 * include/asm-sh/spinlock-llsc.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 */
#ifndef __ASM_SH_SPINLOCK_LLSC_H
#define __ASM_SH_SPINLOCK_LLSC_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define arch_spin_is_locked(x)		((x)->lock <= 0)

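/*
 * Lock word convention used throughout this file: a positive value
 * (1 after initialisation) means the lock is free, and 0 means it is
 * held, which is what arch_spin_is_locked() tests above.
 *
 * All updates are built on the SH-4A load-linked/store-conditional
 * pair: movli.l loads the lock word and sets a reservation, and
 * movco.l stores only if the reservation is still intact, setting the
 * T bit on success.  Each sequence below therefore retries with
 * "bf 1b" until its store lands atomically.
 */
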
/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_lock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"cmp/pl		%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

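/*
 * A rough C-level sketch of the acquire loop in arch_spin_lock()
 * above, for readability only (the real atomicity comes from the
 * movli.l/movco.l pair and cannot be expressed in plain C):
 *
 *	do {
 *		oldval = lock->lock;	! movli.l: load and reserve
 *		lock->lock = 0;		! movco.l: store iff still reserved
 *	} while (movco.l failed || oldval <= 0);
 *
 * cmp/pl sets T when oldval is positive, so the loop is re-entered
 * both when the store-conditional fails and when the lock was already
 * held (oldval <= 0) by another CPU.
 */
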
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/* This could be optimised with ARCH_HAS_MMIOWB */
	mmiowb();
	__asm__ __volatile__ (
		"mov		#1, %0 ! arch_spin_unlock	\n\t"
		"mov.l		%0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

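/*
 * arch_spin_unlock() above releases with a plain mov.l of 1: no
 * load-linked/store-conditional is needed because only the lock owner
 * writes here, and a CPU spinning in arch_spin_lock() either sees the
 * new value on its next movli.l or has its pending reservation broken
 * by this store.  The mmiowb() beforehand keeps MMIO writes issued
 * inside the critical section from leaking past the release.
 */
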
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}

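/*
 * Unlike arch_spin_lock(), arch_spin_trylock() above only loops on a
 * failed store-conditional, never on the lock value itself: it
 * atomically writes 0 and returns the previous value, so the caller
 * sees nonzero exactly when the lock was free and has now been taken.
 * The trailing synco provides the ordering barrier on the success
 * path.
 */
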
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

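/*
 * These rwlocks are counter based: the lock word starts at
 * RW_LOCK_BIAS (defined alongside arch_rwlock_t), each reader
 * subtracts 1 while it holds the lock, and a writer subtracts the
 * whole bias, taking the count to zero.  A reader may enter whenever
 * the count is still positive; a writer may enter only once the count
 * is back at the full bias, i.e. when there are no readers or writers.
 */
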
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_lock	\n\t"
		"cmp/pl		%0				\n\t"
		"bf		1b				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

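/*
 * In arch_read_lock() above, cmp/pl keeps the reader spinning while
 * the count is not positive, i.e. while a writer holds the lock; once
 * it is positive the reader claims its slot by storing count - 1, with
 * the usual movli.l/movco.l retry on a broken reservation.
 */
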
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_unlock	\n\t"
		"add		#1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

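/*
 * Unlike arch_spin_unlock(), dropping a read lock cannot use a plain
 * store: other readers may be updating the count at the same time, so
 * the increment in arch_read_unlock() above goes through the same
 * movli.l/movco.l retry loop.
 */
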
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_write_lock	\n\t"
		"cmp/hs		%2, %0				\n\t"
		"bf		1b				\n\t"
		"sub		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

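/*
 * In arch_write_lock() above, cmp/hs is an unsigned >= compare: the
 * writer spins until the count is back at the full RW_LOCK_BIAS (no
 * readers, no writer) and then subtracts the whole bias in one
 * movli.l/movco.l transaction, bringing the count to zero.
 */
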
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l		%1, @%0 ! arch_write_unlock	\n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

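/*
 * A writer excludes every other lock user, so arch_write_unlock()
 * above can release with a plain store, writing the full RW_LOCK_BIAS
 * value straight back.
 */
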
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_read_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/pl		%0				\n\t"
		"bf		2f				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"2:						\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}

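/*
 * arch_read_trylock() above gives up as soon as it samples a
 * non-positive count (branching to 2f without attempting the store)
 * and otherwise retries only on a failed movco.l.  The sampled count
 * is what gets tested, so a positive result means the reader slot was
 * actually taken.
 */
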
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_write_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/hs		%3, %0				\n\t"
		"bf		2f				\n\t"
		"sub		%3, %0				\n\t"
		"2:						\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}

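/*
 * arch_write_trylock() above always performs the movco.l: when the
 * count is below the bias it skips the sub and simply stores the value
 * it read back unchanged, which is harmless.  The "> RW_LOCK_BIAS - 1"
 * test on the sampled value tells the caller whether the whole bias
 * was actually claimed.
 */
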
#endif /* __ASM_SH_SPINLOCK_LLSC_H */