/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H

/*
 *  linux/include/asm-m32r/spinlock.h
 *
 *  M32R version:
 *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <linux/atomic.h>
#include <asm/dcache_clear.h>
#include <asm/page.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 *
 * (The type definitions are in asm/spinlock_types.h.)
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 */

#define arch_spin_is_locked(x)		(*(volatile int *)(&(x)->slock) <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
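
/*
 * Usage sketch (an illustrative addition, not part of the original
 * header): these primitives sit underneath the generic
 * spin_lock()/spin_unlock() wrappers from <linux/spinlock.h>, and are
 * rarely called directly.  Assuming __ARCH_SPIN_LOCK_UNLOCKED from
 * asm/spinlock_types.h initializes slock to 1:
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&lock);		// slock: 1 -> 0 (<= 0 while held)
 *	// ... critical section ...
 *	arch_spin_unlock(&lock);	// slock: back to 1
 */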

/**
 * arch_spin_trylock - try to acquire a spin lock without spinning
 * @lock: pointer to the lock variable
 *
 * arch_spin_trylock() attempts to take the lock once and returns the
 * result: on m32r, 1 on success or 0 on failure.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int oldval;
	unsigned long tmp1, tmp2;

	/*
	 * lock->slock :  =1 : unlocked
	 *             : <=0 : locked
	 * {
	 *   oldval = lock->slock; <--+ need atomic operation
	 *   lock->slock = 0;      <--+
	 * }
	 *
	 * The PSW is saved (mvfc) and interrupts are disabled
	 * (clrpsw #0x40) around the LOCK/UNLOCK pair that performs the
	 * atomic read-modify-write; mvtc restores the saved PSW.
	 */
	__asm__ __volatile__ (
		"# arch_spin_trylock		\n\t"
		"ldi	%1, #0;			\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%3")
		"lock	%0, @%3;		\n\t"
		"unlock	%1, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);

	return (oldval > 0);
}
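
/*
 * Example use (a minimal sketch; real callers go through the generic
 * spin_trylock() wrapper rather than calling this directly):
 *
 *	if (arch_spin_trylock(&lock)) {
 *		// ... critical section ...
 *		arch_spin_unlock(&lock);
 *	} else {
 *		// lock was busy: fall back or retry
 *	}
 */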

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp0, tmp1;

	/*
	 * lock->slock :  =1 : unlock
	 *             : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   lock->slock -= 1;  <-- need atomic operation
	 *   if (lock->slock == 0) break;
	 *   for ( ; lock->slock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# arch_spin_lock		\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	2b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&lock->slock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

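/*
 * Unlocking needs no atomic read-modify-write: a single aligned word
 * store suffices, and the mb() orders the critical section's memory
 * accesses before the store that publishes slock = 1.
 */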
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	mb();
	lock->slock = 1;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * Like x86, m32r implements read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */

/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock > 0)

/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
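
/*
 * Worked example of the bias encoding (assuming RW_LOCK_BIAS is
 * 0x01000000, per asm/spinlock_types.h):
 *
 *	lock == RW_LOCK_BIAS		unlocked
 *	lock == RW_LOCK_BIAS - n	held by n readers (n > 0)
 *	lock == 0			held by one writer
 *	lock <  0			writer involved; contenders spin
 *
 * A reader subtracts 1 and a writer subtracts the whole bias, so the
 * sign bit doubles as the "contended" flag tested by the branches
 * below.
 */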

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	/*
	 * rw->lock :  >0 : unlock
	 *          : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= 1;  <-- need atomic operation
	 *   if (rw->lock >= 0) break;
	 *   rw->lock += 1;  <-- need atomic operation
	 *   for ( ; rw->lock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# read_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	/*
	 * rw->lock :  =RW_LOCK_BIAS : unlock
	 *          : !=RW_LOCK_BIAS : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= RW_LOCK_BIAS;  <-- need atomic operation
	 *   if (rw->lock == 0) break;
	 *   rw->lock += RW_LOCK_BIAS;  <-- need atomic operation
	 *   for ( ; rw->lock != RW_LOCK_BIAS ; ) ;
	 * }
	 */
	__asm__ __volatile__ (
		"# write_lock					\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		".fillinsn					\n"
		"1:						\n\t"
		"mvfc	%2, psw;				\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"sub	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		"bnez	%0, 2f;					\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn					\n"
		"2:						\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"add	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		".fillinsn					\n"
		"3:						\n\t"
		"ld	%0, @%3;				\n\t"
		"beq	%0, %1, 1b;				\n\t"
		"bra	3b;					\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	__asm__ __volatile__ (
		"# read_unlock			\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	__asm__ __volatile__ (
		"# write_unlock					\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		"mvfc	%2, psw;				\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"add	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_dec_return(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
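
/*
 * Sketch of the trylock arithmetic above: atomic_sub_and_test()
 * returns true only when the subtraction yields zero, i.e. the counter
 * was exactly RW_LOCK_BIAS (no readers, no writer).  A failed attempt
 * is undone by adding the bias back, briefly making the lock look
 * write-contended to concurrent readers.
 */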

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif	/* _ASM_M32R_SPINLOCK_H */