/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

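/*
 * The asm below implements a classic ticket lock: acquiring the lock
 * atomically increments the 'next' half of the word and then waits for the
 * 'owner' half to reach the ticket that was drawn; unlocking increments
 * 'owner' with release semantics. A minimal C sketch, with hypothetical
 * helper names and assuming the owner/next halfword layout of
 * arch_spinlock_t from asm/spinlock_types.h (TICKET_SHIFT == 16):
 *
 *	void ticket_lock(arch_spinlock_t *lock)
 *	{
 *		arch_spinlock_t old = fetch_add_acquire(lock, 1 << TICKET_SHIFT);
 *
 *		while (smp_load_acquire(&lock->owner) != old.next)
 *			cpu_relax();
 *	}
 *
 *	void ticket_unlock(arch_spinlock_t *lock)
 *	{
 *		smp_store_release(&lock->owner, lock->owner + 1);
 *	}
 *
 * The real implementation relies on LDAXR/STXR or the LSE LDADDA
 * instruction for the ticket increment and waits with wfe/sevl rather than
 * busy-polling.
 */
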
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

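	/*
	 * Operands below: %0 = lockval, %1 = newval, %2 = tmp, %3 = *lock,
	 * %4 = lock->owner and %5 = 1 << TICKET_SHIFT (the 'next' increment),
	 * in the order of the output and input constraint lists.
	 */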
	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, %w5\n"
"	ldadda	%w2, %w0, %3\n"
	__nops(3)
	)

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	prfm	pstl1strm, %2\n"
	"1:	ldaxr	%w0, %2\n"
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbnz	%w1, 2f\n"
	"	add	%w0, %w0, %3\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbnz	%w1, 1f\n"
	"	add	%w1, %w0, %3\n"
	"	casa	%w0, %w1, %2\n"
	"	sub	%w1, %w1, %3\n"
	"	eor	%w1, %w1, %w0\n"
	"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

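	/*
	 * In both variants above, tmp ends up zero only if this CPU moved the
	 * lock from unlocked to locked (a successful stxr, or a casa that
	 * observed the expected value), so !tmp is the trylock result.
	 */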
	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	staddlh	%w1, %0\n"
	__nops(1))
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	/*
	 * Ensure prior spin_lock operations to other locks have completed
	 * on this CPU before we test whether "lock" is locked.
	 */
	smp_mb(); /* ^^^ */
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = READ_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0 since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

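/*
 * A minimal sketch of the write-lock semantics, for reference only: taking
 * the lock amounts to an acquire compare-and-swap of 0 -> 0x80000000 on
 * rw->lock, retried until it succeeds (the real code below waits with
 * wfe/sevl rather than spinning on cpu_relax()):
 *
 *	while (cmpxchg_acquire(&rw->lock, 0, 0x80000000) != 0)
 *		cpu_relax();
 */
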
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	__nops(1),
	/* LSE atomics */
	"1:	mov	%w0, wzr\n"
	"2:	casa	%w0, %w2, %1\n"
	"	cbz	%w0, 3f\n"
	"	ldxr	%w0, %1\n"
	"	cbz	%w0, 2b\n"
	"	wfe\n"
	"	b	1b\n"
	"3:")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 2f\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 1b\n"
	"2:",
	/* LSE atomics */
	"	mov	%w0, wzr\n"
	"	casa	%w0, %w2, %1\n"
	__nops(2))
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");

	return !tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
	"	stlr	wzr, %0",
	"	swpl	wzr, wzr, %0")
	: "=Q" (rw->lock) :: "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)

/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new value
 * back if positive and the CPU still exclusively owns the location. If the
 * value is negative, a writer holds the lock and the reader must wait.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 *
 * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
 * and LSE implementations may exhibit different behaviour (although this
 * will have no effect on lockdep).
 */
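/*
 * A rough C equivalent of the read-lock fast path, for reference only (the
 * code below uses LL/SC or casa plus wfe-based waiting rather than a plain
 * retry loop): keep re-reading while a writer holds bit 31, then try to
 * bump the reader count with an acquire cmpxchg.
 *
 *	unsigned int old;
 *
 *	do {
 *		old = READ_ONCE(rw->lock);
 *	} while ((old & 0x80000000) ||
 *		 cmpxchg_acquire(&rw->lock, old, old + 1) != old);
 */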
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"	sevl\n"
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	wfe\n"
	"2:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1b\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 2b\n"
	__nops(1),
	/* LSE atomics */
	"1:	wfe\n"
	"2:	ldxr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1b\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w0, %w1, %w0\n"
	"	cbnz	%w0, 2b")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b",
	/* LSE atomics */
	"	movn	%w0, #0\n"
	"	staddl	%w0, %2\n"
	__nops(2))
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	mov	%w1, #1\n"
	"1:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 2f\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1f\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w1, %w1, %w0\n"
	__nops(1)
	"1:")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");

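	/*
	 * tmp2 is zero only if a reader reference was taken: it is the stxr
	 * status in the LL/SC case, and effectively "expected - observed"
	 * after the casa in the LSE case, which is zero exactly when the
	 * casa succeeded.
	 */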
	return !tmp2;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()	smp_mb()

#endif /* __ASM_SPINLOCK_H */