/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
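/*
 * This is a ticket lock. Judging from the asm below (the "ror #16"/"lsr #16"
 * shifts and the halfword store in arch_spin_unlock), the 32-bit lock word
 * holds the owner ticket in the low halfword and the next free ticket in the
 * high halfword, with TICKET_SHIFT (16) coming from asm/spinlock_types.h.
 * A CPU takes a ticket by incrementing "next" and spins until "owner"
 * catches up with its ticket.
 */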

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n"
	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(
"	prfm	pstl1strm, %2\n"
"1:	ldaxr	%w0, %2\n"
	/* If owner != next, the lock is held: give up immediately. */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbnz	%w1, 2f\n"
	/* Otherwise take the next ticket; retry if the store-exclusive fails. */
"	add	%w0, %w0, %3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b\n"
"2:"
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* Bump the owner ticket; the store-release orders the critical section. */
	asm volatile(
"	stlrh	%w1, %0\n"
	: "=Q" (lock->owner)
	: "r" (lock->owner + 1)
	: "memory");
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = ACCESS_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended

/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0 since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
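/*
 * A sketch of the lock word states implied by the code below (assuming the
 * single 32-bit "lock" field from asm/spinlock_types.h):
 *
 *	0				unlocked
 *	0x80000000			write-locked
 *	N (0 < N < 0x80000000)		read-locked by N readers
 */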

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(
	/* Wait (in WFE) until the lock word is zero, then claim it by setting bit 31. */
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(
	/* Fail if the lock word is non-zero; otherwise try to set bit 31. */
	"	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1f\n"
	"	stxr	%w0, %w2, %1\n"
	"1:\n"
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");

	return !tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(
	"	stlr	%w1, %0\n"
	: "=Q" (rw->lock) : "r" (0) : "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)
163 
164 /*
165  * Read lock implementation.
166  *
167  * It exclusively loads the lock value, increments it and stores the new value
168  * back if positive and the CPU still exclusively owns the location. If the
169  * value is negative, the lock is already held.
170  *
171  * During unlocking there may be multiple active read locks but no write lock.
172  *
173  * The memory barriers are implicit with the load-acquire and store-release
174  * instructions.
175  */
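/*
 * Rough C-level pseudocode for the exclusive load/store loop below
 * (illustration only; store_excl() is a hypothetical stand-in for the STXR
 * instruction, and the real code also uses WFE to wait for a writer):
 *
 *	do {
 *		val = rw->lock + 1;
 *	} while (val & 0x80000000 || !store_excl(&rw->lock, val));
 */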
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1b\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 2b\n"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	/* Drop one reader; the store-release publishes the critical section. */
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2 = 1;

	asm volatile(
	/* Fail if a writer holds the lock (bit 31 set after the increment). */
	"	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1f\n"
	"	stxr	%w1, %w0, %2\n"
	"1:\n"
	: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");

	return !tmp2;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

/*
 * Accesses appearing in program order before a spin_lock() operation
 * can be reordered with accesses inside the critical section, by virtue
 * of arch_spin_lock being constructed using acquire semantics.
 *
 * In cases where this is problematic (e.g. try_to_wake_up), an
 * smp_mb__before_spinlock() can restore the required ordering.
 */
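/*
 * Hypothetical sketch of such a case ("cond" and "lock" are illustrative
 * names): without the extra barrier, the store to cond could be reordered
 * against accesses made inside the critical section.
 *
 *	cond = 1;
 *	smp_mb__before_spinlock();
 *	spin_lock(&lock);
 *	...			// critical section
 *	spin_unlock(&lock);
 */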
#define smp_mb__before_spinlock()	smp_mb()

#endif /* __ASM_SPINLOCK_H */