/*
 *  linux/include/asm-arm/locks.h
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Interrupt safe locking assembler.
 */
#ifndef __ASM_PROC_LOCKS_H
#define __ASM_PROC_LOCKS_H

#if __LINUX_ARM_ARCH__ >= 6

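/*
 * __down_op(ptr, fail): atomically decrement the count at ptr using an
 * ldrex/strex retry loop.  If the new count is negative the lock is
 * contended, so ip is loaded with ptr and the out-of-line "fail" handler
 * is called.  The trailing smp_mb() orders the acquire before any
 * following accesses.
 */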
#define __down_op(ptr,fail)			\
	({					\
	__asm__ __volatile__(			\
	"@ down_op\n"				\
"1:	ldrex	lr, [%0]\n"			\
"	sub	lr, lr, %1\n"			\
"	strex	ip, lr, [%0]\n"			\
"	teq	ip, #0\n"			\
"	bne	1b\n"				\
"	teq	lr, #0\n"			\
"	movmi	ip, %0\n"			\
"	blmi	" #fail				\
	:					\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc");			\
	smp_mb();				\
	})

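/*
 * __down_op_ret(ptr, fail): same decrement as __down_op, but the
 * expression yields a value: 0 when the count stayed non-negative,
 * otherwise whatever the "fail" handler (called with ptr in ip)
 * leaves in ip.
 */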
#define __down_op_ret(ptr,fail)			\
	({					\
		unsigned int ret;		\
	__asm__ __volatile__(			\
	"@ down_op_ret\n"			\
"1:	ldrex	lr, [%1]\n"			\
"	sub	lr, lr, %2\n"			\
"	strex	ip, lr, [%1]\n"			\
"	teq	ip, #0\n"			\
"	bne	1b\n"				\
"	teq	lr, #0\n"			\
"	movmi	ip, %1\n"			\
"	movpl	ip, #0\n"			\
"	blmi	" #fail "\n"			\
"	mov	%0, ip"				\
	: "=&r" (ret)				\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc");			\
	smp_mb();				\
	ret;					\
	})

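/*
 * __up_op(ptr, wake): release path.  The leading smp_mb() orders prior
 * accesses before the increment; the count at ptr is then atomically
 * incremented, and if the result is still <= 0 (someone is waiting)
 * the "wake" handler is called with ptr in ip.
 */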
#define __up_op(ptr,wake)			\
	({					\
	smp_mb();				\
	__asm__ __volatile__(			\
	"@ up_op\n"				\
"1:	ldrex	lr, [%0]\n"			\
"	add	lr, lr, %1\n"			\
"	strex	ip, lr, [%0]\n"			\
"	teq	ip, #0\n"			\
"	bne	1b\n"				\
"	cmp	lr, #0\n"			\
"	movle	ip, %0\n"			\
"	blle	" #wake				\
	:					\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc");			\
	})

/*
 * The value 0x01000000 supports up to 128 processors and
 * lots of processes.  BIAS must be chosen such that sub'ing
 * BIAS once per CPU will result in the long remaining
 * negative.
 */
#define RW_LOCK_BIAS      0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"

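/*
 * Read/write lock primitives built on the bias scheme described above:
 * a free lock holds RW_LOCK_BIAS, each reader subtracts 1, and a writer
 * subtracts the whole bias.  For example, with two readers holding the
 * lock the count is 0x00fffffe, so a writer's subtraction leaves a
 * non-zero (negative) result and it must wait.
 *
 * __down_op_write(ptr, fail): atomically subtract RW_LOCK_BIAS; the
 * result is zero only if the lock was completely free, so any non-zero
 * result calls the "fail" handler with ptr in ip.
 */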
#define __down_op_write(ptr,fail)		\
	({					\
	__asm__ __volatile__(			\
	"@ down_op_write\n"			\
"1:	ldrex	lr, [%0]\n"			\
"	sub	lr, lr, %1\n"			\
"	strex	ip, lr, [%0]\n"			\
"	teq	ip, #0\n"			\
"	bne	1b\n"				\
"	teq	lr, #0\n"			\
"	movne	ip, %0\n"			\
"	blne	" #fail				\
	:					\
	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
	: "ip", "lr", "cc");			\
	smp_mb();				\
	})

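/*
 * __up_op_write(ptr, wake): drop the write lock by adding RW_LOCK_BIAS
 * back.  If the addition sets the carry flag, the count had been driven
 * negative by contenders while the write lock was held, so the "wake"
 * handler is called with ptr in ip.  The leading smp_mb() provides the
 * release ordering.
 */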
#define __up_op_write(ptr,wake)			\
	({					\
	smp_mb();				\
	__asm__ __volatile__(			\
	"@ up_op_write\n"			\
"1:	ldrex	lr, [%0]\n"			\
"	adds	lr, lr, %1\n"			\
"	strex	ip, lr, [%0]\n"			\
"	teq	ip, #0\n"			\
"	bne	1b\n"				\
"	movcs	ip, %0\n"			\
"	blcs	" #wake				\
	:					\
	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
	: "ip", "lr", "cc");			\
	})

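/* A read lock is simply a single-unit down on the biased count. */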
#define __down_op_read(ptr,fail)		\
	__down_op(ptr, fail)

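/*
 * __up_op_read(ptr, wake): atomically increment the count; a result of
 * exactly zero means the last reader just left while a writer's bias
 * subtraction is outstanding, so the "wake" handler is called with ptr
 * in ip.
 */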
#define __up_op_read(ptr,wake)			\
	({					\
	smp_mb();				\
	__asm__ __volatile__(			\
	"@ up_op_read\n"			\
"1:	ldrex	lr, [%0]\n"			\
"	add	lr, lr, %1\n"			\
"	strex	ip, lr, [%0]\n"			\
"	teq	ip, #0\n"			\
"	bne	1b\n"				\
"	teq	lr, #0\n"			\
"	moveq	ip, %0\n"			\
"	bleq	" #wake				\
	:					\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc");			\
	})

#else

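/*
 * Pre-ARMv6 CPUs have no ldrex/strex, so the same operations are made
 * atomic by disabling IRQs around a plain load/modify/store: the I bit
 * (#128) is set in the CPSR, the count is updated, and the saved CPSR
 * is restored.  Each macro below otherwise matches the semantics of its
 * ARMv6 counterpart above.
 */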
#define __down_op(ptr,fail)			\
	({					\
	__asm__ __volatile__(			\
	"@ down_op\n"				\
"	mrs	ip, cpsr\n"			\
"	orr	lr, ip, #128\n"			\
"	msr	cpsr_c, lr\n"			\
"	ldr	lr, [%0]\n"			\
"	subs	lr, lr, %1\n"			\
"	str	lr, [%0]\n"			\
"	msr	cpsr_c, ip\n"			\
"	movmi	ip, %0\n"			\
"	blmi	" #fail				\
	:					\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc");			\
	smp_mb();				\
	})

#define __down_op_ret(ptr,fail)			\
	({					\
		unsigned int ret;		\
	__asm__ __volatile__(			\
	"@ down_op_ret\n"			\
"	mrs	ip, cpsr\n"			\
"	orr	lr, ip, #128\n"			\
"	msr	cpsr_c, lr\n"			\
"	ldr	lr, [%1]\n"			\
"	subs	lr, lr, %2\n"			\
"	str	lr, [%1]\n"			\
"	msr	cpsr_c, ip\n"			\
"	movmi	ip, %1\n"			\
"	movpl	ip, #0\n"			\
"	blmi	" #fail "\n"			\
"	mov	%0, ip"				\
	: "=&r" (ret)				\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc");			\
	smp_mb();				\
	ret;					\
	})

#define __up_op(ptr,wake)			\
	({					\
	smp_mb();				\
	__asm__ __volatile__(			\
	"@ up_op\n"				\
"	mrs	ip, cpsr\n"			\
"	orr	lr, ip, #128\n"			\
"	msr	cpsr_c, lr\n"			\
"	ldr	lr, [%0]\n"			\
"	adds	lr, lr, %1\n"			\
"	str	lr, [%0]\n"			\
"	msr	cpsr_c, ip\n"			\
"	movle	ip, %0\n"			\
"	blle	" #wake				\
	:					\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc");			\
	})

/*
 * The value 0x01000000 supports up to 128 processors and
 * lots of processes.  BIAS must be chosen such that sub'ing
 * BIAS once per CPU will result in the long remaining
 * negative.
 */
#define RW_LOCK_BIAS      0x01000000
#define RW_LOCK_BIAS_STR "0x01000000"

#define __down_op_write(ptr,fail)		\
	({					\
	__asm__ __volatile__(			\
	"@ down_op_write\n"			\
"	mrs	ip, cpsr\n"			\
"	orr	lr, ip, #128\n"			\
"	msr	cpsr_c, lr\n"			\
"	ldr	lr, [%0]\n"			\
"	subs	lr, lr, %1\n"			\
"	str	lr, [%0]\n"			\
"	msr	cpsr_c, ip\n"			\
"	movne	ip, %0\n"			\
"	blne	" #fail				\
	:					\
	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
	: "ip", "lr", "cc");			\
	smp_mb();				\
	})

#define __up_op_write(ptr,wake)			\
	({					\
	__asm__ __volatile__(			\
	"@ up_op_write\n"			\
"	mrs	ip, cpsr\n"			\
"	orr	lr, ip, #128\n"			\
"	msr	cpsr_c, lr\n"			\
"	ldr	lr, [%0]\n"			\
"	adds	lr, lr, %1\n"			\
"	str	lr, [%0]\n"			\
"	msr	cpsr_c, ip\n"			\
"	movcs	ip, %0\n"			\
"	blcs	" #wake				\
	:					\
	: "r" (ptr), "I" (RW_LOCK_BIAS)		\
	: "ip", "lr", "cc");			\
	smp_mb();				\
	})

#define __down_op_read(ptr,fail)		\
	__down_op(ptr, fail)

#define __up_op_read(ptr,wake)			\
	({					\
	smp_mb();				\
	__asm__ __volatile__(			\
	"@ up_op_read\n"			\
"	mrs	ip, cpsr\n"			\
"	orr	lr, ip, #128\n"			\
"	msr	cpsr_c, lr\n"			\
"	ldr	lr, [%0]\n"			\
"	adds	lr, lr, %1\n"			\
"	str	lr, [%0]\n"			\
"	msr	cpsr_c, ip\n"			\
"	moveq	ip, %0\n"			\
"	bleq	" #wake				\
	:					\
	: "r" (ptr), "I" (1)			\
	: "ip", "lr", "cc");			\
	})

#endif

#endif