#ifndef _ASM_M32R_SYSTEM_H
#define _ASM_M32R_SYSTEM_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001  Hiroyuki Kondo, Hirokazu Takata, and Hitoshi Yamamoto
 * Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/compiler.h>
#include <asm/assembler.h>

#ifdef __KERNEL__

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.
 *
 * `next' and `prev' should be struct task_struct, but it isn't always
 * defined at this point.
 */

#if defined(CONFIG_FRAME_POINTER) || \
	!defined(CONFIG_SCHED_OMIT_FRAME_POINTER)
#define M32R_PUSH_FP "	push fp\n"
#define M32R_POP_FP  "	pop  fp\n"
#else
#define M32R_PUSH_FP ""
#define M32R_POP_FP  ""
#endif

#define switch_to(prev, next, last)  do { \
	__asm__ __volatile__ ( \
		"	seth	lr, #high(1f)				\n" \
		"	or3	lr, lr, #low(1f)			\n" \
		"	st	lr, @%4  ; store old LR			\n" \
		"	ld	lr, @%5  ; load new LR			\n" \
			M32R_PUSH_FP \
		"	st	sp, @%2  ; store old SP			\n" \
		"	ld	sp, @%3  ; load new SP			\n" \
		"	push	%1  ; store `prev' on new stack		\n" \
		"	jmp	lr					\n" \
		"	.fillinsn					\n" \
		"1:							\n" \
		"	pop	%0  ; restore `__last' from new stack	\n" \
			M32R_POP_FP \
		: "=r" (last) \
		: "0" (prev), \
		  "r" (&(prev->thread.sp)), "r" (&(next->thread.sp)), \
		  "r" (&(prev->thread.lr)), "r" (&(next->thread.lr)) \
		: "memory", "lr" \
	); \
} while(0)
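
/*
 * Usage sketch (illustrative only, not part of this header): switch_to() is
 * meant to be invoked from the scheduler's context_switch().  This is a
 * minimal sketch of the calling convention under that assumption, not the
 * scheduler itself.
 */
#if 0
static void example_context_switch(struct task_struct *prev,
				   struct task_struct *next)
{
	/* `prev' doubles as the third argument: when this task is later
	 * resumed, it receives the task that ran just before it (popped as
	 * `__last' in the asm above). */
	switch_to(prev, next, prev);
}
#endif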

/* Interrupt Control */
#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
#define local_irq_enable() \
	__asm__ __volatile__ ("setpsw #0x40 -> nop": : :"memory")
#define local_irq_disable() \
	__asm__ __volatile__ ("clrpsw #0x40 -> nop": : :"memory")
#else	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
static inline void local_irq_enable(void)
{
	unsigned long tmpreg;
	__asm__ __volatile__(
		"mvfc	%0, psw;		\n\t"
		"or3	%0, %0, #0x0040;	\n\t"
		"mvtc	%0, psw;		\n\t"
	: "=&r" (tmpreg) : : "cbit", "memory");
}

static inline void local_irq_disable(void)
{
	unsigned long tmpreg0, tmpreg1;
	__asm__ __volatile__(
		"ld24	%0, #0	; Use 32-bit insn. \n\t"
		"mvfc	%1, psw	; No interrupt can be accepted here. \n\t"
		"mvtc	%0, psw	\n\t"
		"and3	%0, %1, #0xffbf	\n\t"
		"mvtc	%0, psw	\n\t"
	: "=&r" (tmpreg0), "=&r" (tmpreg1) : : "cbit", "memory");
}
#endif	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */

#define local_save_flags(x) \
	__asm__ __volatile__("mvfc %0,psw" : "=r"(x) : /* no input */)

#define local_irq_restore(x) \
	__asm__ __volatile__("mvtc %0,psw" : /* no outputs */ \
		: "r" (x) : "cbit", "memory")

#if !(defined(CONFIG_CHIP_M32102) || defined(CONFIG_CHIP_M32104))
#define local_irq_save(x)				\
	__asm__ __volatile__(				\
		"mvfc	%0, psw;		\n\t"	\
		"clrpsw	#0x40 -> nop;		\n\t"	\
		: "=r" (x) : /* no input */ : "memory")
#else	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */
#define local_irq_save(x)				\
	({						\
		unsigned long tmpreg;			\
		__asm__ __volatile__(			\
			"ld24	%1, #0 \n\t"		\
			"mvfc	%0, psw \n\t"		\
			"mvtc	%1, psw \n\t"		\
			"and3	%1, %0, #0xffbf \n\t"	\
			"mvtc	%1, psw \n\t"		\
			: "=r" (x), "=&r" (tmpreg)	\
			: : "cbit", "memory");		\
	})
#endif	/* CONFIG_CHIP_M32102 || CONFIG_CHIP_M32104 */

#define irqs_disabled()					\
	({						\
		unsigned long flags;			\
		local_save_flags(flags);		\
		!(flags & 0x40);			\
	})
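
/*
 * Usage sketch (illustrative only, not part of this header): the save/restore
 * pair is the usual way to protect a short critical section from local
 * interrupts; `some_counter' is a made-up variable for the example.
 */
#if 0
static int some_counter;

static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* mask IRQs, remember the old PSW */
	some_counter++;			/* must not be interrupted */
	local_irq_restore(flags);	/* restore the saved IE bit */
}
#endif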

#define nop()	__asm__ __volatile__ ("nop" : : )

#define xchg(ptr, x)							\
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#define xchg_local(ptr, x)						\
	((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr),	\
			sizeof(*(ptr))))

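/*
 * Usage sketch (illustrative only, not part of this header): xchg() as a
 * simple test-and-set style flag; `busy' is a made-up variable for the
 * example.
 */
#if 0
static unsigned int busy;

static int example_try_acquire(void)
{
	/* atomically store 1 and return the previous value: 0 means the
	 * flag was taken here, 1 means someone else already holds it */
	return xchg(&busy, 1) == 0;
}
#endif
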
extern void  __xchg_called_with_bad_pointer(void);

#ifdef CONFIG_CHIP_M32700_TS1
#define DCACHE_CLEAR(reg0, reg1, addr)				\
	"seth	"reg1", #high(dcache_dummy);		\n\t"	\
	"or3	"reg1", "reg1", #low(dcache_dummy);	\n\t"	\
	"lock	"reg0", @"reg1";			\n\t"	\
	"add3	"reg0", "addr", #0x1000;		\n\t"	\
	"ld	"reg0", @"reg0";			\n\t"	\
	"add3	"reg0", "addr", #0x2000;		\n\t"	\
	"ld	"reg0", @"reg0";			\n\t"	\
	"unlock	"reg0", @"reg1";			\n\t"
	/* FIXME: This workaround code cannot handle kernel modules
	 * correctly in an SMP environment.
	 */
#else	/* CONFIG_CHIP_M32700_TS1 */
#define DCACHE_CLEAR(reg0, reg1, addr)
#endif	/* CONFIG_CHIP_M32700_TS1 */

static __always_inline unsigned long
__xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);

	switch (size) {
#ifndef CONFIG_SMP
	case 1:
		__asm__ __volatile__ (
			"ldb	%0, @%2 \n\t"
			"stb	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh	%0, @%2 \n\t"
			"sth	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld	%0, @%2 \n\t"
			"st	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
#else  /* CONFIG_SMP */
	case 4:
		__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%2")
			"lock	%0, @%2;	\n\t"
			"unlock	%1, @%2;	\n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr)
			: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif	/* CONFIG_CHIP_M32700_TS1 */
		);
		break;
#endif  /* CONFIG_SMP */
	default:
		__xchg_called_with_bad_pointer();
	}

	local_irq_restore(flags);

	return (tmp);
}

static __always_inline unsigned long
__xchg_local(unsigned long x, volatile void *ptr, int size)
{
	unsigned long flags;
	unsigned long tmp = 0;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__ (
			"ldb	%0, @%2 \n\t"
			"stb	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 2:
		__asm__ __volatile__ (
			"ldh	%0, @%2 \n\t"
			"sth	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:
		__asm__ __volatile__ (
			"ld	%0, @%2 \n\t"
			"st	%1, @%2 \n\t"
			: "=&r" (tmp) : "r" (x), "r" (ptr) : "memory");
		break;
	default:
		__xchg_called_with_bad_pointer();
	}

	local_irq_restore(flags);

	return (tmp);
}

#define __HAVE_ARCH_CMPXCHG	1

static inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%1")
			M32R_LOCK" %0, @%1;	\n"
		"	bne	%0, %2, 1f;	\n"
			M32R_UNLOCK" %3, @%1;	\n"
		"	bra	2f;		\n"
		"	.fillinsn		\n"
		"1:"
			M32R_UNLOCK" %0, @%1;	\n"
		"	.fillinsn		\n"
		"2:"
			: "=&r" (retval)
			: "r" (p), "r" (old), "r" (new)
			: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif  /* CONFIG_CHIP_M32700_TS1 */
		);
	local_irq_restore(flags);

	return retval;
}

static inline unsigned long
__cmpxchg_local_u32(volatile unsigned int *p, unsigned int old,
			unsigned int new)
{
	unsigned long flags;
	unsigned int retval;

	local_irq_save(flags);
	__asm__ __volatile__ (
			DCACHE_CLEAR("%0", "r4", "%1")
			"ld %0, @%1;		\n"
		"	bne	%0, %2, 1f;	\n"
			"st %3, @%1;		\n"
		"	bra	2f;		\n"
		"	.fillinsn		\n"
		"1:"
			"st %0, @%1;		\n"
		"	.fillinsn		\n"
		"2:"
			: "=&r" (retval)
			: "r" (p), "r" (old), "r" (new)
			: "cbit", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
			, "r4"
#endif  /* CONFIG_CHIP_M32700_TS1 */
		);
	local_irq_restore(flags);

	return retval;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif /* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n)						 \
	((__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)(o),	 \
			(unsigned long)(n), sizeof(*(ptr))))
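
/*
 * Usage sketch (illustrative only, not part of this header): the classic
 * compare-and-swap retry loop; `shared' is a made-up variable for the
 * example.
 */
#if 0
static unsigned int shared;

static void example_atomic_increment(void)
{
	unsigned int old, new;

	do {
		old = shared;		/* snapshot the current value */
		new = old + 1;		/* compute the desired value */
		/* cmpxchg() stores `new' only if `shared' still equals
		 * `old' and returns the value it observed, so any
		 * interference simply causes another pass. */
	} while (cmpxchg(&shared, old, new) != old);
}
#endif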

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_local_u32(ptr, old, new);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					    \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	    \
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif  /* __KERNEL__ */

/*
 * Memory barrier.
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 */
#define mb()   barrier()
#define rmb()  mb()
#define wmb()  mb()
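
/*
 * Usage sketch (illustrative only, not part of this header): the usual
 * producer/consumer pairing of a write barrier with a read barrier; `data'
 * and `ready' are made-up variables for the example.
 */
#if 0
static int data;
static int ready;

static void example_producer(void)
{
	data = 42;	/* publish the payload first ... */
	wmb();		/* ... and only then the flag */
	ready = 1;
}

static void example_consumer(void)
{
	int d;

	if (ready) {
		rmb();		/* pairs with the wmb() above */
		d = data;	/* seeing ready == 1 implies data == 42 */
		(void)d;
	}
}
#endif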

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define arch_align_stack(x) (x)

#endif /* _ASM_M32R_SYSTEM_H */