#ifndef _M68KNOMMU_SYSTEM_H
#define _M68KNOMMU_SYSTEM_H

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/entry.h>

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing.  This
 * also clears the TS-flag if the task we switched to last used the
 * math co-processor.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1. Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behavior when a task returns with
 * unexpected register values.
 *
 * syscall stores these registers itself and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1. This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 */
asmlinkage void resume(void);
#define switch_to(prev,next,last)				\
{								\
  void *_last;							\
  __asm__ __volatile__(						\
  	"movel	%1, %%a0\n\t"					\
	"movel	%2, %%a1\n\t"					\
	"jbsr resume\n\t"					\
	"movel	%%d1, %0\n\t"					\
       : "=d" (_last)						\
       : "d" (prev), "d" (next)					\
       : "cc", "d0", "d1", "d2", "d3", "d4", "d5", "a0", "a1");	\
  (last) = _last;						\
}
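
/*
 * Illustrative sketch (not part of the original header): roughly how the
 * scheduler's context-switch path is expected to use switch_to().  The
 * surrounding code and names here are hypothetical, not taken from this
 * file.
 *
 *	struct task_struct *last;
 *
 *	switch_to(prev, next, last);
 *	// Execution resumes here only when 'prev' is scheduled again;
 *	// 'last' then names the task we switched away from, as returned
 *	// by resume() in %d1.
 */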

#ifdef CONFIG_COLDFIRE
#define local_irq_enable() __asm__ __volatile__ (		\
	"move %/sr,%%d0\n\t"					\
	"andi.l #0xf8ff,%%d0\n\t"				\
	"move %%d0,%/sr\n"					\
	: /* no outputs */					\
	:							\
	: "cc", "%d0", "memory")
#define local_irq_disable() __asm__ __volatile__ (		\
	"move %/sr,%%d0\n\t"					\
	"ori.l #0x0700,%%d0\n\t"				\
	"move %%d0,%/sr\n"					\
	: /* no outputs */					\
	:							\
	: "cc", "%d0", "memory")
/* For spinlocks etc */
#define local_irq_save(x) __asm__ __volatile__ (		\
	"movew %%sr,%0\n\t"					\
	"movew #0x0700,%%d0\n\t"				\
	"or.l  %0,%%d0\n\t"					\
	"movew %%d0,%/sr"					\
	: "=d" (x)						\
	:							\
	: "cc", "%d0", "memory")
#else

/* portable version */ /* FIXME - see entry.h */
#define ALLOWINT 0xf8ff

#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
#define local_irq_disable() asm volatile ("oriw  #0x0700,%%sr": : : "memory")
#endif

#define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
#define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")

/* For spinlocks etc */
#ifndef local_irq_save
#define local_irq_save(x) do { local_save_flags(x); local_irq_disable(); } while (0)
#endif

#define	irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	((flags & 0x0700) == 0x0700);	\
})
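
/*
 * Illustrative sketch (not part of the original header): the usual pattern
 * for a short critical section using the helpers above.  The counter name
 * is hypothetical.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// mask interrupts, remember old %sr
 *	protected_count++;		// non-atomic update, now safe from IRQs
 *	local_irq_restore(flags);	// restore the previous interrupt mask
 *
 * Inside the critical section, irqs_disabled() returns true, since the
 * interrupt priority mask in %sr is raised to 0x0700.
 */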

#define iret() __asm__ __volatile__ ("rte": : :"memory", "sp", "cc")

/*
 * Force strict CPU ordering.
 * Not really required on m68k...
 */
#define nop()  asm volatile ("nop"::)
#define mb()   asm volatile (""   : : :"memory")
#define rmb()  asm volatile (""   : : :"memory")
#define wmb()  asm volatile (""   : : :"memory")
#define set_mb(var, value)	({ (var) = (value); wmb(); })

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define read_barrier_depends()  ((void)0)

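/*
 * Illustrative sketch (not part of the original header): ordering a data
 * publication against a ready flag with wmb().  The variables here are
 * hypothetical; on this port the barrier is a compiler barrier only.
 *
 *	shared_data = value;	// write the payload first
 *	wmb();			// keep the payload store before the flag store
 *	data_ready = 1;		// then publish the flag
 */
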
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#ifndef CONFIG_RMW_INSNS
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
  unsigned long tmp, flags;

  local_irq_save(flags);

  switch (size) {
  case 1:
    __asm__ __volatile__
    ("moveb %2,%0\n\t"
     "moveb %1,%2"
    : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
    break;
  case 2:
    __asm__ __volatile__
    ("movew %2,%0\n\t"
     "movew %1,%2"
    : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
    break;
  case 4:
    __asm__ __volatile__
    ("movel %2,%0\n\t"
     "movel %1,%2"
    : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
    break;
  }
  local_irq_restore(flags);
  return tmp;
}
#else
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	    case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "1:\n\t"
			 "casb %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	    case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "1:\n\t"
			 "casw %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	    case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "1:\n\t"
			 "casl %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	return x;
}
#endif
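
/*
 * Illustrative sketch (not part of the original header): xchg() swaps in a
 * new value and returns the old one in a single atomic step (IRQ-masked or
 * CAS-based, depending on CONFIG_RMW_INSNS).  The names 'pending' and
 * 'handle_work' are hypothetical.
 *
 *	static volatile unsigned long pending;
 *
 *	unsigned long work = xchg(&pending, 0);	// atomically fetch and clear
 *	if (work)
 *		handle_work(work);		// hypothetical consumer
 */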

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
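
/*
 * Illustrative sketch (not part of the original header): a retry loop with
 * cmpxchg_local(), which only has to be atomic with respect to this CPU.
 * The 'counter' variable is hypothetical.
 *
 *	static unsigned long counter;
 *	unsigned long old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg_local(&counter, old, new) != old);
 */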

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#if defined(CONFIG_M68328) || defined(CONFIG_M68EZ328) || \
	defined(CONFIG_M68360) || defined(CONFIG_M68VZ328)
#define HARD_RESET_NOW() ({		\
        local_irq_disable();		\
        asm("				\
        moveal #0x10c00000, %a0;	\
        moveb #0, 0xFFFFF300;		\
        moveal 0(%a0), %sp;		\
        moveal 4(%a0), %a0;		\
        jmp (%a0);			\
        ");				\
})
#endif

#ifdef CONFIG_COLDFIRE
#if defined(CONFIG_M5272) && defined(CONFIG_NETtel)
/*
 * Need to account for the broken early mask of 5272 silicon. So don't
 * jump through the original start address. Jump straight into the
 * known start of the FLASH code.
 */
#define HARD_RESET_NOW() ({		\
        asm("				\
	movew #0x2700, %sr;		\
        jmp 0xf0000400;			\
        ");				\
})
#elif defined(CONFIG_NETtel) || \
      defined(CONFIG_SECUREEDGEMP3) || defined(CONFIG_CLEOPATRA)
#define HARD_RESET_NOW() ({		\
        asm("				\
	movew #0x2700, %sr;		\
	moveal #0x10000044, %a0;	\
	movel #0xffffffff, (%a0);	\
	moveal #0x10000001, %a0;	\
	moveb #0x00, (%a0);		\
        moveal #0xf0000004, %a0;	\
        moveal (%a0), %a0;		\
        jmp (%a0);			\
        ");				\
})
#elif defined(CONFIG_M5272)
/*
 * Retrieve the boot address in flash using CSBR0 and CSOR0,
 * find the reset vector at flash_address + 4 (e.g. 0x400),
 * remap it to the flash's current location (e.g. 0xf0000400),
 * and jump there.
 */
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew #0x2700, %%sr;		\
	move.l	%0+0x40,%%d0;		\
	and.l	%0+0x44,%%d0;		\
	andi.l	#0xfffff000,%%d0;	\
	mov.l	%%d0,%%a0;		\
	or.l	4(%%a0),%%d0;		\
	mov.l	%%d0,%%a0;		\
	jmp (%%a0);"			\
	: /* No output */		\
	: "o" (*(char *)MCF_MBAR) );	\
})
#elif defined(CONFIG_M528x)
/*
 * The MCF528x has a bit (SOFTRST) in memory (Reset Control Register RCR)
 * which, when set, resets the MCF528x.
 */
#define HARD_RESET_NOW() \
({						\
	unsigned char volatile *reset;		\
	asm("move.w	#0x2700, %sr");		\
	reset = ((volatile unsigned char *)(MCF_IPSBAR + 0x110000));	\
	while(1)				\
		*reset |= (0x01 << 7);		\
})
#elif defined(CONFIG_M523x)
#define HARD_RESET_NOW() ({		\
	asm("				\
	movew #0x2700, %sr;		\
	movel #0x01000000, %sp;		\
	moveal #0x40110000, %a0;	\
	moveb #0x80, (%a0);		\
	");				\
})
#elif defined(CONFIG_M520x)
	/*
	 * The MCF5208 has a bit (SOFTRST) in memory (Reset Control Register
	 * RCR) which, when set, resets the MCF5208.
	 */
#define HARD_RESET_NOW()		\
({					\
	unsigned char volatile *reset;	\
	asm("move.w     #0x2700, %sr");	\
	reset = ((volatile unsigned char *)(MCF_IPSBAR + 0xA0000));	\
	while(1)			\
		*reset |= 0x80;		\
})
#else
#define HARD_RESET_NOW() ({		\
        asm("				\
	movew #0x2700, %sr;		\
        moveal #0x4, %a0;		\
        moveal (%a0), %a0;		\
        jmp (%a0);			\
        ");				\
})
#endif
#endif
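
/*
 * Illustrative sketch (not part of the original header): where one of the
 * HARD_RESET_NOW() variants above is defined, a board's restart hook can
 * simply invoke it.  The function name below is hypothetical.
 *
 *	static void my_board_reset(void)
 *	{
 *		HARD_RESET_NOW();	// masks interrupts and never returns
 *	}
 */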
#define arch_align_stack(x) (x)


static inline int irqs_disabled_flags(unsigned long flags)
{
	if (flags & 0x0700)
		return 1;
	else
		return 0;
}

#endif /* _M68KNOMMU_SYSTEM_H */