#ifndef __SPARC64_SYSTEM_H
#define __SPARC64_SYSTEM_H

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/visasm.h>

#ifndef __ASSEMBLY__

#include <linux/irqflags.h>
#include <asm-generic/cmpxchg-local.h>

/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4        = 0x00,
	sun4c       = 0x01,
	sun4m       = 0x02,
	sun4d       = 0x03,
	sun4e       = 0x04,
	sun4u       = 0x05, /* V8 ploos ploos */
	sun_unknown = 0x06,
	ap1000      = 0x07, /* almost a sun4m */
};

#define sparc_cpu_model sun4u

/* This cannot ever be a sun4c :) That's just history. */
#define ARCH_SUN4C 0

extern char reboot_command[];

/* These are here in an effort to more fully work around Spitfire Errata
 * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur.  Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)

#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")

#endif /* !(__ASSEMBLY__) */

#define nop() 		__asm__ __volatile__ ("nop")

#define read_barrier_depends()		do { } while(0)
#define set_mb(__var, __value) \
	do { __var = __value; membar_safe("#StoreLoad"); } while(0)

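/* A minimal, hypothetical sketch of how set_mb() is used: store a value and
 * then issue membar #StoreLoad so the store is globally visible before any
 * later load (the classic "set my flag, then test the other side's flag"
 * pattern).  The names below are illustrative only and not part of this
 * header.
 */
#ifndef __ASSEMBLY__
static inline int __example_publish_and_check(volatile int *my_flag,
					      volatile int *other_flag)
{
	/* Publish our flag; without the #StoreLoad barrier the load below
	 * could be satisfied before our store became visible to other CPUs.
	 */
	set_mb(*my_flag, 1);
	return *other_flag;
}
#endif
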
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#endif

#define smp_read_barrier_depends()	do { } while(0)

#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")

#define flushw_all()	__asm__ __volatile__("flushw")

/* Performance counter register access. */
#define read_pcr(__p)  __asm__ __volatile__("rd	%%pcr, %0" : "=r" (__p))
#define write_pcr(__p) __asm__ __volatile__("wr	%0, 0x0, %%pcr" : : "r" (__p))
#define read_pic(__p)  __asm__ __volatile__("rd %%pic, %0" : "=r" (__p))

/* Blackbird errata workaround.  See commentary in
 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
 * for more information.
 */
#define write_pic(__p)  					\
	__asm__ __volatile__("ba,pt	%%xcc, 99f\n\t"		\
			     ".align	64\n"			\
			  "99:wr	%0, 0x0, %%pic\n\t"	\
			     "rd	%%pic, %%g0" : : "r" (__p))
#define reset_pic()	write_pic(0)

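/* A minimal, hypothetical sketch of the accessors above: snapshot the
 * performance instrumentation counter and clear it via the Blackbird-safe
 * write_pic() sequence.  __example_sample_pic is illustrative only; the
 * real user of these macros is the TIF_PERFCTR handling in switch_to()
 * below.
 */
#ifndef __ASSEMBLY__
static inline unsigned long __example_sample_pic(void)
{
	unsigned long pic;

	read_pic(pic);		/* rd %pic into pic */
	reset_pic();		/* write 0 to %pic, then rd %pic, %g0 */
	return pic;
}
#endif
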
#ifndef __ASSEMBLY__

extern void sun_do_break(void);
extern int stop_a_enabled;
extern int scons_pwroff;

extern void fault_in_user_windows(void);
extern void synchronize_user_stack(void);

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all

/* Don't hold the runqueue lock over context switch */
#define __ARCH_WANT_UNLOCKED_CTXSW
#define prepare_arch_switch(next)		\
do {						\
	flushw_all();				\
} while (0)

	/* See what happens when you design the chip correctly?
	 *
	 * We tell gcc we clobber all non-fixed-usage registers except
	 * for l0/l1.  It will use one for 'next' and the other to hold
	 * the output value of 'last'.  'next' is not referenced again
	 * past the invocation of switch_to in the scheduler, so we need
	 * not preserve its value.  Hairy, but it lets us remove 2 loads
	 * and 2 stores in this critical code path.  -DaveM
	 */
#define switch_to(prev, next, last)					\
do {	if (test_thread_flag(TIF_PERFCTR)) {				\
		unsigned long __tmp;					\
		read_pcr(__tmp);					\
		current_thread_info()->pcr_reg = __tmp;			\
		read_pic(__tmp);					\
		current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp);\
		current_thread_info()->kernel_cntd1 += ((__tmp) >> 32);	\
	}								\
	flush_tlb_pending();						\
	save_and_clear_fpu();						\
	/* If you are tempted to conditionalize the following */	\
	/* so that ASI is only written if it changes, think again. */	\
	__asm__ __volatile__("wr %%g0, %0, %%asi"			\
	: : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
	trap_block[current_thread_info()->cpu].thread =			\
		task_thread_info(next);					\
	__asm__ __volatile__(						\
	"mov	%%g4, %%g7\n\t"						\
	"stx	%%i6, [%%sp + 2047 + 0x70]\n\t"				\
	"stx	%%i7, [%%sp + 2047 + 0x78]\n\t"				\
	"rdpr	%%wstate, %%o5\n\t"					\
	"stx	%%o6, [%%g6 + %6]\n\t"					\
	"stb	%%o5, [%%g6 + %5]\n\t"					\
	"rdpr	%%cwp, %%o5\n\t"					\
	"stb	%%o5, [%%g6 + %8]\n\t"					\
	"wrpr	%%g0, 15, %%pil\n\t"					\
	"mov	%4, %%g6\n\t"						\
	"ldub	[%4 + %8], %%g1\n\t"					\
	"wrpr	%%g1, %%cwp\n\t"					\
	"ldx	[%%g6 + %6], %%o6\n\t"					\
	"ldub	[%%g6 + %5], %%o5\n\t"					\
	"ldub	[%%g6 + %7], %%o7\n\t"					\
	"wrpr	%%o5, 0x0, %%wstate\n\t"				\
	"ldx	[%%sp + 2047 + 0x70], %%i6\n\t"				\
	"ldx	[%%sp + 2047 + 0x78], %%i7\n\t"				\
	"ldx	[%%g6 + %9], %%g4\n\t"					\
	"wrpr	%%g0, 14, %%pil\n\t"					\
	"brz,pt %%o7, switch_to_pc\n\t"					\
	" mov	%%g7, %0\n\t"						\
	"sethi	%%hi(ret_from_syscall), %%g1\n\t"			\
	"jmpl	%%g1 + %%lo(ret_from_syscall), %%g0\n\t"		\
	" nop\n\t"							\
	".globl switch_to_pc\n\t"					\
	"switch_to_pc:\n\t"						\
	: "=&r" (last), "=r" (current), "=r" (current_thread_info_reg),	\
	  "=r" (__local_per_cpu_offset)					\
	: "0" (task_thread_info(next)),					\
	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD),            \
	  "i" (TI_CWP), "i" (TI_TASK)					\
	: "cc",								\
	        "g1", "g2", "g3",                   "g7",		\
	        "l1", "l2", "l3", "l4", "l5", "l6", "l7",		\
	  "i0", "i1", "i2", "i3", "i4", "i5",				\
	  "o0", "o1", "o2", "o3", "o4", "o5",       "o7");		\
	/* If you fuck with this, update ret_from_syscall code too. */	\
	if (test_thread_flag(TIF_PERFCTR)) {				\
		write_pcr(current_thread_info()->pcr_reg);		\
		reset_pic();						\
	}								\
} while(0)

static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	lduw		[%4], %2\n"
"	cas		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%icc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	ldx		[%4], %2\n"
"	casx		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%xcc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				       int size)
{
	switch (size) {
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

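/* A minimal, hypothetical usage sketch of xchg(): atomically take ownership
 * of a pending-work word, leaving zero behind.  The returned value is
 * whatever was there before the swap.  __example_take_pending is
 * illustrative only.
 */
static inline unsigned int __example_take_pending(unsigned int *pending)
{
	return xchg(pending, 0U);
}
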
extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("cas [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("casx [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
		case 4:
			return __cmpxchg_u32(ptr, old, new);
		case 8:
			return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })

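/* A minimal, hypothetical usage sketch of cmpxchg(): retry until our update
 * wins, detecting success by comparing the return value with the old value,
 * exactly as the comment above describes.  __example_atomic_inc is
 * illustrative only.
 */
static inline int __example_atomic_inc(volatile int *p)
{
	int old, new;

	do {
		old = *p;
		new = old + 1;
	} while (cmpxchg(p, old, new) != old);

	return new;
}
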
/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 4:
	case 8:	return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

#define cmpxchg_local(ptr, o, n)				  	\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })

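/* A minimal, hypothetical usage sketch of cmpxchg_local(): the same retry
 * loop as for cmpxchg(), but only guaranteed atomic with respect to the
 * current CPU, so it suits per-cpu data manipulated with preemption
 * disabled.  __example_local_add is illustrative only.
 */
static inline unsigned long __example_local_add(unsigned long *p, unsigned long inc)
{
	unsigned long old, new;

	do {
		old = *p;
		new = old + inc;
	} while (cmpxchg_local(p, old, new) != old);

	return new;
}
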
#endif /* !(__ASSEMBLY__) */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC64_SYSTEM_H) */