/*
 * arch/sh/kernel/cpu/sh5/entry.S
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2004 - 2008  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/errno.h>
#include <linux/sys.h>
#include <cpu/registers.h>
#include <asm/processor.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

/*
 * SR fields.
 */
#define SR_ASID_MASK	0x00ff0000
#define SR_FD_MASK	0x00008000
#define SR_SS		0x08000000
#define SR_BL		0x10000000
#define SR_MD		0x40000000

/*
 * Event code.
 */
#define	EVENT_INTERRUPT		0
#define	EVENT_FAULT_TLB		1
#define	EVENT_FAULT_NOT_TLB	2
#define	EVENT_DEBUG		3

/* EXPEVT values */
#define	RESET_CAUSE		0x20
#define DEBUGSS_CAUSE		0x980

/*
 * Frame layout. Quad index.
 */
#define	FRAME_T(x)	FRAME_TBASE+(x*8)
#define	FRAME_R(x)	FRAME_RBASE+(x*8)
#define	FRAME_S(x)	FRAME_SBASE+(x*8)
#define FSPC		0
#define FSSR		1
#define FSYSCALL_ID	2

/* Arrange the save frame to be a multiple of 32 bytes long */
#define FRAME_SBASE	0
#define FRAME_RBASE	(FRAME_SBASE+(3*8))	/* SYSCALL_ID - SSR - SPC */
#define FRAME_TBASE	(FRAME_RBASE+(63*8))	/* r0 - r62 */
#define FRAME_PBASE	(FRAME_TBASE+(8*8))	/* tr0 - tr7 */
#define	FRAME_SIZE	(FRAME_PBASE+(2*8))	/* pad0-pad1 */
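/*
 * Sanity check of the layout above: FRAME_SBASE = 0, FRAME_RBASE = 24,
 * FRAME_TBASE = 24 + 504 = 528, FRAME_PBASE = 528 + 64 = 592, and
 * FRAME_SIZE = 592 + 16 = 608 = 19*32, so the BASIC frame really is a
 * whole number of 32-byte cache lines, as the comment above requires.
 */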

#define FP_FRAME_SIZE	FP_FRAME_BASE+(33*8)	/* dr0 - dr31 + fpscr */
#define FP_FRAME_BASE	0

#define	SAVED_R2	0*8
#define	SAVED_R3	1*8
#define	SAVED_R4	2*8
#define	SAVED_R5	3*8
#define	SAVED_R18	4*8
#define	SAVED_R6	5*8
#define	SAVED_TR0	6*8

/* These are the registers saved in the TLB path that aren't saved in the first
   level of the normal one. */
#define	TLB_SAVED_R25	7*8
#define	TLB_SAVED_TR1	8*8
#define	TLB_SAVED_TR2	9*8
#define	TLB_SAVED_TR3	10*8
#define	TLB_SAVED_TR4	11*8
/* Save R0/R1: the PT-migrating compiler currently dishonours -ffixed-r0 and
   -ffixed-r1, causing breakage otherwise. */
#define	TLB_SAVED_R0	12*8
#define	TLB_SAVED_R1	13*8
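/* The slots above run from offset 0*8 to 13*8, i.e. 14 quadwords in all,
   matching the 14 .quad entries reserved in reg_save_area below. */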

#define CLI()				\
	getcon	SR, r6;			\
	ori	r6, 0xf0, r6;		\
	putcon	r6, SR;

#define STI()				\
	getcon	SR, r6;			\
	andi	r6, ~0xf0, r6;		\
	putcon	r6, SR;
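/*
 * CLI() raises SR.IMASK (bits [7:4]) to 0xf, masking all interrupt
 * levels; STI() clears the field again. Both clobber r6, so they are
 * only used at points where r6 holds nothing live.
 */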

#ifdef CONFIG_PREEMPT
#  define preempt_stop()	CLI()
#else
#  define preempt_stop()
#  define resume_kernel		restore_all
#endif

	.section	.data, "aw"

#define FAST_TLBMISS_STACK_CACHELINES 4
#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)

/* Register back-up area for all exceptions */
	.balign	32
	/* Allow for 16 quadwords to be pushed by fast tlbmiss handling
	 * register saves etc. */
	.fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
/* This is 32 byte aligned by construction */
/* Register back-up area for all exceptions */
reg_save_area:
	.quad	0
	.quad	0
	.quad	0
	.quad	0

	.quad	0
	.quad	0
	.quad	0
	.quad	0

	.quad	0
	.quad	0
	.quad	0
	.quad	0

	.quad	0
	.quad	0

/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
 * reentrancy. Note this area may be accessed via physical address.
 * Align so this fits a whole single cache line, for ease of purging.
 */
	.balign 32,0,32
resvec_save_area:
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.quad	0
	.balign 32,0,32

/* Jump table of 3rd level handlers  */
trap_jtable:
	.long	do_exception_error		/* 0x000 */
	.long	do_exception_error		/* 0x020 */
#ifdef CONFIG_MMU
	.long	tlb_miss_load				/* 0x040 */
	.long	tlb_miss_store				/* 0x060 */
#else
	.long	do_exception_error
	.long	do_exception_error
#endif
	! ARTIFICIAL pseudo-EXPEVT setting
	.long	do_debug_interrupt		/* 0x080 */
#ifdef CONFIG_MMU
	.long	tlb_miss_load				/* 0x0A0 */
	.long	tlb_miss_store				/* 0x0C0 */
#else
	.long	do_exception_error
	.long	do_exception_error
#endif
	.long	do_address_error_load	/* 0x0E0 */
	.long	do_address_error_store	/* 0x100 */
#ifdef CONFIG_SH_FPU
	.long	do_fpu_error		/* 0x120 */
#else
	.long	do_exception_error		/* 0x120 */
#endif
	.long	do_exception_error		/* 0x140 */
	.long	system_call				/* 0x160 */
	.long	do_reserved_inst		/* 0x180 */
	.long	do_illegal_slot_inst	/* 0x1A0 */
	.long	do_exception_error		/* 0x1C0 - NMI */
	.long	do_exception_error		/* 0x1E0 */
	.rept 15
		.long do_IRQ		/* 0x200 - 0x3C0 */
	.endr
	.long	do_exception_error		/* 0x3E0 */
	.rept 32
		.long do_IRQ		/* 0x400 - 0x7E0 */
	.endr
	.long	fpu_error_or_IRQA			/* 0x800 */
	.long	fpu_error_or_IRQB			/* 0x820 */
	.long	do_IRQ			/* 0x840 */
	.long	do_IRQ			/* 0x860 */
	.rept 6
		.long do_exception_error	/* 0x880 - 0x920 */
	.endr
	.long	do_software_break_point	/* 0x940 */
	.long	do_exception_error		/* 0x960 */
	.long	do_single_step		/* 0x980 */

	.rept 3
		.long do_exception_error	/* 0x9A0 - 0x9E0 */
	.endr
	.long	do_IRQ			/* 0xA00 */
	.long	do_IRQ			/* 0xA20 */
#ifdef CONFIG_MMU
	.long	itlb_miss_or_IRQ			/* 0xA40 */
#else
	.long	do_IRQ
#endif
	.long	do_IRQ			/* 0xA60 */
	.long	do_IRQ			/* 0xA80 */
#ifdef CONFIG_MMU
	.long	itlb_miss_or_IRQ			/* 0xAA0 */
#else
	.long	do_IRQ
#endif
	.long	do_exception_error		/* 0xAC0 */
	.long	do_address_error_exec	/* 0xAE0 */
	.rept 8
		.long do_exception_error	/* 0xB00 - 0xBE0 */
	.endr
	.rept 18
		.long do_IRQ		/* 0xC00 - 0xE20 */
	.endr
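/*
 * Each entry above is a 4-byte pointer, and entry n serves the
 * EXPEVT/INTEVT code n * 0x20; see the note at the dispatch site in
 * handle_exception for how the event code is turned into an index.
 */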

	.section	.text64, "ax"

/*
 * --- Exception/Interrupt/Event Handling Section
 */

/*
 * VBR and RESVEC blocks.
 *
 * First level handler for VBR-based exceptions.
 *
 * To avoid waste of space, align to the maximum text block size.
 * This is assumed to be at most 128 bytes or 32 instructions.
 * DO NOT EXCEED 32 instructions on the first level handlers!
 *
 * Also note that RESVEC is contained within the VBR block
 * where the room left (1KB - TEXT_SIZE) allows placing
 * the RESVEC block (at most 512B + TEXT_SIZE).
 *
 * So first (and only) level handler for RESVEC-based exceptions.
 *
 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
 * and interrupt) we are very tight on register space until
 * saving onto the stack frame, which is done in handle_exception().
 *
 */

#define	TEXT_SIZE	128
#define	BLOCK_SIZE	1664		/* Dynamic check, 13*128 */

	.balign TEXT_SIZE
LVBR_block:
	.space	256, 0			/* Power-on class handler, */
					/* not required here       */
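	/*
	 * By construction the handlers below land on the architectural
	 * vector offsets: not_a_tlb_miss at VBR+0x100 (general exception),
	 * tlb_miss at VBR+0x400 and interrupt at VBR+0x600.
	 */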
not_a_tlb_miss:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/* Save original stack pointer into KCR1 */
	putcon	SP, KCR1

	/* Save other original registers into reg_save_area */
	movi	reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3

	/* Set args for Non-debug, Not a TLB miss class handler */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3
	movi	EVENT_FAULT_NOT_TLB, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO

	.balign 256
	! VBR+0x200
	nop
	.balign 256
	! VBR+0x300
	nop
	.balign 256
	/*
	 * Instead of the natural .balign 1024 place RESVEC here
	 * respecting the final 1KB alignment.
	 */
	.balign TEXT_SIZE
	/*
	 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
	 * block making sure the final alignment is correct.
	 */
#ifdef CONFIG_MMU
tlb_miss:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	putcon	SP, KCR1
	movi	reg_save_area, SP
	/* SP is guaranteed 32-byte aligned. */
	st.q	SP, TLB_SAVED_R0, r0
	st.q	SP, TLB_SAVED_R1, r1
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18

	/* Save R25 for safety; as/ld may want to use it to achieve the call to
	 * the code in mm/tlbmiss.c */
	st.q	SP, TLB_SAVED_R25, r25
	gettr	tr0, r2
	gettr	tr1, r3
	gettr	tr2, r4
	gettr	tr3, r5
	gettr	tr4, r18
	st.q	SP, SAVED_TR0, r2
	st.q	SP, TLB_SAVED_TR1, r3
	st.q	SP, TLB_SAVED_TR2, r4
	st.q	SP, TLB_SAVED_TR3, r5
	st.q	SP, TLB_SAVED_TR4, r18

	pt	do_fast_page_fault, tr0
	getcon	SSR, r2
	getcon	EXPEVT, r3
	getcon	TEA, r4
	shlri	r2, 30, r2
	andi	r2, 1, r2	/* r2 = SSR.MD */
	blink	tr0, LINK

	pt	fixup_to_invoke_general_handler, tr1

	/* If the fast path handler fixed the fault, just drop through quickly
	   to the restore code right away to return to the excepting context.
	   */
	beqi/u	r2, 0, tr1

fast_tlb_miss_restore:
	ld.q	SP, SAVED_TR0, r2
	ld.q	SP, TLB_SAVED_TR1, r3
	ld.q	SP, TLB_SAVED_TR2, r4

	ld.q	SP, TLB_SAVED_TR3, r5
	ld.q	SP, TLB_SAVED_TR4, r18

	ptabs	r2, tr0
	ptabs	r3, tr1
	ptabs	r4, tr2
	ptabs	r5, tr3
	ptabs	r18, tr4

	ld.q	SP, TLB_SAVED_R0, r0
	ld.q	SP, TLB_SAVED_R1, r1
	ld.q	SP, SAVED_R2, r2
	ld.q	SP, SAVED_R3, r3
	ld.q	SP, SAVED_R4, r4
	ld.q	SP, SAVED_R5, r5
	ld.q	SP, SAVED_R6, r6
	ld.q	SP, SAVED_R18, r18
	ld.q	SP, TLB_SAVED_R25, r25

	getcon	KCR1, SP
	rte
	nop /* for safety, in case the code is run on sh5-101 cut1.x */

fixup_to_invoke_general_handler:

	/* OK, new method.  Restore stuff that's not expected to get saved into
	   the 'first-level' reg save area, then just fall through to setting
	   up the registers and calling the second-level handler. */

	/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved.  So we must restore
	   r25,tr1-4 and save r6 to get into the right state.  */

	ld.q	SP, TLB_SAVED_TR1, r3
	ld.q	SP, TLB_SAVED_TR2, r4
	ld.q	SP, TLB_SAVED_TR3, r5
	ld.q	SP, TLB_SAVED_TR4, r18
	ld.q	SP, TLB_SAVED_R25, r25

	ld.q	SP, TLB_SAVED_R0, r0
	ld.q	SP, TLB_SAVED_R1, r1

	ptabs/u	r3, tr1
	ptabs/u	r4, tr2
	ptabs/u	r5, tr3
	ptabs/u	r18, tr4

	/* Set args for Non-debug, TLB miss class handler */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3
	movi	EVENT_FAULT_TLB, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO
#else /* CONFIG_MMU */
	.balign 256
#endif

/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
   DOES END UP AT VBR+0x600 */
	nop
	nop
	nop
	nop
	nop
	nop

	.balign 256
	/* VBR + 0x600 */

interrupt:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/* Save original stack pointer into KCR1 */
	putcon	SP, KCR1

	/* Save other original registers into reg_save_area */
	movi	reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3

	/* Set args for interrupt class handler */
	getcon	INTEVT, r2
	movi	ret_from_irq, r3
	ori	r3, 1, r3
	movi	EVENT_INTERRUPT, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO
	.balign	TEXT_SIZE		/* let's waste the bare minimum */

LVBR_block_end:				/* Marker. Used for total checking */

	.balign 256
LRESVEC_block:
	/* Panic handler. Called with MMU off. Possible causes/actions:
	 * - Reset:		Jump to program start.
	 * - Single Step:	Turn off Single Step & return.
	 * - Others:		Call panic handler, passing PC as arg.
	 *			(this may need to be extended...)
	 */
reset_or_panic:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	putcon	SP, DCR
	/* First save r0-1 and tr0, as we need to use these */
	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
	st.q	SP, 0, r0
	st.q	SP, 8, r1
	gettr	tr0, r0
	st.q	SP, 32, r0

	/* Check cause */
	getcon	EXPEVT, r0
	movi	RESET_CAUSE, r1
	sub	r1, r0, r1		/* r1=0 if reset */
	movi	_stext-CONFIG_PAGE_OFFSET, r0
	ori	r0, 1, r0
	ptabs	r0, tr0
	beqi	r1, 0, tr0		/* Jump to start address if reset */

	getcon	EXPEVT, r0
	movi	DEBUGSS_CAUSE, r1
	sub	r1, r0, r1		/* r1=0 if single step */
	pta	single_step_panic, tr0
	beqi	r1, 0, tr0		/* jump if single step */

	/* Now jump to where we save the registers. */
	movi	panic_stash_regs-CONFIG_PAGE_OFFSET, r1
	ptabs	r1, tr0
	blink	tr0, r63

single_step_panic:
	/* We are in a handler with Single Step set. We need to resume the
	 * handler, by turning on MMU & turning off Single Step. */
	getcon	SSR, r0
	movi	SR_MMU, r1
	or	r0, r1, r0
	movi	~SR_SS, r1
	and	r0, r1, r0
	putcon	r0, SSR
	/* Restore EXPEVT, as the rte won't do this */
	getcon	PEXPEVT, r0
	putcon	r0, EXPEVT
	/* Restore regs */
	ld.q	SP, 32, r0
	ptabs	r0, tr0
	ld.q	SP, 0, r0
	ld.q	SP, 8, r1
	getcon	DCR, SP
	synco
	rte


	.balign	256
debug_exception:
	synco	/* TAKum03020 (but probably a good idea anyway.) */
	/*
	 * Single step/software_break_point first level handler.
	 * Called with MMU off, so the first thing we do is enable it
	 * by doing an rte with appropriate SSR.
	 */
	putcon	SP, DCR
	/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP

	/* With the MMU off, we are bypassing the cache, so purge any
	 * data that will be made stale by the following stores.
	 */
	ocbp	SP, 0
	synco

	st.q	SP, 0, r0
	st.q	SP, 8, r1
	getcon	SPC, r0
	st.q	SP, 16, r0
	getcon	SSR, r0
	st.q	SP, 24, r0

	/* Enable MMU, block exceptions, set priv mode, disable single step */
	movi	SR_MMU | SR_BL | SR_MD, r1
	or	r0, r1, r0
	movi	~SR_SS, r1
	and	r0, r1, r0
	putcon	r0, SSR
	/* Force control to debug_exception_2 when rte is executed */
	movi	debug_exception_2, r0
	ori	r0, 1, r0	/* force SHmedia, just in case */
	putcon	r0, SPC
	getcon	DCR, SP
	synco
	rte
debug_exception_2:
	/* Restore saved regs */
	putcon	SP, KCR1
	movi	resvec_save_area, SP
	ld.q	SP, 24, r0
	putcon	r0, SSR
	ld.q	SP, 16, r0
	putcon	r0, SPC
	ld.q	SP, 0, r0
	ld.q	SP, 8, r1

	/* Save other original registers into reg_save_area */
	movi	reg_save_area, SP
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3

	/* Set args for debug class handler */
	getcon	EXPEVT, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3
	movi	EVENT_DEBUG, r4
	or	SP, ZERO, r5
	getcon	KCR1, SP
	pta	handle_exception, tr0
	blink	tr0, ZERO

	.balign	256
debug_interrupt:
	/* !!! WE COME HERE IN REAL MODE !!! */
	/* Hook-up debug interrupt to allow various debugging options to be
	 * hooked into its handler. */
	/* Save original stack pointer into KCR1 */
	synco
	putcon	SP, KCR1
	movi	resvec_save_area-CONFIG_PAGE_OFFSET, SP
	ocbp	SP, 0
	ocbp	SP, 32
	synco

	/* Save other original registers into resvec_save_area through real
	   addresses */
	st.q	SP, SAVED_R2, r2
	st.q	SP, SAVED_R3, r3
	st.q	SP, SAVED_R4, r4
	st.q	SP, SAVED_R5, r5
	st.q	SP, SAVED_R6, r6
	st.q	SP, SAVED_R18, r18
	gettr	tr0, r3
	st.q	SP, SAVED_TR0, r3

	/* move (spc,ssr)->(pspc,pssr).  The rte will shift
	   them back again, so that they look like the originals
	   as far as the real handler code is concerned. */
	getcon	spc, r6
	putcon	r6, pspc
	getcon	ssr, r6
	putcon	r6, pssr

	! construct useful SR for handle_exception
	movi	3, r6
	shlli	r6, 30, r6
	getcon	sr, r18
	or	r18, r6, r6
	putcon	r6, ssr

	! SSR is now the current SR with the MD and MMU bits set
	! i.e. the rte will switch back to priv mode and put
	! the mmu back on

	! construct spc
	movi	handle_exception, r18
	ori	r18, 1, r18		! for safety (do we need this?)
	putcon	r18, spc

	/* Set args for Non-debug, Not a TLB miss class handler */

	! EXPEVT==0x80 is unused, so 'steal' this value to put the
	! debug interrupt handler in the vectoring table
	movi	0x80, r2
	movi	ret_from_exception, r3
	ori	r3, 1, r3
	movi	EVENT_FAULT_NOT_TLB, r4

	or	SP, ZERO, r5
	movi	CONFIG_PAGE_OFFSET, r6
	add	r6, r5, r5
	getcon	KCR1, SP

	synco	! for safety
	rte	! -> handle_exception, switch back to priv mode again

LRESVEC_block_end:			/* Marker. Unused. */

	.balign	TEXT_SIZE

/*
 * Second level handler for VBR-based exceptions. Pre-handler.
 * Common to all stack-frame sensitive handlers.
 *
 * Inputs:
 * (KCR0) Current [current task union]
 * (KCR1) Original SP
 * (r2)   INTEVT/EXPEVT
 * (r3)   appropriate return address
 * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3 = debug)
 * (r5)   Pointer to reg_save_area
 * (SP)   Original SP
 *
 * Available registers:
 * (r6)
 * (r18)
 * (tr0)
 *
 */
handle_exception:
	/* Common 2nd level handler. */

	/* First thing, we need an appropriate stack pointer */
	getcon	SSR, r6
	shlri	r6, 30, r6
	andi	r6, 1, r6
	pta	stack_ok, tr0
	bne	r6, ZERO, tr0		/* Original stack pointer is fine */

	/* Set stack pointer for user fault */
	getcon	KCR0, SP
	movi	THREAD_SIZE, r6		/* Point to the end */
	add	SP, r6, SP

stack_ok:

/* DEBUG : check for underflow/overflow of the kernel stack */
	pta	no_underflow, tr0
	getcon	KCR0, r6
	movi	1024, r18
	add	r6, r18, r6
	bge	SP, r6, tr0	! ? below 1k from bottom of stack : danger zone

/* Just panic to cause a crash. */
bad_sp:
	ld.b	r63, 0, r6
	nop

no_underflow:
	pta	bad_sp, tr0
	getcon	kcr0, r6
	movi	THREAD_SIZE, r18
	add	r18, r6, r6
	bgt	SP, r6, tr0	! sp above the stack
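/* Taken together, the two checks above demand
   KCR0 + 1024 <= SP <= KCR0 + THREAD_SIZE,
   i.e. SP must lie inside the current thread's stack area with at least
   1KB of headroom left above the bottom of that area. */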

	/* Make some room for the BASIC frame. */
	movi	-(FRAME_SIZE), r6
	add	SP, r6, SP

/* Could do this with no stalling if we had another spare register, but the
   code below will be OK. */
	ld.q	r5, SAVED_R2, r6
	ld.q	r5, SAVED_R3, r18
	st.q	SP, FRAME_R(2), r6
	ld.q	r5, SAVED_R4, r6
	st.q	SP, FRAME_R(3), r18
	ld.q	r5, SAVED_R5, r18
	st.q	SP, FRAME_R(4), r6
	ld.q	r5, SAVED_R6, r6
	st.q	SP, FRAME_R(5), r18
	ld.q	r5, SAVED_R18, r18
	st.q	SP, FRAME_R(6), r6
	ld.q	r5, SAVED_TR0, r6
	st.q	SP, FRAME_R(18), r18
	st.q	SP, FRAME_T(0), r6

	/* Keep old SP around */
	getcon	KCR1, r6

	/* Save the rest of the general purpose registers */
	st.q	SP, FRAME_R(0), r0
	st.q	SP, FRAME_R(1), r1
	st.q	SP, FRAME_R(7), r7
	st.q	SP, FRAME_R(8), r8
	st.q	SP, FRAME_R(9), r9
	st.q	SP, FRAME_R(10), r10
	st.q	SP, FRAME_R(11), r11
	st.q	SP, FRAME_R(12), r12
	st.q	SP, FRAME_R(13), r13
	st.q	SP, FRAME_R(14), r14

	/* SP is somewhere else */
	st.q	SP, FRAME_R(15), r6

	st.q	SP, FRAME_R(16), r16
	st.q	SP, FRAME_R(17), r17
	/* r18 is saved earlier. */
	st.q	SP, FRAME_R(19), r19
	st.q	SP, FRAME_R(20), r20
	st.q	SP, FRAME_R(21), r21
	st.q	SP, FRAME_R(22), r22
	st.q	SP, FRAME_R(23), r23
	st.q	SP, FRAME_R(24), r24
	st.q	SP, FRAME_R(25), r25
	st.q	SP, FRAME_R(26), r26
	st.q	SP, FRAME_R(27), r27
	st.q	SP, FRAME_R(28), r28
	st.q	SP, FRAME_R(29), r29
	st.q	SP, FRAME_R(30), r30
	st.q	SP, FRAME_R(31), r31
	st.q	SP, FRAME_R(32), r32
	st.q	SP, FRAME_R(33), r33
	st.q	SP, FRAME_R(34), r34
	st.q	SP, FRAME_R(35), r35
	st.q	SP, FRAME_R(36), r36
	st.q	SP, FRAME_R(37), r37
	st.q	SP, FRAME_R(38), r38
	st.q	SP, FRAME_R(39), r39
	st.q	SP, FRAME_R(40), r40
	st.q	SP, FRAME_R(41), r41
	st.q	SP, FRAME_R(42), r42
	st.q	SP, FRAME_R(43), r43
	st.q	SP, FRAME_R(44), r44
	st.q	SP, FRAME_R(45), r45
	st.q	SP, FRAME_R(46), r46
	st.q	SP, FRAME_R(47), r47
	st.q	SP, FRAME_R(48), r48
	st.q	SP, FRAME_R(49), r49
	st.q	SP, FRAME_R(50), r50
	st.q	SP, FRAME_R(51), r51
	st.q	SP, FRAME_R(52), r52
	st.q	SP, FRAME_R(53), r53
	st.q	SP, FRAME_R(54), r54
	st.q	SP, FRAME_R(55), r55
	st.q	SP, FRAME_R(56), r56
	st.q	SP, FRAME_R(57), r57
	st.q	SP, FRAME_R(58), r58
	st.q	SP, FRAME_R(59), r59
	st.q	SP, FRAME_R(60), r60
	st.q	SP, FRAME_R(61), r61
	st.q	SP, FRAME_R(62), r62

	/*
	 * Save the S* registers.
	 */
	getcon	SSR, r61
	st.q	SP, FRAME_S(FSSR), r61
	getcon	SPC, r62
	st.q	SP, FRAME_S(FSPC), r62
	movi	-1, r62			/* Reset syscall_nr */
	st.q	SP, FRAME_S(FSYSCALL_ID), r62

	/* Save the rest of the target registers */
	gettr	tr1, r6
	st.q	SP, FRAME_T(1), r6
	gettr	tr2, r6
	st.q	SP, FRAME_T(2), r6
	gettr	tr3, r6
	st.q	SP, FRAME_T(3), r6
	gettr	tr4, r6
	st.q	SP, FRAME_T(4), r6
	gettr	tr5, r6
	st.q	SP, FRAME_T(5), r6
	gettr	tr6, r6
	st.q	SP, FRAME_T(6), r6
	gettr	tr7, r6
	st.q	SP, FRAME_T(7), r6

	! setup FP so that unwinder can wind back through nested kernel mode
	! exceptions
	add	SP, ZERO, r14

#ifdef CONFIG_POOR_MANS_STRACE
	/* We've pushed all the registers now, so only r2-r4 hold anything
	 * useful. Move them into callee save registers */
	or	r2, ZERO, r28
	or	r3, ZERO, r29
	or	r4, ZERO, r30

	/* Preserve r2 as the event code */
	movi	evt_debug, r3
	ori	r3, 1, r3
	ptabs	r3, tr0

	or	SP, ZERO, r6
	getcon	TRA, r5
	blink	tr0, LINK

	or	r28, ZERO, r2
	or	r29, ZERO, r3
	or	r30, ZERO, r4
#endif

	/* For syscall and debug race condition, get TRA now */
	getcon	TRA, r5

	/* We are in a safe position to turn SR.BL off, but set IMASK=0xf
	 * first. Also set FD, to catch FPU usage in the kernel.
	 *
	 * benedict.gaster@superh.com 29/07/2002
	 *
	 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
	 * same time change BL from 1->0, as any pending interrupt of a level
	 * higher than the previous value of IMASK will leak through and be
	 * taken unexpectedly.
	 *
	 * To avoid this we raise the IMASK and then issue another PUTCON to
	 * enable interrupts.
	 */
	getcon	SR, r6
	movi	SR_IMASK | SR_FD, r7
	or	r6, r7, r6
	putcon	r6, SR
	movi	SR_UNBLOCK_EXC, r7
	and	r6, r7, r6
	putcon	r6, SR


	/* Now call the appropriate 3rd level handler */
	or	r3, ZERO, LINK
	movi	trap_jtable, r3
	shlri	r2, 3, r2
	ldx.l	r2, r3, r3
	shlri	r2, 2, r2
	ptabs	r3, tr0
	or	SP, ZERO, r3
	blink	tr0, ZERO
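/*
 * Dispatch arithmetic: event codes are multiples of 0x20 and trap_jtable
 * holds 4-byte entries, so (code >> 3) is the byte offset into the table
 * ((code / 0x20) * 4), and the further >> 2 leaves the plain entry number
 * in r2 for the third level handler. For example a system call
 * (EXPEVT = 0x160) gives byte offset 0x2C, i.e. entry 11, matching the
 * "TRAP = 11" noted in the system_call interface comment below.
 */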

/*
 * Second level handler for VBR-based exceptions. Post-handlers.
 *
 * Post-handlers for interrupts (ret_from_irq), exceptions
 * (ret_from_exception) and common reentrance doors (restore_all
 * to get back to the original context, ret_from_syscall loop to
 * check kernel exiting).
 *
 * ret_with_reschedule and work_notifysig are inner labels of
 * the ret_from_syscall loop.
 *
 * Common to all stack-frame sensitive handlers.
 *
 * Inputs:
 * (SP)   struct pt_regs *, original registers' frame pointer (basic)
 *
 */
	.global ret_from_irq
ret_from_irq:
#ifdef CONFIG_POOR_MANS_STRACE
	pta	evt_debug_ret_from_irq, tr0
	ori	SP, 0, r2
	blink	tr0, LINK
#endif
	ld.q	SP, FRAME_S(FSSR), r6
	shlri	r6, 30, r6
	andi	r6, 1, r6
	pta	resume_kernel, tr0
	bne	r6, ZERO, tr0		/* no further checks */
	STI()
	pta	ret_with_reschedule, tr0
	blink	tr0, ZERO		/* Do not check softirqs */

	.global ret_from_exception
ret_from_exception:
	preempt_stop()

#ifdef CONFIG_POOR_MANS_STRACE
	pta	evt_debug_ret_from_exc, tr0
	ori	SP, 0, r2
	blink	tr0, LINK
#endif

	ld.q	SP, FRAME_S(FSSR), r6
	shlri	r6, 30, r6
	andi	r6, 1, r6
	pta	resume_kernel, tr0
	bne	r6, ZERO, tr0		/* no further checks */

	/* Check softirqs */

#ifdef CONFIG_PREEMPT
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO

resume_kernel:
	CLI()

	pta	restore_all, tr0

	getcon	KCR0, r6
	ld.l	r6, TI_PRE_COUNT, r7
	beq/u	r7, ZERO, tr0

need_resched:
	ld.l	r6, TI_FLAGS, r7
	movi	(1 << TIF_NEED_RESCHED), r8
	and	r8, r7, r8
	bne	r8, ZERO, tr0

	getcon	SR, r7
	andi	r7, 0xf0, r7
	bne	r7, ZERO, tr0

	movi	preempt_schedule_irq, r7
	ori	r7, 1, r7
	ptabs	r7, tr1
	blink	tr1, LINK

	pta	need_resched, tr1
	blink	tr1, ZERO
#endif

	.global ret_from_syscall
ret_from_syscall:

ret_with_reschedule:
	getcon	KCR0, r6		! r6 contains current_thread_info
	ld.l	r6, TI_FLAGS, r7	! r7 contains current_thread_info->flags

	movi	_TIF_NEED_RESCHED, r8
	and	r8, r7, r8
	pta	work_resched, tr0
	bne	r8, ZERO, tr0

	pta	restore_all, tr1

	movi	(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
	and	r8, r7, r8
	pta	work_notifysig, tr0
	bne	r8, ZERO, tr0

	blink	tr1, ZERO

work_resched:
	pta	ret_from_syscall, tr0
	gettr	tr0, LINK
	movi	schedule, r6
	ptabs	r6, tr0
	blink	tr0, ZERO		/* Call schedule(), return on top */

work_notifysig:
	gettr	tr1, LINK

	movi	do_notify_resume, r6
	ptabs	r6, tr0
	or	SP, ZERO, r2
	or	r7, ZERO, r3
	blink	tr0, LINK	    /* Call do_notify_resume(regs, current_thread_info->flags), return here */

restore_all:
	/* Do prefetches */

	ld.q	SP, FRAME_T(0), r6
	ld.q	SP, FRAME_T(1), r7
	ld.q	SP, FRAME_T(2), r8
	ld.q	SP, FRAME_T(3), r9
	ptabs	r6, tr0
	ptabs	r7, tr1
	ptabs	r8, tr2
	ptabs	r9, tr3
	ld.q	SP, FRAME_T(4), r6
	ld.q	SP, FRAME_T(5), r7
	ld.q	SP, FRAME_T(6), r8
	ld.q	SP, FRAME_T(7), r9
	ptabs	r6, tr4
	ptabs	r7, tr5
	ptabs	r8, tr6
	ptabs	r9, tr7

	ld.q	SP, FRAME_R(0), r0
	ld.q	SP, FRAME_R(1), r1
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ld.q	SP, FRAME_R(4), r4
	ld.q	SP, FRAME_R(5), r5
	ld.q	SP, FRAME_R(6), r6
	ld.q	SP, FRAME_R(7), r7
	ld.q	SP, FRAME_R(8), r8
	ld.q	SP, FRAME_R(9), r9
	ld.q	SP, FRAME_R(10), r10
	ld.q	SP, FRAME_R(11), r11
	ld.q	SP, FRAME_R(12), r12
	ld.q	SP, FRAME_R(13), r13
	ld.q	SP, FRAME_R(14), r14

	ld.q	SP, FRAME_R(16), r16
	ld.q	SP, FRAME_R(17), r17
	ld.q	SP, FRAME_R(18), r18
	ld.q	SP, FRAME_R(19), r19
	ld.q	SP, FRAME_R(20), r20
	ld.q	SP, FRAME_R(21), r21
	ld.q	SP, FRAME_R(22), r22
	ld.q	SP, FRAME_R(23), r23
	ld.q	SP, FRAME_R(24), r24
	ld.q	SP, FRAME_R(25), r25
	ld.q	SP, FRAME_R(26), r26
	ld.q	SP, FRAME_R(27), r27
	ld.q	SP, FRAME_R(28), r28
	ld.q	SP, FRAME_R(29), r29
	ld.q	SP, FRAME_R(30), r30
	ld.q	SP, FRAME_R(31), r31
	ld.q	SP, FRAME_R(32), r32
	ld.q	SP, FRAME_R(33), r33
	ld.q	SP, FRAME_R(34), r34
	ld.q	SP, FRAME_R(35), r35
	ld.q	SP, FRAME_R(36), r36
	ld.q	SP, FRAME_R(37), r37
	ld.q	SP, FRAME_R(38), r38
	ld.q	SP, FRAME_R(39), r39
	ld.q	SP, FRAME_R(40), r40
	ld.q	SP, FRAME_R(41), r41
	ld.q	SP, FRAME_R(42), r42
	ld.q	SP, FRAME_R(43), r43
	ld.q	SP, FRAME_R(44), r44
	ld.q	SP, FRAME_R(45), r45
	ld.q	SP, FRAME_R(46), r46
	ld.q	SP, FRAME_R(47), r47
	ld.q	SP, FRAME_R(48), r48
	ld.q	SP, FRAME_R(49), r49
	ld.q	SP, FRAME_R(50), r50
	ld.q	SP, FRAME_R(51), r51
	ld.q	SP, FRAME_R(52), r52
	ld.q	SP, FRAME_R(53), r53
	ld.q	SP, FRAME_R(54), r54
	ld.q	SP, FRAME_R(55), r55
	ld.q	SP, FRAME_R(56), r56
	ld.q	SP, FRAME_R(57), r57
	ld.q	SP, FRAME_R(58), r58

	getcon	SR, r59
	movi	SR_BLOCK_EXC, r60
	or	r59, r60, r59
	putcon	r59, SR			/* SR.BL = 1, keep nesting out */
	ld.q	SP, FRAME_S(FSSR), r61
	ld.q	SP, FRAME_S(FSPC), r62
	movi	SR_ASID_MASK, r60
	and	r59, r60, r59
	andc	r61, r60, r61		/* Clear out older ASID */
	or	r59, r61, r61		/* Retain current ASID */
	putcon	r61, SSR
	putcon	r62, SPC

	/* Ignore FSYSCALL_ID */

	ld.q	SP, FRAME_R(59), r59
	ld.q	SP, FRAME_R(60), r60
	ld.q	SP, FRAME_R(61), r61
	ld.q	SP, FRAME_R(62), r62

	/* Last touch */
	ld.q	SP, FRAME_R(15), SP
	rte
	nop

/*
 * Third level handlers for VBR-based exceptions. Adapting args to
 * and/or deflecting to fourth level handlers.
 *
 * Fourth level handlers interface.
 * Most are C-coded handlers directly pointed by the trap_jtable.
 * (Third = Fourth level)
 * Inputs:
 * (r2)   fault/interrupt code, entry number (e.g. NMI = 14,
 *	  IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
 * (r3)   struct pt_regs *, original registers' frame pointer
 * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
 * (r5)   TRA control register (for syscall/debug benefit only)
 * (LINK) return address
 * (SP)   = r3
 *
 * Kernel TLB fault handlers will get a slightly different interface.
 * (r2)   struct pt_regs *, original registers' frame pointer
 * (r3)   writeaccess, whether it's a store fault as opposed to a load fault
 * (r4)   execaccess, whether it's an ITLB fault as opposed to a DTLB fault
 * (r5)   Effective Address of fault
 * (LINK) return address
 * (SP)   = r2
 *
 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
 *
 */
#ifdef CONFIG_MMU
tlb_miss_load:
	or	SP, ZERO, r2
	or	ZERO, ZERO, r3		/* Read */
	or	ZERO, ZERO, r4		/* Data */
	getcon	TEA, r5
	pta	call_do_page_fault, tr0
	beq	ZERO, ZERO, tr0

tlb_miss_store:
	or	SP, ZERO, r2
	movi	1, r3			/* Write */
	or	ZERO, ZERO, r4		/* Data */
	getcon	TEA, r5
	pta	call_do_page_fault, tr0
	beq	ZERO, ZERO, tr0

itlb_miss_or_IRQ:
	pta	its_IRQ, tr0
	beqi/u	r4, EVENT_INTERRUPT, tr0
	or	SP, ZERO, r2
	or	ZERO, ZERO, r3		/* Read */
	movi	1, r4			/* Text */
	getcon	TEA, r5
	/* Fall through */

call_do_page_fault:
	movi	do_page_fault, r6
	ptabs	r6, tr0
	blink	tr0, ZERO
#endif /* CONFIG_MMU */

fpu_error_or_IRQA:
	pta	its_IRQ, tr0
	beqi/l	r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
	movi	do_fpu_state_restore, r6
#else
	movi	do_exception_error, r6
#endif
	ptabs	r6, tr0
	blink	tr0, ZERO

fpu_error_or_IRQB:
	pta	its_IRQ, tr0
	beqi/l	r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
	movi	do_fpu_state_restore, r6
#else
	movi	do_exception_error, r6
#endif
	ptabs	r6, tr0
	blink	tr0, ZERO

its_IRQ:
	movi	do_IRQ, r6
	ptabs	r6, tr0
	blink	tr0, ZERO

/*
 * system_call/unknown_trap third level handler:
 *
 * Inputs:
 * (r2)   fault/interrupt code, entry number (TRAP = 11)
 * (r3)   struct pt_regs *, original registers' frame pointer
 * (r4)   Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
 * (r5)   TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
 * (SP)   = r3
 * (LINK) return address: ret_from_exception
 * (*r3)  Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
 *
 * Outputs:
 * (*r3)  Syscall reply (Saved r2)
 * (LINK) In case of syscall only it can be scrapped.
 *        Common second level post handler will be ret_from_syscall.
 *        Common (non-trace) exit point to that is syscall_ret (saving
 *        result to r2). Common bad exit point is syscall_bad (returning
 *        ENOSYS then saved to r2).
 *
 */

unknown_trap:
	/* Unknown Trap or User Trace */
	movi	do_unknown_trapa, r6
	ptabs	r6, tr0
	ld.q	r3, FRAME_R(9), r2	/* r2 = #arg << 16 | syscall # */
	andi	r2, 0x1ff, r2		/* r2 = syscall # */
	blink	tr0, LINK

	pta	syscall_ret, tr0
	blink	tr0, ZERO

	/* New syscall implementation */
system_call:
	pta	unknown_trap, tr0
	or	r5, ZERO, r4		/* TRA (=r5) -> r4 */
	shlri	r4, 20, r4
	bnei	r4, 1, tr0		/* unknown_trap if not 0x1yzzzz */

	/* It's a system call */
	st.q	r3, FRAME_S(FSYSCALL_ID), r5	/* ID (0x1yzzzz) -> stack */
	andi	r5, 0x1ff, r5			/* syscall # -> r5	  */
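	/* Decode example (illustrative value only): TRA = 0x100077 passes
	 * the check above since 0x100077 >> 20 == 1, and leaves syscall
	 * number 0x77 in r5; any TRA whose 'x' digit is not 1 is routed
	 * to unknown_trap instead. */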

	STI()

	pta	syscall_allowed, tr0
	movi	NR_syscalls - 1, r4	/* Last valid */
	bgeu/l	r4, r5, tr0

syscall_bad:
	/* Return ENOSYS ! */
	movi	-(ENOSYS), r2		/* Fall-through */

	.global syscall_ret
syscall_ret:
	st.q	SP, FRAME_R(9), r2	/* Expecting SP back to BASIC frame */

#ifdef CONFIG_POOR_MANS_STRACE
	/* nothing useful in registers at this point */

	movi	evt_debug2, r5
	ori	r5, 1, r5
	ptabs	r5, tr0
	ld.q	SP, FRAME_R(9), r2
	or	SP, ZERO, r3
	blink	tr0, LINK
#endif

	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO


/* A different return path for ret_from_fork, because we now need
 * to call schedule_tail with the later kernels. Since prev is
 * loaded into r2 by switch_to(), we can just call it straight away.
 */

.global	ret_from_fork
ret_from_fork:

	movi	schedule_tail, r5
	ori	r5, 1, r5
	ptabs	r5, tr0
	blink	tr0, LINK

#ifdef CONFIG_POOR_MANS_STRACE
	/* nothing useful in registers at this point */

	movi	evt_debug2, r5
	ori	r5, 1, r5
	ptabs	r5, tr0
	ld.q	SP, FRAME_R(9), r2
	or	SP, ZERO, r3
	blink	tr0, LINK
#endif

	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2		/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2
	pta	ret_from_syscall, tr0
	blink	tr0, ZERO



syscall_allowed:
	/* Use LINK to deflect the exit point, default is syscall_ret */
	pta	syscall_ret, tr0
	gettr	tr0, LINK
	pta	syscall_notrace, tr0

	getcon	KCR0, r2
	ld.l	r2, TI_FLAGS, r4
	movi	_TIF_WORK_SYSCALL_MASK, r6
	and	r6, r4, r6
	beq/l	r6, ZERO, tr0

	/* Trace it by calling syscall_trace before and after */
	movi	do_syscall_trace_enter, r4
	or	SP, ZERO, r2
	ptabs	r4, tr0
	blink	tr0, LINK

	/* Save the retval */
	st.q	SP, FRAME_R(2), r2

	/* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
	ld.q	SP, FRAME_S(FSYSCALL_ID), r5
	andi	r5, 0x1ff, r5

	pta	syscall_ret_trace, tr0
	gettr	tr0, LINK

syscall_notrace:
	/* Now point to the appropriate 4th level syscall handler */
	movi	sys_call_table, r4
	shlli	r5, 2, r5
	ldx.l	r4, r5, r5
	ptabs	r5, tr0

	/* Prepare original args */
	ld.q	SP, FRAME_R(2), r2
	ld.q	SP, FRAME_R(3), r3
	ld.q	SP, FRAME_R(4), r4
	ld.q	SP, FRAME_R(5), r5
	ld.q	SP, FRAME_R(6), r6
	ld.q	SP, FRAME_R(7), r7

	/* And now the trick for those syscalls requiring regs * ! */
	or	SP, ZERO, r8

	/* Call it */
	blink	tr0, ZERO	/* LINK is already properly set */

syscall_ret_trace:
	/* We get back here only if under trace */
	st.q	SP, FRAME_R(9), r2	/* Save return value */

	movi	do_syscall_trace_leave, LINK
	or	SP, ZERO, r2
	ptabs	LINK, tr0
	blink	tr0, LINK

	/* This needs to be done after any syscall tracing */
	ld.q	SP, FRAME_S(FSPC), r2
	addi	r2, 4, r2	/* Move PC, being pre-execution event */
	st.q	SP, FRAME_S(FSPC), r2

	pta	ret_from_syscall, tr0
	blink	tr0, ZERO		/* Resume normal return sequence */

/*
 * --- Switch to running under a particular ASID and return the previous ASID value
 * --- The caller is assumed to have done a cli before calling this.
 *
 * Input r2 : new ASID
 * Output r2 : old ASID
 */

	.global switch_and_save_asid
switch_and_save_asid:
	getcon	sr, r0
	movi	255, r4
	shlli	r4, 16, r4	/* r4 = mask to select ASID */
	and	r0, r4, r3	/* r3 = shifted old ASID */
	andi	r2, 255, r2	/* mask down new ASID */
	shlli	r2, 16, r2	/* align new ASID against SR.ASID */
	andc	r0, r4, r0	/* efface old ASID from SR */
	or	r0, r2, r0	/* insert the new ASID */
	putcon	r0, ssr
	movi	1f, r0
	putcon	r0, spc
	rte
	nop
1:
	ptabs	LINK, tr0
	shlri	r3, 16, r2	/* r2 = old ASID */
	blink tr0, r63
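/*
 * The rte above is what commits the new SR: SR.ASID is bits [23:16]
 * (cf. SR_ASID_MASK), so e.g. switching from ASID 3 to ASID 5 rewrites
 * that field from 0x03 to 0x05 and hands the old value 3 back in r2
 * (illustrative values).
 */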

	.global	route_to_panic_handler
route_to_panic_handler:
	/* Switch to real mode, goto panic_handler, don't return.  Useful for
	   last-chance debugging, e.g. if no output wants to go to the console.
	   */

	movi	panic_handler - CONFIG_PAGE_OFFSET, r1
	ptabs	r1, tr0
	pta	1f, tr1
	gettr	tr1, r0
	putcon	r0, spc
	getcon	sr, r0
	movi	1, r1
	shlli	r1, 31, r1
	andc	r0, r1, r0
	putcon	r0, ssr
	rte
	nop
1:	/* Now in real mode */
	blink tr0, r63
	nop

	.global peek_real_address_q
peek_real_address_q:
	/* Two args:
	   r2 : real mode address to peek
	   r2(out) : result quadword

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to onchip_remap the debug
	   module, and to avoid the need to onchip_remap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/

	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1
	or	r0, r1, r1	/* r0 with block bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */

	putcon	r1, ssr
	movi	.peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
	movi	1f, r37		/* virtual mode return addr */
	putcon	r36, spc

	synco
	rte
	nop

.peek0:	/* come here in real mode, don't touch caches!!
	   still in critical section (sr.bl==1) */
	putcon	r0, ssr
	putcon	r37, spc
	/* Here's the actual peek.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	ld.q	r2, 0, r2
	synco
	rte	/* Back to virtual mode */
	nop

1:
	ptabs	LINK, tr0
	blink	tr0, r63
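/* Usage sketch from the C side, assuming the usual extern declarations
 * for these helpers (illustrative address and prototypes only):
 *
 *	extern unsigned long long peek_real_address_q(unsigned long long addr);
 *	extern void poke_real_address_q(unsigned long long addr,
 *					unsigned long long val);
 *
 *	unsigned long long v = peek_real_address_q(0x09000000);
 *	poke_real_address_q(0x09000000, v | 1);
 */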

	.global poke_real_address_q
poke_real_address_q:
	/* Two args:
	   r2 : real mode address to poke
	   r3 : quadword value to write.

	   This is provided as a cheapskate way of manipulating device
	   registers for debugging (to avoid the need to onchip_remap the debug
	   module, and to avoid the need to onchip_remap the watchpoint
	   controller in a way that identity maps sufficient bits to avoid the
	   SH5-101 cut2 silicon defect).

	   This code is not performance critical
	*/

	add.l	r2, r63, r2	/* sign extend address */
	getcon	sr, r0		/* r0 = saved original SR */
	movi	1, r1
	shlli	r1, 28, r1
	or	r0, r1, r1	/* r0 with block bit set */
	putcon	r1, sr		/* now in critical section */
	movi	1, r36
	shlli	r36, 31, r36
	andc	r1, r36, r1	/* turn sr.mmu off in real mode section */

	putcon	r1, ssr
	movi	.poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
	movi	1f, r37		/* virtual mode return addr */
	putcon	r36, spc

	synco
	rte
	nop

.poke0:	/* come here in real mode, don't touch caches!!
	   still in critical section (sr.bl==1) */
	putcon	r0, ssr
	putcon	r37, spc
	/* Here's the actual poke.  If the address is bad, all bets are now off
	 * what will happen (handlers invoked in real-mode = bad news) */
	st.q	r2, 0, r3
	synco
	rte	/* Back to virtual mode */
	nop

1:
	ptabs	LINK, tr0
	blink	tr0, r63

#ifdef CONFIG_MMU
/*
 * --- User Access Handling Section
 */

/*
 * User Access support. It all moved to non inlined Assembler
 * functions in here.
 *
 * __kernel_size_t __copy_user(void *__to, const void *__from,
 *			       __kernel_size_t __n)
 *
 * Inputs:
 * (r2)  target address
 * (r3)  source address
 * (r4)  size in bytes
 *
 * Outputs:
 * (*r2) target data
 * (r2)  non-copied bytes
 *
 * If a fault occurs on the user pointer, bail out early and return the
 * number of bytes not copied in r2.
 * Strategy : for large blocks, call a real memcpy function which can
 * move >1 byte at a time using unaligned ld/st instructions, and can
 * manipulate the cache using prefetch + alloco to improve the speed
 * further.  If a fault occurs in that function, just revert to the
 * byte-by-byte approach used for small blocks; this is rare so the
 * performance hit for that case does not matter.
 *
 * For small blocks it's not worth the overhead of setting up and calling
 * the memcpy routine; do the copy a byte at a time.
 *
 */
	.global	__copy_user
__copy_user:
	pta	__copy_user_byte_by_byte, tr1
	movi	16, r0 ! this value is a best guess, should tune it by benchmarking
	bge/u	r0, r4, tr1
	pta	copy_user_memcpy, tr0
	addi	SP, -32, SP
	/* Save arguments in case we have to fix-up unhandled page fault */
	st.q	SP, 0, r2
	st.q	SP, 8, r3
	st.q	SP, 16, r4
	st.q	SP, 24, r35 ! r35 is callee-save
	/* Save LINK in a register to reduce RTS time later (otherwise
	   ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
	ori	LINK, 0, r35
	blink	tr0, LINK

	/* Copy completed normally if we get back here */
	ptabs	r35, tr0
	ld.q	SP, 24, r35
	/* don't restore r2-r4, pointless */
	/* set result=r2 to zero as the copy must have succeeded. */
	or	r63, r63, r2
	addi	SP, 32, SP
	blink	tr0, r63 ! RTS

	.global __copy_user_fixup
__copy_user_fixup:
	/* Restore stack frame */
	ori	r35, 0, LINK
	ld.q	SP, 24, r35
	ld.q	SP, 16, r4
	ld.q	SP,  8, r3
	ld.q	SP,  0, r2
	addi	SP, 32, SP
	/* Fall through to original code, in the 'same' state we entered with */

/* The slow byte-by-byte method is used if the fast copy traps due to a bad
   user address.  In that rare case, the speed drop can be tolerated. */
__copy_user_byte_by_byte:
	pta	___copy_user_exit, tr1
	pta	___copy_user1, tr0
	beq/u	r4, r63, tr1	/* early exit for zero length copy */
	sub	r2, r3, r0
	addi	r0, -1, r0
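	/* r0 now holds (dest - src) - 1, so in the loop below the single
	   indexed store "stx.b r3, r0, r5" hits (src + 1) + r0 = dest once
	   r3 has been post-incremented: the offset register stands in for a
	   separately maintained destination pointer. */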

___copy_user1:
	ld.b	r3, 0, r5		/* Fault address 1 */

	/* Could rewrite this to use just 1 add, but the second comes 'free'
	   due to load latency */
	addi	r3, 1, r3
	addi	r4, -1, r4		/* No real fixup required */
___copy_user2:
	stx.b	r3, r0, r5		/* Fault address 2 */
	bne	r4, ZERO, tr0

___copy_user_exit:
	or	r4, ZERO, r2
	ptabs	LINK, tr0
	blink	tr0, ZERO

/*
 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
 *
 * Inputs:
 * (r2)  target address
 * (r3)  size in bytes
 *
 * Outputs:
 * (*r2) zero-ed target data
 * (r2)  non-zero-ed bytes
 */
	.global	__clear_user
__clear_user:
	pta	___clear_user_exit, tr1
	pta	___clear_user1, tr0
	beq/u	r3, r63, tr1

___clear_user1:
	st.b	r2, 0, ZERO		/* Fault address */
	addi	r2, 1, r2
	addi	r3, -1, r3		/* No real fixup required */
	bne	r3, ZERO, tr0

___clear_user_exit:
	or	r3, ZERO, r2
	ptabs	LINK, tr0
	blink	tr0, ZERO

#endif /* CONFIG_MMU */

/*
 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
 *			   int __count)
 *
 * Inputs:
 * (r2)  target address
 * (r3)  source address
 * (r4)  maximum size in bytes
 *
 * Outputs:
 * (*r2) copied data
 * (r2)  -EFAULT (in case of faulting)
 *       copied data (otherwise)
 */
	.global	__strncpy_from_user
__strncpy_from_user:
	pta	___strncpy_from_user1, tr0
	pta	___strncpy_from_user_done, tr1
	or	r4, ZERO, r5		/* r5 = original count */
	beq/u	r4, r63, tr1		/* early exit if r4==0 */
	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */

___strncpy_from_user1:
	ld.b	r3, 0, r7		/* Fault address: only in reading */
	st.b	r2, 0, r7
	addi	r2, 1, r2
	addi	r3, 1, r3
	beq/u	ZERO, r7, tr1
	addi	r4, -1, r4		/* return real number of copied bytes */
	bne/l	ZERO, r4, tr0

___strncpy_from_user_done:
	sub	r5, r4, r6		/* If done, return copied */

___strncpy_from_user_exit:
	or	r6, ZERO, r2
	ptabs	LINK, tr0
	blink	tr0, ZERO

/*
 * extern long __strnlen_user(const char *__s, long __n)
 *
 * Inputs:
 * (r2)  source address
 * (r3)  source size in bytes
 *
 * Outputs:
 * (r2)  -EFAULT (in case of faulting)
 *       string length (otherwise)
 */
	.global	__strnlen_user
__strnlen_user:
	pta	___strnlen_user_set_reply, tr0
	pta	___strnlen_user1, tr1
	or	ZERO, ZERO, r5		/* r5 = counter */
	movi	-(EFAULT), r6		/* r6 = reply, no real fixup */
	or	ZERO, ZERO, r7		/* r7 = data, clear top byte of data */
	beq	r3, ZERO, tr0

___strnlen_user1:
	ldx.b	r2, r5, r7		/* Fault address: only in reading */
	addi	r3, -1, r3		/* No real fixup */
	addi	r5, 1, r5
	beq	r3, ZERO, tr0
	bne	r7, ZERO, tr1
! The line below used to be active.  It led to a junk byte lying between each pair
! of entries in the argv & envp structures in memory.  Whilst the program saw the right data
! via the argv and envp arguments to main, the 'flat' representation visible through
! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
!	addi	r5, 1, r5		/* Include '\0' */

___strnlen_user_set_reply:
	or	r5, ZERO, r6		/* If done, return counter */

___strnlen_user_exit:
	or	r6, ZERO, r2
	ptabs	LINK, tr0
	blink	tr0, ZERO

/*
 * extern long __get_user_asm_?(void *val, long addr)
 *
 * Inputs:
 * (r2)  dest address
 * (r3)  source address (in User Space)
 *
 * Outputs:
 * (r2)  -EFAULT (faulting)
 *       0	 (not faulting)
 */
	.global	__get_user_asm_b
__get_user_asm_b:
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_b1:
	ld.b	r3, 0, r5		/* r5 = data */
	st.b	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_b_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__get_user_asm_w
__get_user_asm_w:
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_w1:
	ld.w	r3, 0, r5		/* r5 = data */
	st.w	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__get_user_asm_l
__get_user_asm_l:
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_l1:
	ld.l	r3, 0, r5		/* r5 = data */
	st.l	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__get_user_asm_q
__get_user_asm_q:
	or	r2, ZERO, r4
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___get_user_asm_q1:
	ld.q	r3, 0, r5		/* r5 = data */
	st.q	r4, 0, r5
	or	ZERO, ZERO, r2

___get_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO

/*
 * extern long __put_user_asm_?(void *pval, long addr)
 *
 * Inputs:
 * (r2)  kernel pointer to value
 * (r3)  dest address (in User Space)
 *
 * Outputs:
 * (r2)  -EFAULT (faulting)
 *       0	 (not faulting)
 */
	.global	__put_user_asm_b
__put_user_asm_b:
	ld.b	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_b1:
	st.b	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_b_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__put_user_asm_w
__put_user_asm_w:
	ld.w	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_w1:
	st.w	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_w_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__put_user_asm_l
__put_user_asm_l:
	ld.l	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_l1:
	st.l	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_l_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO


	.global	__put_user_asm_q
__put_user_asm_q:
	ld.q	r2, 0, r4		/* r4 = data */
	movi	-(EFAULT), r2		/* r2 = reply, no real fixup */

___put_user_asm_q1:
	st.q	r3, 0, r4
	or	ZERO, ZERO, r2

___put_user_asm_q_exit:
	ptabs	LINK, tr0
	blink	tr0, ZERO

panic_stash_regs:
	/* The idea is: when we get an unhandled panic, we dump the registers
	   to a known memory location, then just sit in a tight loop.
	   This allows the human to look at the memory region through the GDB
	   session (assuming the debug module's SHwy initiator isn't locked up
	   or anything), to hopefully analyze the cause of the panic. */

	/* On entry, former r15 (SP) is in DCR
	   former r0  is at resvec_save_area + 0
	   former r1  is at resvec_save_area + 8
	   former tr0 is at resvec_save_area + 32
	   DCR is the only register whose value is lost altogether.
	*/

	movi	0xffffffff80000000, r0 ! phy of dump area
	ld.q	SP, 0x000, r1	! former r0
	st.q	r0,  0x000, r1
	ld.q	SP, 0x008, r1	! former r1
	st.q	r0,  0x008, r1
	st.q	r0,  0x010, r2
	st.q	r0,  0x018, r3
	st.q	r0,  0x020, r4
	st.q	r0,  0x028, r5
	st.q	r0,  0x030, r6
	st.q	r0,  0x038, r7
	st.q	r0,  0x040, r8
	st.q	r0,  0x048, r9
	st.q	r0,  0x050, r10
	st.q	r0,  0x058, r11
	st.q	r0,  0x060, r12
	st.q	r0,  0x068, r13
	st.q	r0,  0x070, r14
	getcon	dcr, r14
	st.q	r0,  0x078, r14
	st.q	r0,  0x080, r16
	st.q	r0,  0x088, r17
	st.q	r0,  0x090, r18
	st.q	r0,  0x098, r19
	st.q	r0,  0x0a0, r20
	st.q	r0,  0x0a8, r21
	st.q	r0,  0x0b0, r22
	st.q	r0,  0x0b8, r23
	st.q	r0,  0x0c0, r24
	st.q	r0,  0x0c8, r25
	st.q	r0,  0x0d0, r26
	st.q	r0,  0x0d8, r27
	st.q	r0,  0x0e0, r28
	st.q	r0,  0x0e8, r29
	st.q	r0,  0x0f0, r30
	st.q	r0,  0x0f8, r31
	st.q	r0,  0x100, r32
	st.q	r0,  0x108, r33
	st.q	r0,  0x110, r34
	st.q	r0,  0x118, r35
	st.q	r0,  0x120, r36
	st.q	r0,  0x128, r37
	st.q	r0,  0x130, r38
	st.q	r0,  0x138, r39
	st.q	r0,  0x140, r40
	st.q	r0,  0x148, r41
	st.q	r0,  0x150, r42
	st.q	r0,  0x158, r43
	st.q	r0,  0x160, r44
	st.q	r0,  0x168, r45
	st.q	r0,  0x170, r46
	st.q	r0,  0x178, r47
	st.q	r0,  0x180, r48
	st.q	r0,  0x188, r49
	st.q	r0,  0x190, r50
	st.q	r0,  0x198, r51
	st.q	r0,  0x1a0, r52
	st.q	r0,  0x1a8, r53
	st.q	r0,  0x1b0, r54
	st.q	r0,  0x1b8, r55
	st.q	r0,  0x1c0, r56
	st.q	r0,  0x1c8, r57
	st.q	r0,  0x1d0, r58
	st.q	r0,  0x1d8, r59
	st.q	r0,  0x1e0, r60
	st.q	r0,  0x1e8, r61
	st.q	r0,  0x1f0, r62
	st.q	r0,  0x1f8, r63	! bogus, but for consistency's sake...

	ld.q	SP, 0x020, r1  ! former tr0
	st.q	r0,  0x200, r1
	gettr	tr1, r1
	st.q	r0,  0x208, r1
	gettr	tr2, r1
	st.q	r0,  0x210, r1
	gettr	tr3, r1
	st.q	r0,  0x218, r1
	gettr	tr4, r1
	st.q	r0,  0x220, r1
	gettr	tr5, r1
	st.q	r0,  0x228, r1
	gettr	tr6, r1
	st.q	r0,  0x230, r1
	gettr	tr7, r1
	st.q	r0,  0x238, r1

	getcon	sr,  r1
	getcon	ssr,  r2
	getcon	pssr,  r3
	getcon	spc,  r4
	getcon	pspc,  r5
	getcon	intevt,  r6
	getcon	expevt,  r7
	getcon	pexpevt,  r8
	getcon	tra,  r9
	getcon	tea,  r10
	getcon	kcr0, r11
	getcon	kcr1, r12
	getcon	vbr,  r13
	getcon	resvec,  r14

	st.q	r0,  0x240, r1
	st.q	r0,  0x248, r2
	st.q	r0,  0x250, r3
	st.q	r0,  0x258, r4
	st.q	r0,  0x260, r5
	st.q	r0,  0x268, r6
	st.q	r0,  0x270, r7
	st.q	r0,  0x278, r8
	st.q	r0,  0x280, r9
	st.q	r0,  0x288, r10
	st.q	r0,  0x290, r11
	st.q	r0,  0x298, r12
	st.q	r0,  0x2a0, r13
	st.q	r0,  0x2a8, r14

	getcon	SPC, r2
	getcon	SSR, r3
	getcon	EXPEVT, r4
	/* Prepare to jump to C - physical address */
	movi	panic_handler-CONFIG_PAGE_OFFSET, r1
	ori	r1, 1, r1
	ptabs	r1, tr0
	getcon	DCR, SP
	blink	tr0, ZERO
	nop
	nop
	nop
	nop




/*
 * --- Signal Handling Section
 */

/*
 * extern long long _sa_default_rt_restorer
 * extern long long _sa_default_restorer
 *
 *		 or, better,
 *
 * extern void _sa_default_rt_restorer(void)
 * extern void _sa_default_restorer(void)
 *
 * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
 * from user space. Copied into user space by signal management.
 * Both must be quad aligned and 2 quads long (4 instructions).
 *
 */
	.balign 8
	.global sa_default_rt_restorer
sa_default_rt_restorer:
	movi	0x10, r9
	shori	__NR_rt_sigreturn, r9
	trapa	r9
	nop

	.balign 8
	.global sa_default_restorer
sa_default_restorer:
	movi	0x10, r9
	shori	__NR_sigreturn, r9
	trapa	r9
	nop
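/*
 * Both trampolines build r9 = (0x10 << 16) | __NR_xxx, so the trapa
 * leaves TRA = 0x10zzzz; system_call above accepts exactly this shape
 * (TRA >> 20 == 1) before extracting the syscall number with andi 0x1ff.
 */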

/*
 * --- __ex_table Section
 */

/*
 * User Access Exception Table.
 */
	.section	__ex_table, "a"

	.global asm_uaccess_start	/* Just a marker */
asm_uaccess_start:

#ifdef CONFIG_MMU
	.long	___copy_user1, ___copy_user_exit
	.long	___copy_user2, ___copy_user_exit
	.long	___clear_user1, ___clear_user_exit
#endif
	.long	___strncpy_from_user1, ___strncpy_from_user_exit
	.long	___strnlen_user1, ___strnlen_user_exit
	.long	___get_user_asm_b1, ___get_user_asm_b_exit
	.long	___get_user_asm_w1, ___get_user_asm_w_exit
	.long	___get_user_asm_l1, ___get_user_asm_l_exit
	.long	___get_user_asm_q1, ___get_user_asm_q_exit
	.long	___put_user_asm_b1, ___put_user_asm_b_exit
	.long	___put_user_asm_w1, ___put_user_asm_w_exit
	.long	___put_user_asm_l1, ___put_user_asm_l_exit
	.long	___put_user_asm_q1, ___put_user_asm_q_exit
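/*
 * Each pair above is (potentially faulting instruction, fixup address):
 * when a fault hits the first address, the fault handler looks the entry
 * up and resumes at the second, which is how the ___*_exit paths return
 * -EFAULT or a residual count instead of oopsing.
 */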

	.global asm_uaccess_end		/* Just a marker */
asm_uaccess_end:




/*
 * --- .text.init Section
 */

	.section	.text.init, "ax"

/*
 * void trap_init (void)
 *
 */
	.global	trap_init
trap_init:
	addi	SP, -24, SP			/* Room to save r28/r29/r30 */
	st.q	SP, 0, r28
	st.q	SP, 8, r29
	st.q	SP, 16, r30

	/* Set VBR and RESVEC */
	movi	LVBR_block, r19
	andi	r19, -4, r19			/* reset MMUOFF + reserved */
	/* For RESVEC exceptions we force the MMU off, which means we need the
	   physical address. */
	movi	LRESVEC_block-CONFIG_PAGE_OFFSET, r20
	andi	r20, -4, r20			/* reset reserved */
	ori	r20, 1, r20			/* set MMUOFF */
	putcon	r19, VBR
	putcon	r20, RESVEC

	/* Sanity check */
	movi	LVBR_block_end, r21
	andi	r21, -4, r21
	movi	BLOCK_SIZE, r29			/* r29 = expected size */
	or	r19, ZERO, r30
	add	r19, r29, r19

	/*
	 * Ugly, but better loop forever now than crash afterwards.
	 * We should print a message, but if we touch LVBR or
	 * LRESVEC blocks we should not be surprised if we get stuck
	 * in trap_init().
	 */
	pta	trap_init_loop, tr1
	gettr	tr1, r28			/* r28 = trap_init_loop */
	sub	r21, r30, r30			/* r30 = actual size */

	/*
	 * VBR/RESVEC handlers overlap by being bigger than
	 * allowed. Very bad. Just loop forever.
	 * (r28) panic/loop address
	 * (r29) expected size
	 * (r30) actual size
	 */
trap_init_loop:
	bne	r19, r21, tr1

	/* Now that exception vectors are set up, reset SR.BL */
	getcon	SR, r22
	movi	SR_UNBLOCK_EXC, r23
	and	r22, r23, r22
	putcon	r22, SR

	addi	SP, 24, SP
	ptabs	LINK, tr0
	blink	tr0, ZERO