1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 *  PowerPC version
4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
6 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
7 *  Adapted for Power Macintosh by Paul Mackerras.
8 *  Low-level exception handlers and MMU support
9 *  rewritten by Paul Mackerras.
10 *    Copyright (C) 1996 Paul Mackerras.
11 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
12 *
13 *  This file contains the system call entry code, context switch
14 *  code, and exception/interrupt return code for PowerPC.
15 */
16
17#include <linux/errno.h>
18#include <linux/err.h>
19#include <linux/sys.h>
20#include <linux/threads.h>
21#include <asm/reg.h>
22#include <asm/page.h>
23#include <asm/mmu.h>
24#include <asm/cputable.h>
25#include <asm/thread_info.h>
26#include <asm/ppc_asm.h>
27#include <asm/asm-offsets.h>
28#include <asm/unistd.h>
29#include <asm/ptrace.h>
30#include <asm/export.h>
31#include <asm/asm-405.h>
32#include <asm/feature-fixups.h>
33#include <asm/barrier.h>
34#include <asm/kup.h>
35#include <asm/bug.h>
36
37#include "head_32.h"
38
39/*
40 * Align to 4k to ensure that all functions modifying srr0/srr1
41 * fit into one page, so that no TLB miss can occur between the
42 * modification of srr0/srr1 and the associated rfi.
43 */
44	.align	12
45
46#ifdef CONFIG_BOOKE
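/*
 * Note: the three Book-E entry points below effectively nest.  A machine
 * check may have interrupted a debug handler, so mcheck_transfer_to_handler
 * first saves DSRR0/1; a debug exception may have interrupted a critical
 * handler, so debug_transfer_to_handler saves CSRR0/1; and the critical
 * path saves the MMU MAS registers, SRR0/1 and KSP_LIMIT before falling
 * through to the common transfer_to_handler code.
 */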
47	.globl	mcheck_transfer_to_handler
48mcheck_transfer_to_handler:
49	mfspr	r0,SPRN_DSRR0
50	stw	r0,_DSRR0(r11)
51	mfspr	r0,SPRN_DSRR1
52	stw	r0,_DSRR1(r11)
53	/* fall through */
54
55	.globl	debug_transfer_to_handler
56debug_transfer_to_handler:
57	mfspr	r0,SPRN_CSRR0
58	stw	r0,_CSRR0(r11)
59	mfspr	r0,SPRN_CSRR1
60	stw	r0,_CSRR1(r11)
61	/* fall through */
62
63	.globl	crit_transfer_to_handler
64crit_transfer_to_handler:
65#ifdef CONFIG_PPC_BOOK3E_MMU
66	mfspr	r0,SPRN_MAS0
67	stw	r0,MAS0(r11)
68	mfspr	r0,SPRN_MAS1
69	stw	r0,MAS1(r11)
70	mfspr	r0,SPRN_MAS2
71	stw	r0,MAS2(r11)
72	mfspr	r0,SPRN_MAS3
73	stw	r0,MAS3(r11)
74	mfspr	r0,SPRN_MAS6
75	stw	r0,MAS6(r11)
76#ifdef CONFIG_PHYS_64BIT
77	mfspr	r0,SPRN_MAS7
78	stw	r0,MAS7(r11)
79#endif /* CONFIG_PHYS_64BIT */
80#endif /* CONFIG_PPC_BOOK3E_MMU */
81#ifdef CONFIG_44x
82	mfspr	r0,SPRN_MMUCR
83	stw	r0,MMUCR(r11)
84#endif
85	mfspr	r0,SPRN_SRR0
86	stw	r0,_SRR0(r11)
87	mfspr	r0,SPRN_SRR1
88	stw	r0,_SRR1(r11)
89
90	/* set the stack limit to the current stack */
91	mfspr	r8,SPRN_SPRG_THREAD
92	lwz	r0,KSP_LIMIT(r8)
93	stw	r0,SAVED_KSP_LIMIT(r11)
94	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
95	stw	r0,KSP_LIMIT(r8)
96	/* fall through */
97#endif
98
99#ifdef CONFIG_40x
100	.globl	crit_transfer_to_handler
101crit_transfer_to_handler:
102	lwz	r0,crit_r10@l(0)
103	stw	r0,GPR10(r11)
104	lwz	r0,crit_r11@l(0)
105	stw	r0,GPR11(r11)
106	mfspr	r0,SPRN_SRR0
107	stw	r0,crit_srr0@l(0)
108	mfspr	r0,SPRN_SRR1
109	stw	r0,crit_srr1@l(0)
110
111	/* set the stack limit to the current stack */
112	mfspr	r8,SPRN_SPRG_THREAD
113	lwz	r0,KSP_LIMIT(r8)
114	stw	r0,saved_ksp_limit@l(0)
115	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
116	stw	r0,KSP_LIMIT(r8)
117	/* fall through */
118#endif
119
120/*
121 * This code finishes saving the registers to the exception frame
122 * and jumps to the appropriate handler for the exception, turning
123 * on address translation.
124 * Note that we rely on the caller having set cr0.eq iff the exception
125 * occurred in kernel mode (i.e. MSR:PR = 0).
126 */
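/*
 * Register conventions assumed below (as set up by the exception prologs):
 * r11 points to the exception frame, r9 holds the interrupted MSR, r12 the
 * interrupted NIP, and r10 the MSR value the handler should run with.  The
 * two words following the caller's branch hold the handler's virtual
 * address and the address to return to when the handler is done.
 */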
127	.globl	transfer_to_handler_full
128transfer_to_handler_full:
129	SAVE_NVGPRS(r11)
130	/* fall through */
131
132	.globl	transfer_to_handler
133transfer_to_handler:
134	stw	r2,GPR2(r11)
135	stw	r12,_NIP(r11)
136	stw	r9,_MSR(r11)
137	andi.	r2,r9,MSR_PR
138	mfctr	r12
139	mfspr	r2,SPRN_XER
140	stw	r12,_CTR(r11)
141	stw	r2,_XER(r11)
142	mfspr	r12,SPRN_SPRG_THREAD
143	beq	2f			/* if from user, fix up THREAD.regs */
144	addi	r2, r12, -THREAD
145	addi	r11,r1,STACK_FRAME_OVERHEAD
146	stw	r11,PT_REGS(r12)
147#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
148	/* Check to see if the dbcr0 register is set up to debug.  Use the
149	   internal debug mode bit to do this. */
150	lwz	r12,THREAD_DBCR0(r12)
151	andis.	r12,r12,DBCR0_IDM@h
152#endif
153	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
154#ifdef CONFIG_PPC_BOOK3S_32
155	kuep_lock r11, r12
156#endif
157#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
158	beq+	3f
159	/* From user and task is ptraced - load up global dbcr0 */
160	li	r12,-1			/* clear all pending debug events */
161	mtspr	SPRN_DBSR,r12
162	lis	r11,global_dbcr0@ha
163	tophys(r11,r11)
164	addi	r11,r11,global_dbcr0@l
165#ifdef CONFIG_SMP
166	lwz	r9,TASK_CPU(r2)
167	slwi	r9,r9,3
168	add	r11,r11,r9
169#endif
170	lwz	r12,0(r11)
171	mtspr	SPRN_DBCR0,r12
172	lwz	r12,4(r11)
173	addi	r12,r12,-1
174	stw	r12,4(r11)
175#endif
176
177	b	3f
178
1792:	/* if from kernel, check interrupted DOZE/NAP mode and
180         * check for stack overflow
181         */
182	kuap_save_and_lock r11, r12, r9, r2, r6
183	addi	r2, r12, -THREAD
184	lwz	r9,KSP_LIMIT(r12)
185	cmplw	r1,r9			/* if r1 <= ksp_limit */
186	ble-	stack_ovf		/* then the kernel stack overflowed */
1875:
188#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
189	lwz	r12,TI_LOCAL_FLAGS(r2)
190	mtcrf	0x01,r12
191	bt-	31-TLF_NAPPING,4f
192	bt-	31-TLF_SLEEPING,7f
193#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
194	.globl transfer_to_handler_cont
195transfer_to_handler_cont:
1963:
197	mflr	r9
198	tovirt(r2, r2)			/* set r2 to current */
199	lwz	r11,0(r9)		/* virtual address of handler */
200	lwz	r9,4(r9)		/* where to go when done */
201#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
202	mtspr	SPRN_NRI, r0
203#endif
204#ifdef CONFIG_TRACE_IRQFLAGS
205	/*
206	 * When tracing IRQ state (lockdep) we enable the MMU before we call
207	 * the IRQ tracing functions as they might access vmalloc space or
208	 * perform IOs for console output.
209	 *
210	 * To speed up the syscall path where interrupts stay on, let's check
211	 * first if we are changing the MSR value at all.
212	 */
213	tophys(r12, r1)
214	lwz	r12,_MSR(r12)
215	andi.	r12,r12,MSR_EE
216	bne	1f
217
218	/* MSR isn't changing, just transition directly */
219#endif
220	mtspr	SPRN_SRR0,r11
221	mtspr	SPRN_SRR1,r10
222	mtlr	r9
223	SYNC
224	RFI				/* jump to handler, enable MMU */
225
226#ifdef CONFIG_TRACE_IRQFLAGS
2271:	/* MSR is changing, re-enable MMU so we can notify lockdep. We need to
228	 * keep interrupts disabled at this point otherwise we might risk
229	 * taking an interrupt before we tell lockdep they are enabled.
230	 */
231	lis	r12,reenable_mmu@h
232	ori	r12,r12,reenable_mmu@l
233	LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
234	mtspr	SPRN_SRR0,r12
235	mtspr	SPRN_SRR1,r0
236	SYNC
237	RFI
238
239reenable_mmu:
240	/*
241	 * We save a bunch of GPRs:
242	 * r3 can be different from GPR3(r1) at this point, r9 and r11
243	 * contain the old MSR and handler address respectively, and
244	 * r4 & r5 can contain page fault arguments that need to be passed
245	 * along as well. r12, CCR, CTR, XER etc. are left clobbered as
246	 * they aren't useful past this point (they aren't syscall arguments);
247	 * the rest is restored from the exception frame.
248	 */
249
250	stwu	r1,-32(r1)
251	stw	r9,8(r1)
252	stw	r11,12(r1)
253	stw	r3,16(r1)
254	stw	r4,20(r1)
255	stw	r5,24(r1)
256
257	/* If we are disabling interrupts (normal case), simply log it with
258	 * lockdep
259	 */
2601:	bl	trace_hardirqs_off
2612:	lwz	r5,24(r1)
262	lwz	r4,20(r1)
263	lwz	r3,16(r1)
264	lwz	r11,12(r1)
265	lwz	r9,8(r1)
266	addi	r1,r1,32
267	lwz	r0,GPR0(r1)
268	lwz	r6,GPR6(r1)
269	lwz	r7,GPR7(r1)
270	lwz	r8,GPR8(r1)
271	mtctr	r11
272	mtlr	r9
273	bctr				/* jump to handler */
274#endif /* CONFIG_TRACE_IRQFLAGS */
275
276#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
2774:	rlwinm	r12,r12,0,~_TLF_NAPPING
278	stw	r12,TI_LOCAL_FLAGS(r2)
279	b	power_save_ppc32_restore
280
2817:	rlwinm	r12,r12,0,~_TLF_SLEEPING
282	stw	r12,TI_LOCAL_FLAGS(r2)
283	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
284	rlwinm	r9,r9,0,~MSR_EE
285	lwz	r12,_LINK(r11)		/* and return to address in LR */
286	kuap_restore r11, r2, r3, r4, r5
287	lwz	r2, GPR2(r11)
288	b	fast_exception_return
289#endif
290
291/*
292 * On kernel stack overflow, load up an initial stack pointer
293 * and call StackOverflow(regs), which should not return.
294 */
295stack_ovf:
296	/* sometimes we use a statically-allocated stack, which is OK. */
297	lis	r12,_end@h
298	ori	r12,r12,_end@l
299	cmplw	r1,r12
300	ble	5b			/* r1 <= &_end is OK */
301	SAVE_NVGPRS(r11)
302	addi	r3,r1,STACK_FRAME_OVERHEAD
303	lis	r1,init_thread_union@ha
304	addi	r1,r1,init_thread_union@l
305	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
306	lis	r9,StackOverflow@ha
307	addi	r9,r9,StackOverflow@l
308	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
309#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
310	mtspr	SPRN_NRI, r0
311#endif
312	mtspr	SPRN_SRR0,r9
313	mtspr	SPRN_SRR1,r10
314	SYNC
315	RFI
316
317#ifdef CONFIG_TRACE_IRQFLAGS
318trace_syscall_entry_irq_off:
319	/*
320	 * A syscall shouldn't happen while interrupts are disabled,
321	 * so warn here.
322	 */
3230:	trap
324	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
325	bl	trace_hardirqs_on
326
327	/* Now enable for real */
328	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
329	mtmsr	r10
330
331	REST_GPR(0, r1)
332	REST_4GPRS(3, r1)
333	REST_2GPRS(7, r1)
334	b	DoSyscall
335#endif /* CONFIG_TRACE_IRQFLAGS */
336
337	.globl	transfer_to_syscall
338transfer_to_syscall:
339#ifdef CONFIG_PPC_BOOK3S_32
340	kuep_lock r11, r12
341#endif
342#ifdef CONFIG_TRACE_IRQFLAGS
343	andi.	r12,r9,MSR_EE
344	beq-	trace_syscall_entry_irq_off
345#endif /* CONFIG_TRACE_IRQFLAGS */
346
347/*
348 * Handle a system call.
349 */
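/*
 * On entry r0 holds the syscall number and r3-r8 the arguments, as saved
 * in the exception frame.  In C terms the dispatch below is roughly:
 *
 *	if (r0 < NR_syscalls)
 *		r3 = sys_call_table[r0](r3, r4, r5, r6, r7, r8);
 *	else
 *		r3 = -ENOSYS;
 */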
350	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
351	.stabs	"entry_32.S",N_SO,0,0,0f
3520:
353
354_GLOBAL(DoSyscall)
355	stw	r3,ORIG_GPR3(r1)
356	li	r12,0
357	stw	r12,RESULT(r1)
358#ifdef CONFIG_TRACE_IRQFLAGS
359	/* Make sure interrupts are enabled */
360	mfmsr	r11
361	andi.	r12,r11,MSR_EE
362	/* If we came in with interrupts disabled, WARN and mark them enabled
363	 * for lockdep now */
3640:	tweqi	r12, 0
365	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
366#endif /* CONFIG_TRACE_IRQFLAGS */
367	lwz	r11,TI_FLAGS(r2)
368	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
369	bne-	syscall_dotrace
370syscall_dotrace_cont:
371	cmplwi	0,r0,NR_syscalls
372	lis	r10,sys_call_table@h
373	ori	r10,r10,sys_call_table@l
374	slwi	r0,r0,2
375	bge-	66f
376
377	barrier_nospec_asm
378	/*
379	 * Prevent the load of the handler below (based on the user-passed
380	 * system call number) from being speculatively executed until the
381	 * test against NR_syscalls and the branch to 66f above have
382	 * committed.
383	 */
384
385	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
386	mtlr	r10
387	addi	r9,r1,STACK_FRAME_OVERHEAD
388	PPC440EP_ERR42
389	blrl			/* Call handler */
390	.globl	ret_from_syscall
391ret_from_syscall:
392#ifdef CONFIG_DEBUG_RSEQ
393	/* Check whether the syscall is issued inside a restartable sequence */
394	stw	r3,GPR3(r1)
395	addi    r3,r1,STACK_FRAME_OVERHEAD
396	bl      rseq_syscall
397	lwz	r3,GPR3(r1)
398#endif
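	/*
	 * Syscall return convention: a value in [-MAX_ERRNO, -1] is an error,
	 * in which case it is negated and the SO bit is set in CR0 so that
	 * user space can detect the failure; anything else is returned in r3
	 * unchanged.
	 */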
399	mr	r6,r3
400	/* disable interrupts so current_thread_info()->flags can't change */
401	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
402	/* Note: We don't bother telling lockdep about it */
403	SYNC
404	MTMSRD(r10)
405	lwz	r9,TI_FLAGS(r2)
406	li	r8,-MAX_ERRNO
407	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
408	bne-	syscall_exit_work
409	cmplw	0,r3,r8
410	blt+	syscall_exit_cont
411	lwz	r11,_CCR(r1)			/* Load CR */
412	neg	r3,r3
413	oris	r11,r11,0x1000	/* Set SO bit in CR */
414	stw	r11,_CCR(r1)
415syscall_exit_cont:
416	lwz	r8,_MSR(r1)
417#ifdef CONFIG_TRACE_IRQFLAGS
418	/* If we are going to return from the syscall with interrupts
419	 * off, we trace that here. It shouldn't normally happen.
420	 */
421	andi.	r10,r8,MSR_EE
422	bne+	1f
423	stw	r3,GPR3(r1)
424	bl      trace_hardirqs_off
425	lwz	r3,GPR3(r1)
4261:
427#endif /* CONFIG_TRACE_IRQFLAGS */
428#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
429	/* If the process has its own DBCR0 value, load it up.  The internal
430	   debug mode bit tells us that dbcr0 should be loaded. */
431	lwz	r0,THREAD+THREAD_DBCR0(r2)
432	andis.	r10,r0,DBCR0_IDM@h
433	bnel-	load_dbcr0
434#endif
435#ifdef CONFIG_44x
436BEGIN_MMU_FTR_SECTION
437	lis	r4,icache_44x_need_flush@ha
438	lwz	r5,icache_44x_need_flush@l(r4)
439	cmplwi	cr0,r5,0
440	bne-	2f
4411:
442END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
443#endif /* CONFIG_44x */
444BEGIN_FTR_SECTION
445	lwarx	r7,0,r1
446END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
447	stwcx.	r0,0,r1			/* to clear the reservation */
448	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
449#ifdef CONFIG_PPC_BOOK3S_32
450	kuep_unlock r5, r7
451#endif
452	kuap_check r2, r4
453	lwz	r4,_LINK(r1)
454	lwz	r5,_CCR(r1)
455	mtlr	r4
456	mtcr	r5
457	lwz	r7,_NIP(r1)
458	lwz	r2,GPR2(r1)
459	lwz	r1,GPR1(r1)
460#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
461	mtspr	SPRN_NRI, r0
462#endif
463	mtspr	SPRN_SRR0,r7
464	mtspr	SPRN_SRR1,r8
465	SYNC
466	RFI
467#ifdef CONFIG_44x
4682:	li	r7,0
469	iccci	r0,r0
470	stw	r7,icache_44x_need_flush@l(r4)
471	b	1b
472#endif  /* CONFIG_44x */
473
47466:	li	r3,-ENOSYS
475	b	ret_from_syscall
476
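/*
 * A newly created task first runs from one of the two entry points below
 * (its saved NIP is set up by copy_thread).  Both call schedule_tail() to
 * finish the context switch; a forked child then returns 0 through the
 * normal syscall exit path, while a kernel thread calls the function in
 * r14 with the argument in r15 before exiting the same way.
 */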
477	.globl	ret_from_fork
478ret_from_fork:
479	REST_NVGPRS(r1)
480	bl	schedule_tail
481	li	r3,0
482	b	ret_from_syscall
483
484	.globl	ret_from_kernel_thread
485ret_from_kernel_thread:
486	REST_NVGPRS(r1)
487	bl	schedule_tail
488	mtlr	r14
489	mr	r3,r15
490	PPC440EP_ERR42
491	blrl
492	li	r3,0
493	b	ret_from_syscall
494
495/* Traced system call support */
496syscall_dotrace:
497	SAVE_NVGPRS(r1)
498	li	r0,0xc00
499	stw	r0,_TRAP(r1)
500	addi	r3,r1,STACK_FRAME_OVERHEAD
501	bl	do_syscall_trace_enter
502	/*
503	 * Restore the argument registers, which may have just been changed.
504	 * We use the return value of do_syscall_trace_enter
505	 * as the syscall number to look up in the table (r0).
506	 */
507	mr	r0,r3
508	lwz	r3,GPR3(r1)
509	lwz	r4,GPR4(r1)
510	lwz	r5,GPR5(r1)
511	lwz	r6,GPR6(r1)
512	lwz	r7,GPR7(r1)
513	lwz	r8,GPR8(r1)
514	REST_NVGPRS(r1)
515
516	cmplwi	r0,NR_syscalls
517	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
518	bge-	ret_from_syscall
519	b	syscall_dotrace_cont
520
521syscall_exit_work:
522	andi.	r0,r9,_TIF_RESTOREALL
523	beq+	0f
524	REST_NVGPRS(r1)
525	b	2f
5260:	cmplw	0,r3,r8
527	blt+	1f
528	andi.	r0,r9,_TIF_NOERROR
529	bne-	1f
530	lwz	r11,_CCR(r1)			/* Load CR */
531	neg	r3,r3
532	oris	r11,r11,0x1000	/* Set SO bit in CR */
533	stw	r11,_CCR(r1)
534
5351:	stw	r6,RESULT(r1)	/* Save result */
536	stw	r3,GPR3(r1)	/* Update return value */
5372:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
538	beq	4f
539
540	/* Clear per-syscall TIF flags if any are set.  */
541
542	li	r11,_TIF_PERSYSCALL_MASK
543	addi	r12,r2,TI_FLAGS
5443:	lwarx	r8,0,r12
545	andc	r8,r8,r11
546#ifdef CONFIG_IBM405_ERR77
547	dcbt	0,r12
548#endif
549	stwcx.	r8,0,r12
550	bne-	3b
551
5524:	/* Anything which requires enabling interrupts? */
553	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
554	beq	ret_from_except
555
556	/* Re-enable interrupts. There is no need to trace that with
557	 * lockdep as we are supposed to have IRQs on at this point
558	 */
559	ori	r10,r10,MSR_EE
560	SYNC
561	MTMSRD(r10)
562
563	/* Save NVGPRS if they're not saved already */
564	lwz	r4,_TRAP(r1)
565	andi.	r4,r4,1
566	beq	5f
567	SAVE_NVGPRS(r1)
568	li	r4,0xc00
569	stw	r4,_TRAP(r1)
5705:
571	addi	r3,r1,STACK_FRAME_OVERHEAD
572	bl	do_syscall_trace_leave
573	b	ret_from_except_full
574
575/*
576 * The fork/clone functions need to copy the full register set into
577 * the child process. Therefore we need to save all the nonvolatile
578 * registers (r13 - r31) before calling the C code.
579 */
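/*
 * The low bit of _TRAP(r1) is used as a flag: when it is set, only the
 * volatile registers were saved in the frame; clearing it after
 * SAVE_NVGPRS (as done below) records that the full register set is
 * present.
 */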
580	.globl	ppc_fork
581ppc_fork:
582	SAVE_NVGPRS(r1)
583	lwz	r0,_TRAP(r1)
584	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
585	stw	r0,_TRAP(r1)		/* register set saved */
586	b	sys_fork
587
588	.globl	ppc_vfork
589ppc_vfork:
590	SAVE_NVGPRS(r1)
591	lwz	r0,_TRAP(r1)
592	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
593	stw	r0,_TRAP(r1)		/* register set saved */
594	b	sys_vfork
595
596	.globl	ppc_clone
597ppc_clone:
598	SAVE_NVGPRS(r1)
599	lwz	r0,_TRAP(r1)
600	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
601	stw	r0,_TRAP(r1)		/* register set saved */
602	b	sys_clone
603
604	.globl	ppc_clone3
605ppc_clone3:
606	SAVE_NVGPRS(r1)
607	lwz	r0,_TRAP(r1)
608	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
609	stw	r0,_TRAP(r1)		/* register set saved */
610	b	sys_clone3
611
612	.globl	ppc_swapcontext
613ppc_swapcontext:
614	SAVE_NVGPRS(r1)
615	lwz	r0,_TRAP(r1)
616	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
617	stw	r0,_TRAP(r1)		/* register set saved */
618	b	sys_swapcontext
619
620/*
621 * Top-level page fault handling.
622 * This is in assembler because if do_page_fault tells us that
623 * it is a bad kernel page fault, we want to save the non-volatile
624 * registers before calling bad_page_fault.
625 */
626	.globl	handle_page_fault
627handle_page_fault:
628	stw	r4,_DAR(r1)
629	addi	r3,r1,STACK_FRAME_OVERHEAD
630#ifdef CONFIG_PPC_BOOK3S_32
631	andis.  r0,r5,DSISR_DABRMATCH@h
632	bne-    handle_dabr_fault
633#endif
634	bl	do_page_fault
635	cmpwi	r3,0
636	beq+	ret_from_except
637	SAVE_NVGPRS(r1)
638	lwz	r0,_TRAP(r1)
639	clrrwi	r0,r0,1
640	stw	r0,_TRAP(r1)
641	mr	r5,r3
642	addi	r3,r1,STACK_FRAME_OVERHEAD
643	lwz	r4,_DAR(r1)
644	bl	bad_page_fault
645	b	ret_from_except_full
646
647#ifdef CONFIG_PPC_BOOK3S_32
648	/* We have a data breakpoint exception - handle it */
649handle_dabr_fault:
650	SAVE_NVGPRS(r1)
651	lwz	r0,_TRAP(r1)
652	clrrwi	r0,r0,1
653	stw	r0,_TRAP(r1)
654	bl      do_break
655	b	ret_from_except_full
656#endif
657
658/*
659 * This routine switches between two different tasks.  The process
660 * state of one is saved on its kernel stack.  Then the state
661 * of the other is restored from its kernel stack.  The memory
662 * management hardware is updated to the second process's state.
663 * Finally, we can return to the second process.
664 * On entry, r3 points to the THREAD for the current task, r4
665 * points to the THREAD for the new task.
666 *
667 * This routine is always called with interrupts disabled.
668 *
669 * Note: there are two ways to get to the "going out" portion
670 * of this code; either by coming in via the entry (_switch)
671 * or via "fork" which must set up an environment equivalent
672 * to the "_switch" path.  If you change this, you'll have to
673 * change the fork code also.
674 *
675 * The code which creates the new task context is in 'copy_thread'
676 * in arch/powerpc/kernel/process.c
677 */
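/*
 * _switch returns, in r3, the task that was current on entry, i.e. the
 * 'last' value the C caller of switch_to() expects.
 */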
678_GLOBAL(_switch)
679	stwu	r1,-INT_FRAME_SIZE(r1)
680	mflr	r0
681	stw	r0,INT_FRAME_SIZE+4(r1)
682	/* r3-r12 are caller saved -- Cort */
683	SAVE_NVGPRS(r1)
684	stw	r0,_NIP(r1)	/* Return to switch caller */
685	mfmsr	r11
686	li	r0,MSR_FP	/* Disable floating-point */
687#ifdef CONFIG_ALTIVEC
688BEGIN_FTR_SECTION
689	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
690	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
691	stw	r12,THREAD+THREAD_VRSAVE(r2)
692END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
693#endif /* CONFIG_ALTIVEC */
694#ifdef CONFIG_SPE
695BEGIN_FTR_SECTION
696	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
697	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
698	stw	r12,THREAD+THREAD_SPEFSCR(r2)
699END_FTR_SECTION_IFSET(CPU_FTR_SPE)
700#endif /* CONFIG_SPE */
701	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
702	beq+	1f
703	andc	r11,r11,r0
704	MTMSRD(r11)
705	isync
7061:	stw	r11,_MSR(r1)
707	mfcr	r10
708	stw	r10,_CCR(r1)
709	stw	r1,KSP(r3)	/* Set old stack pointer */
710
711	kuap_check r2, r0
712#ifdef CONFIG_SMP
713	/* We need a sync somewhere here to make sure that if the
714	 * previous task gets rescheduled on another CPU, it sees all
715	 * stores it has performed on this one.
716	 */
717	sync
718#endif /* CONFIG_SMP */
719
720	tophys(r0,r4)
721	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
722	lwz	r1,KSP(r4)	/* Load new stack pointer */
723
724	/* save the old current 'last' for return value */
725	mr	r3,r2
726	addi	r2,r4,-THREAD	/* Update current */
727
728#ifdef CONFIG_ALTIVEC
729BEGIN_FTR_SECTION
730	lwz	r0,THREAD+THREAD_VRSAVE(r2)
731	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
732END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
733#endif /* CONFIG_ALTIVEC */
734#ifdef CONFIG_SPE
735BEGIN_FTR_SECTION
736	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
737	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
738END_FTR_SECTION_IFSET(CPU_FTR_SPE)
739#endif /* CONFIG_SPE */
740
741	lwz	r0,_CCR(r1)
742	mtcrf	0xFF,r0
743	/* r3-r12 are destroyed -- Cort */
744	REST_NVGPRS(r1)
745
746	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
747	mtlr	r4
748	addi	r1,r1,INT_FRAME_SIZE
749	blr
750
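/*
 * fast_exception_return: return from an exception without going through
 * the full ret_from_except path.  It expects r11 to point to the exception
 * frame, r9 to hold the MSR to return with and r12 the NIP to return to,
 * and restores only the registers the fast path clobbered.
 */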
751	.globl	fast_exception_return
752fast_exception_return:
753#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
754	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
755	beq	1f			/* if not, we've got problems */
756#endif
757
7582:	REST_4GPRS(3, r11)
759	lwz	r10,_CCR(r11)
760	REST_GPR(1, r11)
761	mtcr	r10
762	lwz	r10,_LINK(r11)
763	mtlr	r10
764	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
765	li	r10, 0
766	stw	r10, 8(r11)
767	REST_GPR(10, r11)
768#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
769	mtspr	SPRN_NRI, r0
770#endif
771	mtspr	SPRN_SRR1,r9
772	mtspr	SPRN_SRR0,r12
773	REST_GPR(9, r11)
774	REST_GPR(12, r11)
775	lwz	r11,GPR11(r11)
776	SYNC
777	RFI
778
779#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
780/* check if the exception happened in a restartable section */
7811:	lis	r3,exc_exit_restart_end@ha
782	addi	r3,r3,exc_exit_restart_end@l
783	cmplw	r12,r3
784#ifdef CONFIG_PPC_BOOK3S_601
785	bge	2b
786#else
787	bge	3f
788#endif
789	lis	r4,exc_exit_restart@ha
790	addi	r4,r4,exc_exit_restart@l
791	cmplw	r12,r4
792#ifdef CONFIG_PPC_BOOK3S_601
793	blt	2b
794#else
795	blt	3f
796#endif
797	lis	r3,fee_restarts@ha
798	tophys(r3,r3)
799	lwz	r5,fee_restarts@l(r3)
800	addi	r5,r5,1
801	stw	r5,fee_restarts@l(r3)
802	mr	r12,r4		/* restart at exc_exit_restart */
803	b	2b
804
805	.section .bss
806	.align	2
807fee_restarts:
808	.space	4
809	.previous
810
811/* aargh, a nonrecoverable interrupt, panic */
812/* aargh, we don't know which trap this is */
813/* but the 601 doesn't implement the RI bit, so assume it's OK */
8143:
815	li	r10,-1
816	stw	r10,_TRAP(r11)
817	addi	r3,r1,STACK_FRAME_OVERHEAD
818	lis	r10,MSR_KERNEL@h
819	ori	r10,r10,MSR_KERNEL@l
820	bl	transfer_to_handler_full
821	.long	unrecoverable_exception
822	.long	ret_from_except
823#endif
824
825	.globl	ret_from_except_full
826ret_from_except_full:
827	REST_NVGPRS(r1)
828	/* fall through */
829
830	.globl	ret_from_except
831ret_from_except:
832	/* Hard-disable interrupts so that current_thread_info()->flags
833	 * can't change between when we test it and when we return
834	 * from the interrupt. */
835	/* Note: We don't bother telling lockdep about it */
836	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
837	SYNC			/* Some chip revs have problems here... */
838	MTMSRD(r10)		/* disable interrupts */
839
840	lwz	r3,_MSR(r1)	/* Returning to user mode? */
841	andi.	r0,r3,MSR_PR
842	beq	resume_kernel
843
844user_exc_return:		/* r10 contains MSR_KERNEL here */
845	/* Check current_thread_info()->flags */
846	lwz	r9,TI_FLAGS(r2)
847	andi.	r0,r9,_TIF_USER_WORK_MASK
848	bne	do_work
849
850restore_user:
851#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
852	/* Check whether this process has its own DBCR0 value.  The internal
853	   debug mode bit tells us that dbcr0 should be loaded. */
854	lwz	r0,THREAD+THREAD_DBCR0(r2)
855	andis.	r10,r0,DBCR0_IDM@h
856	bnel-	load_dbcr0
857#endif
858	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
859#ifdef CONFIG_PPC_BOOK3S_32
860	kuep_unlock	r10, r11
861#endif
862
863	b	restore
864
865/* N.B. the only way to get here is from the beq following ret_from_except. */
866resume_kernel:
867	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
868	lwz	r8,TI_FLAGS(r2)
869	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
870	beq+	1f
871
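	/*
	 * _TIF_EMULATE_STACK_STORE means an stwu updating r1 was emulated
	 * (e.g. while single-stepping a probed instruction) and the actual
	 * store still has to be done.  Build a copy of the exception frame
	 * at the new, lower stack location, switch r1 to it, and then
	 * perform the delayed store of the previous stack pointer (the back
	 * chain) that the stwu would have written.
	 */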
872	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
873
874	lwz	r3,GPR1(r1)
875	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
876	mr	r4,r1			/* src:  current exception frame */
877	mr	r1,r3			/* Reroute the trampoline frame to r1 */
878
879	/* Copy from the original to the trampoline. */
880	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
881	li	r6,0			/* start offset: 0 */
882	mtctr	r5
8832:	lwzx	r0,r6,r4
884	stwx	r0,r6,r3
885	addi	r6,r6,4
886	bdnz	2b
887
888	/* Do real store operation to complete stwu */
889	lwz	r5,GPR1(r1)
890	stw	r8,0(r5)
891
892	/* Clear _TIF_EMULATE_STACK_STORE flag */
893	lis	r11,_TIF_EMULATE_STACK_STORE@h
894	addi	r5,r2,TI_FLAGS
8950:	lwarx	r8,0,r5
896	andc	r8,r8,r11
897#ifdef CONFIG_IBM405_ERR77
898	dcbt	0,r5
899#endif
900	stwcx.	r8,0,r5
901	bne-	0b
9021:
903
904#ifdef CONFIG_PREEMPT
905	/* check current_thread_info->preempt_count */
906	lwz	r0,TI_PREEMPT(r2)
907	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
908	bne	restore_kuap
909	andi.	r8,r8,_TIF_NEED_RESCHED
910	beq+	restore_kuap
911	lwz	r3,_MSR(r1)
912	andi.	r0,r3,MSR_EE	/* interrupts off? */
913	beq	restore_kuap	/* don't schedule if so */
914#ifdef CONFIG_TRACE_IRQFLAGS
915	/* Lockdep thinks irqs are enabled; we need to call
916	 * preempt_schedule_irq with IRQs off, so we inform lockdep
917	 * now that we did turn them off already
918	 */
919	bl	trace_hardirqs_off
920#endif
921	bl	preempt_schedule_irq
922#ifdef CONFIG_TRACE_IRQFLAGS
923	/* And now, to properly rebalance the above, we tell lockdep they
924	 * are being turned back on, which will happen when we return
925	 */
926	bl	trace_hardirqs_on
927#endif
928#endif /* CONFIG_PREEMPT */
929restore_kuap:
930	kuap_restore r1, r2, r9, r10, r0
931
932	/* interrupts are hard-disabled at this point */
933restore:
934#ifdef CONFIG_44x
935BEGIN_MMU_FTR_SECTION
936	b	1f
937END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
938	lis	r4,icache_44x_need_flush@ha
939	lwz	r5,icache_44x_need_flush@l(r4)
940	cmplwi	cr0,r5,0
941	beq+	1f
942	li	r6,0
943	iccci	r0,r0
944	stw	r6,icache_44x_need_flush@l(r4)
9451:
946#endif  /* CONFIG_44x */
947
948	lwz	r9,_MSR(r1)
949#ifdef CONFIG_TRACE_IRQFLAGS
950	/* Lockdep doesn't know that IRQs are temporarily turned off in this
951	 * assembly code while peeking at TI_FLAGS() and such. However,
952	 * we need to inform it if the exception turned interrupts off and we
953	 * are about to turn them back on.
954	 */
955	andi.	r10,r9,MSR_EE
956	beq	1f
957	stwu	r1,-32(r1)
958	mflr	r0
959	stw	r0,4(r1)
960	bl	trace_hardirqs_on
961	addi	r1, r1, 32
962	lwz	r9,_MSR(r1)
9631:
964#endif /* CONFIG_TRACE_IRQFLAGS */
965
966	lwz	r0,GPR0(r1)
967	lwz	r2,GPR2(r1)
968	REST_4GPRS(3, r1)
969	REST_2GPRS(7, r1)
970
971	lwz	r10,_XER(r1)
972	lwz	r11,_CTR(r1)
973	mtspr	SPRN_XER,r10
974	mtctr	r11
975
976	PPC405_ERR77(0,r1)
977BEGIN_FTR_SECTION
978	lwarx	r11,0,r1
979END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
980	stwcx.	r0,0,r1			/* to clear the reservation */
981
982#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
983	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
984	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
985
986	lwz	r10,_CCR(r1)
987	lwz	r11,_LINK(r1)
988	mtcrf	0xFF,r10
989	mtlr	r11
990
991	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
992	li	r10, 0
993	stw	r10, 8(r1)
994	/*
995	 * Once we put values in SRR0 and SRR1, we are in a state
996	 * where exceptions are not recoverable, since taking an
997	 * exception will trash SRR0 and SRR1.  Therefore we clear the
998	 * MSR:RI bit to indicate this.  If we do take an exception,
999	 * we can't return to the point of the exception but we
1000	 * can restart the exception exit path at the label
1001	 * exc_exit_restart below.  -- paulus
1002	 */
1003	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
1004	SYNC
1005	MTMSRD(r10)		/* clear the RI bit */
1006	.globl exc_exit_restart
1007exc_exit_restart:
1008	lwz	r12,_NIP(r1)
1009	mtspr	SPRN_SRR0,r12
1010	mtspr	SPRN_SRR1,r9
1011	REST_4GPRS(9, r1)
1012	lwz	r1,GPR1(r1)
1013	.globl exc_exit_restart_end
1014exc_exit_restart_end:
1015	SYNC
1016	RFI
1017
1018#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
1019	/*
1020	 * This is a bit different on 4xx/Book-E because it doesn't have
1021	 * the RI bit in the MSR.
1022	 * The TLB miss handler checks if we have interrupted
1023	 * the exception exit path and restarts it if so
1024	 * (well maybe one day it will... :).
1025	 */
1026	lwz	r11,_LINK(r1)
1027	mtlr	r11
1028	lwz	r10,_CCR(r1)
1029	mtcrf	0xff,r10
1030	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
1031	li	r10, 0
1032	stw	r10, 8(r1)
1033	REST_2GPRS(9, r1)
1034	.globl exc_exit_restart
1035exc_exit_restart:
1036	lwz	r11,_NIP(r1)
1037	lwz	r12,_MSR(r1)
1038exc_exit_start:
1039	mtspr	SPRN_SRR0,r11
1040	mtspr	SPRN_SRR1,r12
1041	REST_2GPRS(11, r1)
1042	lwz	r1,GPR1(r1)
1043	.globl exc_exit_restart_end
1044exc_exit_restart_end:
1045	PPC405_ERR77_SYNC
1046	rfi
1047	b	.			/* prevent prefetch past rfi */
1048
1049/*
1050 * Returning from a critical interrupt in user mode doesn't need
1051 * to be any different from a normal exception.  For a critical
1052 * interrupt in the kernel, we just return (without checking for
1053 * preemption) since the interrupt may have happened at some crucial
1054 * place (e.g. inside the TLB miss handler), and because we will be
1055 * running with r1 pointing into critical_stack, not the current
1056 * process's kernel stack (and therefore current_thread_info() will
1057 * give the wrong answer).
1058 * We have to restore various SPRs that may have been in use at the
1059 * time of the critical interrupt.
1060 *
1061 */
1062#ifdef CONFIG_40x
1063#define PPC_40x_TURN_OFF_MSR_DR						    \
1064	/* avoid any possible TLB misses here by turning off MSR.DR; we	    \
1065	 * assume the instructions here are mapped by a pinned TLB entry */ \
1066	li	r10,MSR_IR;						    \
1067	mtmsr	r10;							    \
1068	isync;								    \
1069	tophys(r1, r1);
1070#else
1071#define PPC_40x_TURN_OFF_MSR_DR
1072#endif
1073
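/*
 * RET_FROM_EXC_LEVEL restores the register state saved by the
 * crit/debug/mcheck entry code and returns using the level-specific
 * save/restore SPR pair and rfi variant passed in (e.g. CSRR0/CSRR1 and
 * rfci for a critical interrupt).  RESTORE_xSRR below reloads the SRR
 * pairs of the lower exception levels that were saved on entry.
 */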
1074#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
1075	REST_NVGPRS(r1);						\
1076	lwz	r3,_MSR(r1);						\
1077	andi.	r3,r3,MSR_PR;						\
1078	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL);				\
1079	bne	user_exc_return;					\
1080	lwz	r0,GPR0(r1);						\
1081	lwz	r2,GPR2(r1);						\
1082	REST_4GPRS(3, r1);						\
1083	REST_2GPRS(7, r1);						\
1084	lwz	r10,_XER(r1);						\
1085	lwz	r11,_CTR(r1);						\
1086	mtspr	SPRN_XER,r10;						\
1087	mtctr	r11;							\
1088	PPC405_ERR77(0,r1);						\
1089	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1090	lwz	r11,_LINK(r1);						\
1091	mtlr	r11;							\
1092	lwz	r10,_CCR(r1);						\
1093	mtcrf	0xff,r10;						\
1094	PPC_40x_TURN_OFF_MSR_DR;					\
1095	lwz	r9,_DEAR(r1);						\
1096	lwz	r10,_ESR(r1);						\
1097	mtspr	SPRN_DEAR,r9;						\
1098	mtspr	SPRN_ESR,r10;						\
1099	lwz	r11,_NIP(r1);						\
1100	lwz	r12,_MSR(r1);						\
1101	mtspr	exc_lvl_srr0,r11;					\
1102	mtspr	exc_lvl_srr1,r12;					\
1103	lwz	r9,GPR9(r1);						\
1104	lwz	r12,GPR12(r1);						\
1105	lwz	r10,GPR10(r1);						\
1106	lwz	r11,GPR11(r1);						\
1107	lwz	r1,GPR1(r1);						\
1108	PPC405_ERR77_SYNC;						\
1109	exc_lvl_rfi;							\
1110	b	.;		/* prevent prefetch past exc_lvl_rfi */
1111
1112#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1113	lwz	r9,_##exc_lvl_srr0(r1);					\
1114	lwz	r10,_##exc_lvl_srr1(r1);				\
1115	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1116	mtspr	SPRN_##exc_lvl_srr1,r10;
1117
1118#if defined(CONFIG_PPC_BOOK3E_MMU)
1119#ifdef CONFIG_PHYS_64BIT
1120#define	RESTORE_MAS7							\
1121	lwz	r11,MAS7(r1);						\
1122	mtspr	SPRN_MAS7,r11;
1123#else
1124#define	RESTORE_MAS7
1125#endif /* CONFIG_PHYS_64BIT */
1126#define RESTORE_MMU_REGS						\
1127	lwz	r9,MAS0(r1);						\
1128	lwz	r10,MAS1(r1);						\
1129	lwz	r11,MAS2(r1);						\
1130	mtspr	SPRN_MAS0,r9;						\
1131	lwz	r9,MAS3(r1);						\
1132	mtspr	SPRN_MAS1,r10;						\
1133	lwz	r10,MAS6(r1);						\
1134	mtspr	SPRN_MAS2,r11;						\
1135	mtspr	SPRN_MAS3,r9;						\
1136	mtspr	SPRN_MAS6,r10;						\
1137	RESTORE_MAS7;
1138#elif defined(CONFIG_44x)
1139#define RESTORE_MMU_REGS						\
1140	lwz	r9,MMUCR(r1);						\
1141	mtspr	SPRN_MMUCR,r9;
1142#else
1143#define RESTORE_MMU_REGS
1144#endif
1145
1146#ifdef CONFIG_40x
1147	.globl	ret_from_crit_exc
1148ret_from_crit_exc:
1149	mfspr	r9,SPRN_SPRG_THREAD
1150	lis	r10,saved_ksp_limit@ha;
1151	lwz	r10,saved_ksp_limit@l(r10);
1152	tovirt(r9,r9);
1153	stw	r10,KSP_LIMIT(r9)
1154	lis	r9,crit_srr0@ha;
1155	lwz	r9,crit_srr0@l(r9);
1156	lis	r10,crit_srr1@ha;
1157	lwz	r10,crit_srr1@l(r10);
1158	mtspr	SPRN_SRR0,r9;
1159	mtspr	SPRN_SRR1,r10;
1160	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1161#endif /* CONFIG_40x */
1162
1163#ifdef CONFIG_BOOKE
1164	.globl	ret_from_crit_exc
1165ret_from_crit_exc:
1166	mfspr	r9,SPRN_SPRG_THREAD
1167	lwz	r10,SAVED_KSP_LIMIT(r1)
1168	stw	r10,KSP_LIMIT(r9)
1169	RESTORE_xSRR(SRR0,SRR1);
1170	RESTORE_MMU_REGS;
1171	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1172
1173	.globl	ret_from_debug_exc
1174ret_from_debug_exc:
1175	mfspr	r9,SPRN_SPRG_THREAD
1176	lwz	r10,SAVED_KSP_LIMIT(r1)
1177	stw	r10,KSP_LIMIT(r9)
1178	RESTORE_xSRR(SRR0,SRR1);
1179	RESTORE_xSRR(CSRR0,CSRR1);
1180	RESTORE_MMU_REGS;
1181	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1182
1183	.globl	ret_from_mcheck_exc
1184ret_from_mcheck_exc:
1185	mfspr	r9,SPRN_SPRG_THREAD
1186	lwz	r10,SAVED_KSP_LIMIT(r1)
1187	stw	r10,KSP_LIMIT(r9)
1188	RESTORE_xSRR(SRR0,SRR1);
1189	RESTORE_xSRR(CSRR0,CSRR1);
1190	RESTORE_xSRR(DSRR0,DSRR1);
1191	RESTORE_MMU_REGS;
1192	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1193#endif /* CONFIG_BOOKE */
1194
1195/*
1196 * Load the DBCR0 value for a task that is being ptraced,
1197 * having first saved away the global DBCR0.  Note that r0
1198 * has the dbcr0 value to set upon entry to this.
1199 */
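/*
 * Layout of global_dbcr0 (defined below): 8 bytes per CPU, the first word
 * holding the saved global DBCR0 value and the second a usage count
 * (incremented here, decremented again when the global value is reloaded
 * in transfer_to_handler).
 */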
1200load_dbcr0:
1201	mfmsr	r10		/* first disable debug exceptions */
1202	rlwinm	r10,r10,0,~MSR_DE
1203	mtmsr	r10
1204	isync
1205	mfspr	r10,SPRN_DBCR0
1206	lis	r11,global_dbcr0@ha
1207	addi	r11,r11,global_dbcr0@l
1208#ifdef CONFIG_SMP
1209	lwz	r9,TASK_CPU(r2)
1210	slwi	r9,r9,3
1211	add	r11,r11,r9
1212#endif
1213	stw	r10,0(r11)
1214	mtspr	SPRN_DBCR0,r0
1215	lwz	r10,4(r11)
1216	addi	r10,r10,1
1217	stw	r10,4(r11)
1218	li	r11,-1
1219	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1220	blr
1221
1222	.section .bss
1223	.align	4
1224	.global global_dbcr0
1225global_dbcr0:
1226	.space	8*NR_CPUS
1227	.previous
1228#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1229
1230do_work:			/* r10 contains MSR_KERNEL here */
1231	andi.	r0,r9,_TIF_NEED_RESCHED
1232	beq	do_user_signal
1233
1234do_resched:			/* r10 contains MSR_KERNEL here */
1235#ifdef CONFIG_TRACE_IRQFLAGS
1236	bl	trace_hardirqs_on
1237	mfmsr	r10
1238#endif
1239	ori	r10,r10,MSR_EE
1240	SYNC
1241	MTMSRD(r10)		/* hard-enable interrupts */
1242	bl	schedule
1243recheck:
1244	/* Note: we don't tell lockdep we are disabling interrupts again
1245	 * either. These disable/enable cycles used to peek at
1246	 * TI_FLAGS aren't advertised.
1247	 */
1248	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
1249	SYNC
1250	MTMSRD(r10)		/* disable interrupts */
1251	lwz	r9,TI_FLAGS(r2)
1252	andi.	r0,r9,_TIF_NEED_RESCHED
1253	bne-	do_resched
1254	andi.	r0,r9,_TIF_USER_WORK_MASK
1255	beq	restore_user
1256do_user_signal:			/* r10 contains MSR_KERNEL here */
1257	ori	r10,r10,MSR_EE
1258	SYNC
1259	MTMSRD(r10)		/* hard-enable interrupts */
1260	/* save r13-r31 in the exception frame, if not already done */
1261	lwz	r3,_TRAP(r1)
1262	andi.	r0,r3,1
1263	beq	2f
1264	SAVE_NVGPRS(r1)
1265	rlwinm	r3,r3,0,0,30
1266	stw	r3,_TRAP(r1)
12672:	addi	r3,r1,STACK_FRAME_OVERHEAD
1268	mr	r4,r9
1269	bl	do_notify_resume
1270	REST_NVGPRS(r1)
1271	b	recheck
1272
1273/*
1274 * We come here when we are at the end of handling an exception
1275 * that occurred at a place where taking an exception will lose
1276 * state information, such as the contents of SRR0 and SRR1.
1277 */
1278nonrecoverable:
1279	lis	r10,exc_exit_restart_end@ha
1280	addi	r10,r10,exc_exit_restart_end@l
1281	cmplw	r12,r10
1282#ifdef CONFIG_PPC_BOOK3S_601
1283	bgelr
1284#else
1285	bge	3f
1286#endif
1287	lis	r11,exc_exit_restart@ha
1288	addi	r11,r11,exc_exit_restart@l
1289	cmplw	r12,r11
1290#ifdef CONFIG_PPC_BOOK3S_601
1291	bltlr
1292#else
1293	blt	3f
1294#endif
1295	lis	r10,ee_restarts@ha
1296	lwz	r12,ee_restarts@l(r10)
1297	addi	r12,r12,1
1298	stw	r12,ee_restarts@l(r10)
1299	mr	r12,r11		/* restart at exc_exit_restart */
1300	blr
13013:	/* OK, we can't recover, kill this process */
1302	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1303	lwz	r3,_TRAP(r1)
1304	andi.	r0,r3,1
1305	beq	5f
1306	SAVE_NVGPRS(r1)
1307	rlwinm	r3,r3,0,0,30
1308	stw	r3,_TRAP(r1)
13095:	mfspr	r2,SPRN_SPRG_THREAD
1310	addi	r2,r2,-THREAD
1311	tovirt(r2,r2)			/* set back r2 to current */
13124:	addi	r3,r1,STACK_FRAME_OVERHEAD
1313	bl	unrecoverable_exception
1314	/* shouldn't return */
1315	b	4b
1316
1317	.section .bss
1318	.align	2
1319ee_restarts:
1320	.space	4
1321	.previous
1322
1323/*
1324 * PROM code for specific machines follows.  Put it
1325 * here so it's easy to add arch-specific sections later.
1326 * -- Cort
1327 */
1328#ifdef CONFIG_PPC_RTAS
1329/*
1330 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1331 * called with the MMU off.
1332 */
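/*
 * r3, which carries the RTAS argument block pointer from the C caller, is
 * left untouched for RTAS itself.  Below we load the RTAS entry point and
 * base, save the current MSR, and rfi into RTAS with the MMU off
 * (MSR_IR/MSR_DR cleared), returning via the physical address loaded into
 * the LR.
 */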
1333_GLOBAL(enter_rtas)
1334	stwu	r1,-INT_FRAME_SIZE(r1)
1335	mflr	r0
1336	stw	r0,INT_FRAME_SIZE+4(r1)
1337	LOAD_REG_ADDR(r4, rtas)
1338	lis	r6,1f@ha	/* physical return address for rtas */
1339	addi	r6,r6,1f@l
1340	tophys(r6,r6)
1341	tophys(r7,r1)
1342	lwz	r8,RTASENTRY(r4)
1343	lwz	r4,RTASBASE(r4)
1344	mfmsr	r9
1345	stw	r9,8(r1)
1346	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
1347	SYNC			/* disable interrupts so SRR0/1 */
1348	MTMSRD(r0)		/* don't get trashed */
1349	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1350	mtlr	r6
1351	stw	r7, THREAD + RTAS_SP(r2)
1352	mtspr	SPRN_SRR0,r8
1353	mtspr	SPRN_SRR1,r9
1354	RFI
13551:	tophys(r9,r1)
1356	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1357	lwz	r9,8(r9)	/* original msr value */
1358	addi	r1,r1,INT_FRAME_SIZE
1359	li	r0,0
1360	tophys(r7, r2)
1361	stw	r0, THREAD + RTAS_SP(r7)
1362	mtspr	SPRN_SRR0,r8
1363	mtspr	SPRN_SRR1,r9
1364	RFI			/* return to caller */
1365
1366	.globl	machine_check_in_rtas
1367machine_check_in_rtas:
1368	twi	31,0,0
1369	/* XXX load up BATs and panic */
1370
1371#endif /* CONFIG_PPC_RTAS */
1372