/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>

#include "head_32.h"

/*
 * powerpc relies on return from interrupt/syscall being context synchronising
 * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
 * synchronisation instructions.
 */

/*
 * Align to 4k (.align 12 gives a 2^12 = 4096-byte boundary) to ensure
 * that all functions modifying srr0/srr1 fit into one page, so that no
 * TLB miss can be taken between the modification of srr0/srr1 and the
 * associated rfi.
 */
	.align	12

#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */
_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler)

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */
_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler)

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* set the stack limit to the current stack */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
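	/*
	 * Illustrative note: the rlwinm below keeps the high
	 * (32 - THREAD_SHIFT) bits of r1, i.e. it clears the low
	 * THREAD_SHIFT bits and rounds r1 down to the base of the current
	 * kernel stack (e.g. a 2^13 = 8k boundary when THREAD_SHIFT is 13).
	 */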
	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* set the stack limit to the current stack */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwinm	r0,r1,0,0,(31 - THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	tovirt_vmstack r12, r12
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r2, r12, -THREAD
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
#endif
	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_lock r11, r12
#endif
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	lwz	r9,TASK_CPU(r2)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
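	/*
	 * Note (layout inferred from the code and the .bss definition at the
	 * end of this file): global_dbcr0 holds one 8-byte slot per CPU,
	 * hence the "slwi r9,r9,3" scaling of the CPU number above; word 0
	 * of a slot is the saved DBCR0 value and word 1 is a usage count,
	 * decremented here and incremented in load_dbcr0.
	 */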
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif

	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	kuap_save_and_lock r11, r12, r9, r2, r6
	addi	r2, r12, -THREAD
#ifndef CONFIG_VMAP_STACK
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
#endif
5:
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
	lwz	r12,TI_LOCAL_FLAGS(r2)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	tovirt_novmstack r2, r2 	/* set r2 to current */
	tovirt_vmstack r9, r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * When tracing IRQ state (lockdep) we enable the MMU before we call
	 * the IRQ tracing functions as they might access vmalloc space or
	 * perform IOs for console output.
	 *
	 * To speed up the syscall path where interrupts stay on, let's check
	 * first if we are changing the MSR value at all.
	 */
	tophys_novmstack r12, r1
	lwz	r12,_MSR(r12)
	andi.	r12,r12,MSR_EE
	bne	1f

	/* MSR isn't changing, just transition directly */
#endif
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	RFI				/* jump to handler, enable MMU */
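	/*
	 * Sketch of the rfi idiom used throughout this file: SRR0 is loaded
	 * with the target PC and SRR1 with the target MSR, and rfi then
	 * updates NIA and MSR together, so the jump and the change of
	 * translation/privilege state take effect atomically.
	 */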

#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r2)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	kuap_restore r11, r2, r3, r4, r5
	lwz	r2, GPR2(r11)
	b	fast_exception_return
#endif
_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)

#ifdef CONFIG_TRACE_IRQFLAGS
1:	/* MSR is changing, re-enable MMU so we can notify lockdep. We need to
	 * keep interrupts disabled at this point otherwise we might risk
	 * taking an interrupt before we tell lockdep they are enabled.
	 */
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r0
	RFI

reenable_mmu:
	/*
	 * We save a bunch of GPRs:
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and handler address respectively, and
	 * r4 & r5 can contain page fault arguments that need to be passed
	 * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left
	 * clobbered as they aren't useful past this point.
	 */

	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)
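	/*
	 * Temporary frame layout (sketch, read off the stores above):
	 *    0(r1)  back chain written by stwu
	 *    8(r1)  r9  - old MSR
	 *   12(r1)  r11 - handler address
	 *   16(r1)  r3, 20(r1) r4, 24(r1) r5 - possible page-fault arguments
	 */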

	/* If we are disabling interrupts (normal case), simply log it with
	 * lockdep
	 */
1:	bl	trace_hardirqs_off
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#endif /* CONFIG_TRACE_IRQFLAGS */

#ifndef CONFIG_VMAP_STACK
/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	RFI
_ASM_NOKPROBE_SYMBOL(stack_ovf)
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
trace_syscall_entry_irq_off:
	/*
	 * Syscall shouldn't happen while interrupts are disabled,
	 * so let's do a warning here.
	 */
0:	trap
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
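	/*
	 * Note: the "0: trap" plus EMIT_BUG_ENTRY(..., BUGFLAG_WARNING) pair
	 * is effectively WARN_ON(1) in assembly: the trap raises a program
	 * check, the bug handler matches the faulting address against the
	 * bug-table entry emitted for label 0b, prints the warning and
	 * resumes at the next instruction.
	 */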
	bl	trace_hardirqs_on

	/* Now enable for real */
	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
	mtmsr	r10

	REST_GPR(0, r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)
	b	DoSyscall
#endif /* CONFIG_TRACE_IRQFLAGS */

	.globl	transfer_to_syscall
transfer_to_syscall:
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_lock r11, r12
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	andi.	r12,r9,MSR_EE
	beq-	trace_syscall_entry_irq_off
#endif /* CONFIG_TRACE_IRQFLAGS */

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Make sure interrupts are enabled */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	/* If we came in with interrupts disabled, WARN and mark them
	 * enabled for lockdep now */
0:	tweqi	r12, 0
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
#endif /* CONFIG_TRACE_IRQFLAGS */
	lwz	r11,TI_FLAGS(r2)
	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
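	/* Each sys_call_table entry is a 4-byte function pointer, so the
	 * syscall number is scaled by 4 (r0 << 2) to index the table. */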
	slwi	r0,r0,2
	bge-	66f

	barrier_nospec_asm
	/*
	 * Prevent the load of the handler below (based on the user-passed
	 * system call number) being speculatively executed until the test
	 * against NR_syscalls and the branch to 66f above have
	 * committed.
	 */

	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef CONFIG_DEBUG_RSEQ
	/* Check whether the syscall is issued inside a restartable sequence */
	stw	r3,GPR3(r1)
	addi    r3,r1,STACK_FRAME_OVERHEAD
	bl      rseq_syscall
	lwz	r3,GPR3(r1)
#endif
	mr	r6,r3
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	mtmsr	r10
	lwz	r9,TI_FLAGS(r2)
	li	r8,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
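	/*
	 * Sketch of the error-return convention applied above: handlers
	 * return -errno in r3; an (unsigned) value >= -MAX_ERRNO is treated
	 * as an error, negated back to the positive errno, and CR0[SO]
	 * (0x10000000 in the saved CR image) is set so userspace can
	 * distinguish failure from a large successful return value.
	 */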
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't normally happen.
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl      trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_unlock r5, r7
#endif
	kuap_check r2, r4
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
syscall_exit_finish:
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
_ASM_NOKPROBE_SYMBOL(syscall_exit_finish)
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)

	cmplwi	r0,NR_syscalls
	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	bge-	ret_from_syscall
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r2,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
	stwcx.	r8,0,r12
	bne-	3b
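	/*
	 * The 3: loop above is the usual lwarx/stwcx. read-modify-write:
	 * it atomically performs ti->flags &= ~_TIF_PERSYSCALL_MASK,
	 * retrying if another update to TI_FLAGS raced with ours.
	 */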

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	mtmsr	r10

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

	/*
	 * System call was called from kernel. We get here with SRR1 in r9.
	 * Mark the exception as recoverable once we have retrieved SRR0,
	 * trap a warning and return ENOSYS with CR[SO] set.
	 */
	.globl	ret_from_kernel_syscall
ret_from_kernel_syscall:
	mfspr	r9, SPRN_SRR0
	mfspr	r10, SPRN_SRR1
#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
	LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR))
	mtmsr	r11
#endif

0:	trap
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING

	li	r3, ENOSYS
	crset	so
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0, r9
	mtspr	SPRN_SRR1, r10
	RFI
_ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall)

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
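/*
 * Note on the _TRAP convention used below (and elsewhere in this file):
 * the low bit of _TRAP set means only the volatile registers are saved
 * in the frame; "rlwinm r0,r0,0,0,30" keeps bits 0-30 and so clears
 * bit 31 (the LSB), marking the register set as fully saved.
 */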
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_clone3
ppc_clone3:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone3

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S_32
	andis.  r0,r5,DSISR_DABRMATCH@h
	bne-    handle_dabr_fault
#endif
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

#ifdef CONFIG_PPC_BOOK3S_32
	/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	bl      do_break
	b	ret_from_except_full
#endif

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
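/*
 * Sketch of the calling convention (inferred from the register usage
 * below, not a definitive statement of the C-side API): the C caller
 * effectively does last = _switch(&prev->thread, &next->thread); the
 * outgoing task's stack pointer, MSR and CR are saved through r3, the
 * incoming task's state is loaded through r4, and r3 carries the old
 * `current` back to the caller in the new task's context.
 */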
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	mtmsr	r11
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

	kuap_check r2, r0
#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	RFI
_ASM_NOKPROBE_SYMBOL(fast_exception_return)

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	unrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
	mtmsr	r10		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	lwz	r9,TI_FLAGS(r2)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
	ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
#ifdef CONFIG_PPC_BOOK3S_32
	kuep_unlock	r10, r11
#endif

	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	lwz	r8,TI_FLAGS(r2)
	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	addi	r6,r6,4
	bdnz	2b
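	/*
	 * The 2: loop above is a word-at-a-time copy: CTR is preloaded with
	 * INT_FRAME_SIZE/4, so it behaves like memcpy(trampoline, frame,
	 * INT_FRAME_SIZE) for the whole exception frame.
	 */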

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	stw	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r2,TI_FLAGS
0:	lwarx	r8,0,r5
	andc	r8,r8,r11
	stwcx.	r8,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPTION
	/* check current_thread_info->preempt_count */
	lwz	r0,TI_PREEMPT(r2)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore_kuap
	andi.	r8,r8,_TIF_NEED_RESCHED
	beq+	restore_kuap
	lwz	r3,_MSR(r1)
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore_kuap	/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
	bl	preempt_schedule_irq
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPTION */
restore_kuap:
	kuap_restore r1, r2, r9, r10, r0

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know that IRQs are temporarily turned off in this
	 * assembly code while peeking at TI_FLAGS() and such. However, we
	 * need to inform it if the exception turned interrupts off and we
	 * are about to turn them back on.
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	bl	trace_hardirqs_on
	addi	r1, r1, 32
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r1)
	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
	mtmsr	r10		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	RFI
_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
_ASM_NOKPROBE_SYMBOL(exc_exit_restart_end)

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r1)
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	rfi
	b	.			/* prevent prefetch past rfi */
_ASM_NOKPROBE_SYMBOL(exc_exit_restart)

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;
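/*
 * Illustrative expansion: RESTORE_xSRR(CSRR0,CSRR1) token-pastes into
 * loads from the _CSRR0/_CSRR1 slots of the exception frame followed by
 * mtspr SPRN_CSRR0/SPRN_CSRR1, i.e. it puts the saved exception-level
 * save/restore registers back before the matching rfci/rfdi/rfmci.
 */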

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
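/*
 * Note: debug exceptions are disabled first (MSR_DE cleared, then
 * isync) so that a half-updated debug state cannot raise a debug
 * interrupt while DBCR0 is being swapped; the old DBCR0 goes into this
 * CPU's global_dbcr0 slot and the usage count in its second word is
 * bumped.
 */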
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	lwz	r9,TASK_CPU(r2)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
	.global global_dbcr0
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	mtmsr	r10		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: we don't tell lockdep that we are disabling them again
	 * either. These brief disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
	mtmsr	r10		/* disable interrupts */
	lwz	r9,TI_FLAGS(r2)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	mtmsr	r10		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	5f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
5:	mfspr	r2,SPRN_SPRG_THREAD
	addi	r2,r2,-THREAD
	tovirt(r2,r2)			/* set back r2 to current */
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	/* shouldn't return */
	b	4b
_ASM_NOKPROBE_SYMBOL(nonrecoverable)

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
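/*
 * Sketch of the mechanism below: the RTAS entry point and the 1f return
 * address are converted to physical addresses with tophys, SRR1 is set
 * to MSR_KERNEL & ~(MSR_IR|MSR_DR), and the RFI therefore enters RTAS
 * with address translation disabled; the 1f path restores the saved MSR
 * and stack on the way back.
 */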
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys_novmstack r7, r1
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
	mtmsr	r0	/* disable interrupts so SRR0/1 don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	stw	r7, THREAD + RTAS_SP(r2)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys_novmstack r9, r1
#ifdef CONFIG_VMAP_STACK
	li	r0, MSR_KERNEL & ~MSR_IR	/* can take DTLB miss */
	mtmsr	r0
	isync
#endif
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	tophys_novmstack r7, r2
	stw	r0, THREAD + RTAS_SP(r7)
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */
_ASM_NOKPROBE_SYMBOL(enter_rtas)
#endif /* CONFIG_PPC_RTAS */
