/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>
#include <asm/barrier.h>

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
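/*
 * For example, with MSR_KERNEL at or above 0x10000,
 * LOAD_MSR_KERNEL(r10, MSR_KERNEL) expands to the two-instruction
 * sequence "lis r10,(MSR_KERNEL)@h; ori r10,r10,(MSR_KERNEL)@l",
 * whereas the 16-bit case is a single "li r10,(MSR_KERNEL)".
 */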

#ifdef CONFIG_BOOKE
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* save the current stack limit and set a new limit
	 * that protects the thread_info struct at the base of
	 * the current stack
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* save the current stack limit and set a new limit
	 * that protects the thread_info struct at the base of
	 * the current stack
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
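/*
 * Register conventions assumed by the code below on entry here:
 * r11 points at the partially-filled exception frame, r12 holds the
 * interrupted NIP (from SRR0), r9 the interrupted MSR (from SRR1),
 * r1 is the kernel stack pointer, r10 the MSR value to run the
 * handler with, and LR points at the handler address / return
 * address pair picked up in transfer_to_handler_cont.
 */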
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
         * check for stack overflow
         */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/*
	 * trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
	 * If we came from user mode there is only one stack frame on the
	 * stack, and accessing CALLER_ADDR1 will cause an oops, so we need
	 * to create a dummy stack frame to keep trace_hardirqs_off happy.
	 *
	 * This is handy because we also need to save a bunch of GPRs:
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and handler address respectively, and
	 * r4 & r5 can contain page fault arguments that need to be passed
	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
	 * they aren't useful past this point (they aren't syscall arguments);
	 * the rest is restored from the exception frame.
	 */
	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)
	bl	trace_hardirqs_off
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	lwz	r0,GPR0(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
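/*
 * Syscall convention assumed throughout DoSyscall below: the system
 * call number arrives in r0 and the arguments in r3-r8; the result is
 * returned in r3, with CR0.SO set (and r3 holding a positive errno)
 * when the call fails.
 */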
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Return from syscalls can (and generally will) hard enable
	 * interrupts. You aren't supposed to call a syscall with
	 * interrupts disabled in the first place. However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f

	barrier_nospec_asm
	/*
	 * Prevent the load of the handler below (based on the user-passed
	 * system call number) from being speculatively executed until the
	 * test against NR_syscalls and the branch to 66f above have
	 * committed.
	 */

	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
	mr	r6,r3
	CURRENT_THREAD_INFO(r12, r1)
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here. It shouldn't happen, but we want
	 * to catch the bugger if it does, right?
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl      trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)

	cmplwi	r0,NR_syscalls
	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	bge-	ret_from_syscall
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts. There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
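/*
 * As the code below relies on, the low bit of the _TRAP word acts as a
 * marker: it is set while only the volatile registers are in the frame,
 * and cleared once the full register set (including r13-r31) has been
 * saved.
 */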
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
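/*
 * As used below, r4 holds the faulting address (DAR) on entry; a
 * non-zero return from do_page_fault means a bad kernel fault, which
 * is then reported via bad_page_fault with that return value passed
 * along in r5.
 */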
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
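/*
 * In C terms this behaves roughly like
 *	last = _switch(&prev->thread, &next->thread);
 * returning, in the new task's context, a pointer to the previous
 * task in r3 (the "save the old current 'last'" step below).
 */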
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r11)
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif

	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r8,TI_FLAGS(r9)
	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	addi	r6,r6,4
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	stw	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	lwarx	r8,0,r5
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r5
#endif
	stwcx.	r8,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* check current_thread_info->preempt_count */
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	andi.	r8,r8,_TIF_NEED_RESCHED
	beq+	restore
	lwz	r3,_MSR(r1)
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know that IRQs are temporarily turned off in this
	 * assembly code while peeking at TI_FLAGS() and such. However we need
	 * to inform it if the exception turned interrupts off, and we are
	 * about to turn them back on.
	 *
	 * The problem, sadly, is that we don't know whether the exception was
	 * one that turned interrupts off or not. So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked. Maybe later
	 * we could encode what the exception did somewhere or test the exception
	 * type in the pt_regs, but that sounds like overkill.
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	/*
	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
	 * which is the stack frame here, we need to force a stack frame
	 * in case we came from user space.
	 */
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-32(r1)
	bl	trace_hardirqs_on
	lwz	r1,0(r1)
	lwz	r1,0(r1)
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r1)
	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	/* Clear the exception_marker on the stack to avoid confusing stacktrace */
	li	r10, 0
	stw	r10, 8(r1)
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
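/*
 * The helpers below implement that restore: RESTORE_xSRR and
 * RESTORE_MMU_REGS put back the SPRs saved by the *_transfer_to_handler
 * paths above, and RET_FROM_EXC_LEVEL restores the GPRs and returns
 * with the level-specific rfi variant (rfci, rfdi or rfmci).
 */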
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we	    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
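/*
 * global_dbcr0 (defined below) holds two words per CPU: the saved
 * DBCR0 value at offset 0 and a use count at offset 4, which is
 * incremented here and decremented when transfer_to_handler restores
 * the global value on the way into the kernel.
 */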
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here. As far as it knows, they are already enabled
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: We don't tell lockdep that we are disabling them again
	 * either. Those disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG_RTAS,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG_RTAS,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	/*
	 * _mcount on PPC32 is required to preserve the link register.
	 * But we have r0 to play with: we move the address that mcount
	 * should return to into the ctr register, restore the original
	 * link register from the caller's stack frame, and then jump
	 * back using the ctr register.
	 */
	mflr	r0
	mtctr	r0
	lwz	r0, 4(r1)
	mtlr	r0
	bctr

_GLOBAL(ftrace_caller)
	MCOUNT_SAVE_FRAME
	/* r3 ends up with link register */
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)

	MCOUNT_SAVE_FRAME

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5, ftrace_trace_function)
	lwz	r5,0(r5)

	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	MCOUNT_RESTORE_FRAME
	bctr
#endif

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	lwz	r4, 44(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* Grab the LR out of the caller stack frame */
	lwz	r3,52(r1)

	bl	prepare_ftrace_return
	nop

        /*
         * prepare_ftrace_return gives us the address we divert to.
         * Change the LR in the caller's stack frame to this.
         */
	stw	r3,52(r1)

	MCOUNT_RESTORE_FRAME
	/* old link register ends up in ctr reg */
	bctr

_GLOBAL(return_to_handler)
	/* need to save return values */
	stwu	r1, -32(r1)
	stw	r3, 20(r1)
	stw	r4, 16(r1)
	stw	r31, 12(r1)
	mr	r31, r1

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	lwz	r3, 20(r1)
	lwz	r4, 16(r1)
	lwz	r31,12(r1)
	lwz	r1, 0(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */
