/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

	.globl system_call_common
system_call_common:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne	.Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
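	/*
	 * r12 holds the caller's MSR here. If MSR_PR is set we came from
	 * userspace, so switch to this CPU's kernel stack from PACAKSAVE;
	 * otherwise keep building the frame on the current kernel stack.
	 */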
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	beq	2f			/* if from kernel mode */
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
2:	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
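	/*
	 * The trap value 0xc01 marks a system call frame (0xc00) with the
	 * low bit set to record that the non-volatile GPRs have not been
	 * saved; save_nvgprs clears that bit once it saves them.
	 */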
	li	r11,0xc01
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	addi	r10,r10,LPPACA_DTLIDX
	LDX_BE	r10,0,r10		/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled,
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that the
	 * condition is correct.
	 */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	xori	r10,r10,1
1:	tdnei	r10,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r11,MSR_RI
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

system_call:			/* label this so stack traces look sane */
	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	 */
	li	r10,1
	std	r10,SOFTE(r1)

	CURRENT_THREAD_INFO(r11, r1)
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
	bne	.Lsyscall_dotrace		/* does not return */
	cmpldi	0,r0,NR_syscalls
	bge-	.Lsyscall_enosys

.Lsyscall:
/*
 * Need to vector to the 32-bit or default sys_call_table here,
 * based on the caller's run-mode / personality.
 */
	ld	r11,SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
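	/*
	 * Each sys_call_table slot is 16 bytes: the 64-bit handler followed
	 * by the 32-bit handler (selected via the addi of 8 above), so
	 * scale the syscall number by 16.
	 */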
	slwi	r0,r0,4

	barrier_nospec_asm
	/*
	 * Prevent the load of the handler below (based on the user-passed
	 * system call number) being speculatively executed until the test
	 * against NR_syscalls and branch to .Lsyscall_enosys above has
	 * committed.
	 */

	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
	mtctr   r12
	bctrl			/* Call handler */

.Lsyscall_exit:
	std	r3,RESULT(r1)
	CURRENT_THREAD_INFO(r12, r1)

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	.Lunrecov_restore
#endif

/*
 * This is a few instructions into the actual syscall exit path (which actually
 * starts at .Lsyscall_exit) to cater to kprobe blacklisting and to reduce the
 * number of visible symbols for profiling purposes.
 *
 * We can probe from system_call until this point as MSR_RI is set. But once it
 * is cleared below, we won't be able to take a trap.
 *
 * This is blacklisted from kprobes further below with _ASM_NOKPROBE_SYMBOL().
 */
system_call_exit:
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	/*
	 * For performance reasons we clear RI the same time that we
	 * clear EE. We only need to clear RI just before we restore r13
	 * below, but batching it with EE saves us one expensive mtmsrd call.
	 * We have to be careful to restore RI if we branch anywhere from
	 * here (eg syscall_exit_work).
	 */
	li	r11,0
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	.Lsyscall_exit_work

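	/*
	 * Unless the saved MSR already has both FP and VMX enabled, call
	 * restore_math so the FP/VMX state can be restored before we
	 * return to userspace.
	 */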
	andi.	r0,r8,MSR_FP
	beq 2f
#ifdef CONFIG_ALTIVEC
	andis.	r0,r8,MSR_VEC@h
	bne	3f
#endif
2:	addi    r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Restore RI */
#endif
	bl	restore_math
#ifdef CONFIG_PPC_BOOK3S
	li	r11,0
	mtmsrd	r11,1
#endif
	ld	r8,_MSR(r1)
	ld	r3,RESULT(r1)
	li	r11,-MAX_ERRNO

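	/*
	 * Return values in the range [-MAX_ERRNO, -1] (an unsigned compare
	 * against r11) mean the syscall failed: .Lsyscall_error negates r3
	 * and sets the SO bit in the saved CR so userspace sees the error.
	 */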
3:	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	.Lsyscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r13, r11, r12)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI_TO_USER
	b	.	/* prevent speculative execution */

	/* exit to kernel */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

.Lsyscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont

/* Traced system call support */
.Lsyscall_dotrace:
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter

	/*
	 * We use the return value of do_syscall_trace_enter() as the syscall
	 * number. If the syscall was rejected for any reason do_syscall_trace_enter()
	 * returns an invalid syscall number and the test below against
	 * NR_syscalls will fail.
	 */
	mr	r0,r3

	/* Restore argument registers just clobbered and/or possibly changed. */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)

	/* Repopulate r9 and r10 for the syscall path */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	CURRENT_THREAD_INFO(r10, r1)
	ld	r10,TI_FLAGS(r10)

	cmpldi	r0,NR_syscalls
	blt+	.Lsyscall

	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	b	.Lsyscall_exit


.Lsyscall_enosys:
	li	r3,-ENOSYS
	b	.Lsyscall_exit

.Lsyscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Restore RI */
#endif
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 * If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
BEGIN_FTR_SECTION
	lis	r3,INIT_PPR@highest	/* Set thread.ppr = 3 */
	ld	r10,PACACURRENT(r13)
	sldi	r3,r3,32	/* bits 11-13 are used for ppr */
	std	r3,TASKTHREADPPR(r10)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r10,MSR_RI
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
.Ltabort_syscall:
	/* Firstly we need to enable TM in the kernel */
	mfmsr	r10
	li	r9, 1
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li	r9, MSR_RI
	andc	r10, r10, r9
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12
	RFI_TO_USER
	b	.	/* prevent speculative execution */
#endif
_ASM_NOKPROBE_SYMBOL(system_call_common);
_ASM_NOKPROBE_SYMBOL(system_call_exit);

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr
_ASM_NOKPROBE_SYMBOL(save_nvgprs);


/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	save_nvgprs
	bl	sys_fork
	b	.Lsyscall_exit

_GLOBAL(ppc_vfork)
	bl	save_nvgprs
	bl	sys_vfork
	b	.Lsyscall_exit

_GLOBAL(ppc_clone)
	bl	save_nvgprs
	bl	sys_clone
	b	.Lsyscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	save_nvgprs
	bl	compat_sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	save_nvgprs
	bl	sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc_switch_endian)
	bl	save_nvgprs
	bl	sys_switch_endian
	b	.Lsyscall_exit

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
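	/*
	 * copy_thread() placed the thread function in r14 and its argument
	 * in r15 for a new kernel thread.
	 */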
	mtlr	r14
	mr	r3,r15
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
	blrl
	li	r3,0
	b	.Lsyscall_exit

#ifdef CONFIG_PPC_BOOK3S_64

#define FLUSH_COUNT_CACHE	\
1:	nop;			\
	patch_site 1b, patch__call_flush_count_cache


#define BCCTR_FLUSH	.long 0x4c400420

.macro nops number
	.rept \number
	nop
	.endr
.endm

.balign 32
.global flush_count_cache
flush_count_cache:
	/* Save LR into r9 */
	mflr	r9

	// Flush the link stack
	.rept 64
	bl	.+4
	.endr
	b	1f
	nops	6

	.balign 32
	/* Restore LR */
1:	mtlr	r9

	// If we're just flushing the link stack, return here
3:	nop
	patch_site 3b patch__flush_link_stack_return

	li	r9,0x7fff
	mtctr	r9

	BCCTR_FLUSH

2:	nop
	patch_site 2b patch__flush_count_cache_return

	nops	3

	.rept 278
	.balign 32
	BCCTR_FLUSH
	nops	7
	.endr

	blr
#else
#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	std	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

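	/*
	 * The nop at this patch site is replaced with a call to
	 * flush_count_cache on CPUs that need the count cache and/or link
	 * stack flushed across a context switch.
	 */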
	FLUSH_COUNT_CACHE

	/*
	 * On SMP kernels, care must be taken because a task may be
	 * scheduled off CPUx and on to CPUy. Memory ordering must be
	 * considered.
	 *
	 * Cacheable stores on CPUx will be visible when the task is
	 * scheduled on CPUy by virtue of the core scheduler barriers
	 * (see "Notes on Program-Order guarantees on SMP systems." in
	 * kernel/sched/core.c).
	 *
	 * Uncacheable stores in the case of involuntary preemption must
	 * be taken care of. The smp_mb__before_spin_lock() in __schedule()
	 * is implemented as hwsync on powerpc, which orders MMIO too. So
	 * long as there is an hwsync in the context switch path, it will
	 * be executed on the source CPU after the task has performed
	 * all MMIO ops on that CPU, and on the destination CPU before the
	 * task performs any MMIO ops there.
	 */

	/*
	 * The kernel context switch path must contain a spin_lock,
	 * which contains larx/stcx, which will clear any reservation
	 * of the task being switched.
	 */
#ifdef CONFIG_PPC_BOOK3S
/* Cancel all explicit user streams as they will have no use after context
 * switch and will stop the HW from creating streams itself
 */
	DCBT_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_STD_MMU_64
BEGIN_MMU_FTR_SECTION
	b	2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	isync
	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_STD_MMU_64 */

	CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	/*
	 * PMU interrupts in radix may come in here. They will use r1, not
	 * PACAKSAVE, so this stack switch will not cause a problem. They
	 * will store to the process stack, which may then be migrated to
	 * another CPU. However the rq lock release on this CPU paired with
	 * the rq lock acquire on the new CPU before the stack becomes
	 * active on the new CPU, will order those stores.
	 */
	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	CURRENT_THREAD_INFO(r9, r1)
	ld	r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	1f
#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
#else
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_math
	b	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	bl	save_nvgprs
	/*
	 * Use a non volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

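	/*
	 * An emulated stdu to the kernel stack is pending (see the kprobed
	 * function entry below): rebuild the exception frame one frame
	 * lower, perform the store the stdu would have done, then clear
	 * the TIF flag.
	 */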
	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	ld	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stdu */
	ld	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
1:	bl	preempt_schedule_irq

	/* Re-test flags and eventually loop */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupts, but we really should have them disabled
	 * when we return from the interrupt, so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACASOFTIRQEN(r13)
	cmpwi	cr0,r5,0
	beq	.Lrestore_irq_off

	/* We are enabling, were we already enabled? If yes, just return */
	cmpwi	cr0,r6,1
	beq	cr0,.Ldo_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	.Lrestore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move on. We will hard-enable as a side
	 * effect of rfi
	 */
.Lrestore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13);

	/*
	 * Final return path. BookE is handled in a different file
	 */
.Ldo_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code paths such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	.Lunrecov_restore

	/* Load PPR from thread struct before we clear MSR:RI */
BEGIN_FTR_SECTION
	ld	r2,PACACURRENT(r13)
	ld	r2,TASKTHREADPPR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r4,0
	mtmsrd	r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* TM debug */
	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
#endif
	/*
	 * r13 is our per-CPU area; only restore it if we are returning to
	 * userspace, since the value stored in the stack frame may belong
	 * to another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
BEGIN_FTR_SECTION
	mtspr	SPRN_PPR,r2	/* Restore PPR */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
	REST_GPR(13, r1)

	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

1:	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

	/*
	 * We are returning to a context with interrupts soft-disabled.
	 *
	 * However, we may also be about to hard-enable, so we need to
	 * make sure that in this case we also clear PACA_IRQ_HARD_DIS,
	 * or that bit can get out of sync and bad things will happen
	 */
.Lrestore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	/* The interrupt should not have left interrupts soft-enabled. */
	lbz	r7,PACASOFTIRQEN(r13)
1:	tdnei	r7,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
	b	.Ldo_restore

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
.Lrestore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	.Lrestore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0xe60
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	handle_hmi_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
	cmpwi	cr0,r3,0x280
#else
	cmpwi	cr0,r3,0xa00
#endif /* CONFIG_PPC_BOOK3E */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here? */

.Lunrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.Lunrecov_restore

_ASM_NOKPROBE_SYMBOL(ret_from_except);
_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
_ASM_NOKPROBE_SYMBOL(resume_kernel);
_ASM_NOKPROBE_SYMBOL(fast_exc_return_irq);
_ASM_NOKPROBE_SYMBOL(restore);
_ASM_NOKPROBE_SYMBOL(fast_exception_return);


#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
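	/* Rotate MSR_EE (bit 48) up to the MSB, mask it off, then rotate back */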
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

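	/*
	 * r0 = current MSR with EE/SE/BE/RI cleared, used below to quiesce
	 * the CPU before the rfi.  r6 = the MSR RTAS will run with: SF,
	 * IR/DR, FP and LE cleared as well, i.e. 32-bit, MMU off, big-endian.
	 */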
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9

__enter_rtas:
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
_ASM_NOKPROBE_SYMBOL(__enter_rtas)
_ASM_NOKPROBE_SYMBOL(rtas_return_loc)

	.align	3
1:	.8byte	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR
	 */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	RFI_TO_KERNEL
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr