/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/magic.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

	.globl system_call_common
system_call_common:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne	tabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
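	/*
	 * Determine whether we came from user or kernel mode (MSR_PR) and
	 * pick the stack accordingly: entries from userspace switch to the
	 * kernel stack saved in the PACA, kernel entries keep using r1.
	 */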
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	beq	2f			/* if from kernel mode */
	ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
2:	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
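	/*
	 * 0xc00 is the system call vector; the low bit set in the saved
	 * _TRAP value records that the non-volatile GPRs have not been
	 * saved in this frame (see save_nvgprs below).
	 */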
	li	r11,0xc01
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	addi	r10,r10,LPPACA_DTLIDX
	LDX_BE	r10,0,r10		/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that this
	 * condition holds.
	 */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
	lbz	r10,PACASOFTIRQEN(r13)
	xori	r10,r10,1
1:	tdnei	r10,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r11,MSR_RI
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	/* We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful
	 */
	li	r10,1
	std	r10,SOFTE(r1)

	CURRENT_THREAD_INFO(r11, r1)
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
	bne	syscall_dotrace		/* does not return */
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to the 32-bit or default sys_call_table here,
 * based on the caller's run-mode / personality.
 */
	ld	r11,SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
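	/*
	 * Each sys_call_table entry is 16 bytes (the native handler followed
	 * by its 32-bit compat counterpart), hence the +8 adjustment above
	 * and the multiply-by-16 (slwi by 4) below.
	 */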
	slwi	r0,r0,4
	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
	mtctr   r12
	bctrl			/* Call handler */

.Lsyscall_exit:
	std	r3,RESULT(r1)
	CURRENT_THREAD_INFO(r12, r1)

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	/*
	 * For performance reasons we clear RI the same time that we
	 * clear EE. We only need to clear RI just before we restore r13
	 * below, but batching it with EE saves us one expensive mtmsrd call.
	 * We have to be careful to restore RI if we branch anywhere from
	 * here (eg syscall_exit_work).
	 */
	li	r11,0
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work

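	/*
	 * If either FP or (with ALTIVEC) VMX is disabled in the saved user
	 * MSR, give restore_math() a chance to restore that state before we
	 * return to userspace; otherwise skip the call.
	 */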
	andi.	r0,r8,MSR_FP
	beq 2f
#ifdef CONFIG_ALTIVEC
	andis.	r0,r8,MSR_VEC@h
	bne	3f
#endif
2:	addi    r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Restore RI */
#endif
	bl	restore_math
#ifdef CONFIG_PPC_BOOK3S
	li	r11,0
	mtmsrd	r11,1
#endif
	ld	r8,_MSR(r1)
	ld	r3,RESULT(r1)
	li	r11,-MAX_ERRNO

3:	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r13, r11, r12)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI_TO_USER
	b	.	/* prevent speculative execution */

	/* exit to kernel */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter

	/*
	 * We use the return value of do_syscall_trace_enter() as the syscall
	 * number. If the syscall was rejected for any reason do_syscall_trace_enter()
	 * returns an invalid syscall number and the test below against
	 * NR_syscalls will fail.
	 */
	mr	r0,r3

	/* Restore argument registers just clobbered and/or possibly changed. */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)

	/* Repopulate r9 and r10 for the system_call path */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	CURRENT_THREAD_INFO(r10, r1)
	ld	r10,TI_FLAGS(r10)

	cmpldi	r0,NR_syscalls
	blt+	system_call

	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	b	.Lsyscall_exit


syscall_enosys:
	li	r3,-ENOSYS
	b	.Lsyscall_exit

syscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Restore RI */
#endif
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 * If TIF_NOERROR is set, just save r3 as it is.
	 */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

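	/*
	 * Use an atomic ldarx/stdcx. sequence so we don't race with flag
	 * updates made from other contexts.
	 */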
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
BEGIN_FTR_SECTION
	lis	r3,INIT_PPR@highest	/* Set thread.ppr = 3 */
	ld	r10,PACACURRENT(r13)
	sldi	r3,r3,32	/* bits 11-13 are used for ppr */
	std	r3,TASKTHREADPPR(r10)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r10,MSR_RI
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tabort_syscall:
	/* Firstly we need to enable TM in the kernel */
	mfmsr	r10
	li	r9, 1
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li	r9, MSR_RI
	andc	r10, r10, r9
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12
	RFI_TO_USER
	b	.	/* prevent speculative execution */
#endif

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr


/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	save_nvgprs
	bl	sys_fork
	b	.Lsyscall_exit

_GLOBAL(ppc_vfork)
	bl	save_nvgprs
	bl	sys_vfork
	b	.Lsyscall_exit

_GLOBAL(ppc_clone)
	bl	save_nvgprs
	bl	sys_clone
	b	.Lsyscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	save_nvgprs
	bl	compat_sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	save_nvgprs
	bl	sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc_switch_endian)
	bl	save_nvgprs
	bl	sys_switch_endian
	b	.Lsyscall_exit

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtlr	r14
	mr	r3,r15
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
	blrl
	li	r3,0
	b	.Lsyscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	std	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

BEGIN_FTR_SECTION
/*
 * A cp_abort (copy paste abort) here ensures that when context switching, a
 * copy from one process can't leak into the paste of another.
 */
	PPC_CP_ABORT
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

#ifdef CONFIG_PPC_BOOK3S
/* Cancel all explicit user streams as they will have no use after context
 * switch, and stop the HW from creating streams itself.
 */
	DCBT_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_STD_MMU_64
BEGIN_MMU_FTR_SECTION
	b	2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
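	/*
	 * With the radix MMU there is no SLB, so the stack SLB handling
	 * below is skipped entirely (branch to 2f above).
	 */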
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_STD_MMU_64 */

	CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	CURRENT_THREAD_INFO(r9, r1)
	ld	r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)
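	/* Returning to kernel context? Handle kernel resume work instead. */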
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	1f
#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
#else
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_math
	b	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	bl	save_nvgprs
	/*
	 * Use a non-volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

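	/*
	 * _TIF_EMULATE_STACK_STORE is typically set when the kernel has
	 * emulated a stack-updating store (e.g. a stdu single-stepped under
	 * kprobes). Copy the exception frame down to the new stack location
	 * first, then complete the store, so the frame is never clobbered.
	 */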
	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	ld	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stdu */
	ld	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
1:	bl	preempt_schedule_irq

	/* Re-test flags and loop again if needed */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupts, but we should have interrupts disabled
	 * when we return from the interrupt, so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACASOFTIRQEN(r13)
	cmpwi	cr0,r5,0
	beq	restore_irq_off

	/* We are enabling, were we already enabled? If yes, just return */
	cmpwi	cr0,r6,1
	beq	cr0,do_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	restore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move on. We will hard-enable as a side
	 * effect of rfi
	 */
restore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13);

	/*
	 * Final return path. BookE is handled in a different file
	 */
do_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code paths such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/* Load PPR from thread struct before we clear MSR:RI */
BEGIN_FTR_SECTION
	ld	r2,PACACURRENT(r13)
	ld	r2,TASKTHREADPPR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r4,0
	mtmsrd	r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* TM debug */
	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
#endif
	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace, since the value stored in the stack frame may belong
	 * to another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
BEGIN_FTR_SECTION
	mtspr	SPRN_PPR,r2	/* Restore PPR */
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
	REST_GPR(13, r1)

	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

1:	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also be about to hard-enable, so we need to
	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
	 * or that bit can get out of sync and bad things will happen
	 */
restore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:	li	r0,0
	stb	r0,PACASOFTIRQEN(r13);
	TRACE_DISABLE_INTS
	b	do_restore

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
restore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	restore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0xe60
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	handle_hmi_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
	cmpwi	cr0,r3,0x280
#else
	BEGIN_FTR_SECTION
		cmpwi	cr0,r3,0xe80
	FTR_SECTION_ELSE
		cmpwi	cr0,r3,0xa00
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
#endif /* CONFIG_PPC_BOOK3E */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
	b	ret_from_except
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here? */

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

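	/*
	 * Build two MSR values: r0 is the current MSR with EE/SE/BE/RI
	 * cleared, used to hard-disable interrupts below; r6 additionally
	 * clears SF/IR/DR/FP/LE etc. and becomes the 32-bit, real-mode,
	 * big-endian MSR that RTAS is entered with via SRR1.
	 */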
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

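	/*
	 * Use a branch-and-link to discover our current (real-mode) address
	 * so we can pick up the relocated address of rtas_restore_regs from
	 * the 1f literal below.
	 */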
	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	mfmsr   r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd  r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr    r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR
	 */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	RFI_TO_KERNEL
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr    r0
	blr

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
EXPORT_SYMBOL(_mcount)
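	/*
	 * With dynamic ftrace the compiler-inserted _mcount call sites are
	 * patched at runtime; this stub just puts the saved return address
	 * back in LR and branches straight back to the caller, so an
	 * unpatched call site is effectively a no-op.
	 */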
	mflr	r12
	mtctr	r12
	mtlr	r0
	bctr

#ifndef CC_USING_MPROFILE_KERNEL
_GLOBAL_TOC(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112

#else /* CC_USING_MPROFILE_KERNEL */
/*
 *
 * ftrace_caller() is the function that replaces _mcount() when ftrace is
 * active.
 *
 * We arrive here after a function A calls function B, and we are the trace
 * function for B. When we enter, r1 points to A's stack frame; B has not yet
 * had a chance to allocate one.
 *
 * Additionally r2 may point either to the TOC for A, or B, depending on
 * whether B did a TOC setup sequence before calling us.
 *
 * On entry the LR points back to the _mcount() call site, and r0 holds the
 * saved LR as it was on entry to B, ie. the original return address at the
 * call site in A.
 *
 * Our job is to save the register state into a struct pt_regs (on the stack)
 * and then arrange for the ftrace function to be called.
 */
_GLOBAL(ftrace_caller)
	/* Save the original return address in A's stack frame */
	std	r0,LRSAVE(r1)

	/* Create our stack frame + pt_regs */
	stdu	r1,-SWITCH_FRAME_SIZE(r1)

	/* Save all gprs to pt_regs */
	SAVE_GPR(0, r1)
	SAVE_10GPRS(2, r1)
	SAVE_10GPRS(12, r1)
	SAVE_10GPRS(22, r1)

	/* Save previous stack pointer (r1) */
	addi	r8, r1, SWITCH_FRAME_SIZE
	std	r8, GPR1(r1)

	/* Load special regs for save below */
	mfmsr   r8
	mfctr   r9
	mfxer   r10
	mfcr	r11

	/* Get the _mcount() call site out of LR */
	mflr	r7
	/* Save it as pt_regs->nip & pt_regs->link */
	std     r7, _NIP(r1)
	std     r7, _LINK(r1)

	/* Save callee's TOC in the ABI compliant location */
	std	r2, 24(r1)
	ld	r2,PACATOC(r13)	/* get kernel TOC in r2 */

	addis	r3,r2,function_trace_op@toc@ha
	addi	r3,r3,function_trace_op@toc@l
	ld	r5,0(r3)

#ifdef CONFIG_LIVEPATCH
	mr	r14,r7		/* remember old NIP */
#endif
	/* Calculate ip from nip-4 into r3 for call below */
	subi    r3, r7, MCOUNT_INSN_SIZE

	/* Put the original return address in r4 as parent_ip */
	mr	r4, r0

	/* Save special regs */
	std     r8, _MSR(r1)
	std     r9, _CTR(r1)
	std     r10, _XER(r1)
	std     r11, _CCR(r1)

	/* Load &pt_regs in r6 for call below */
	addi    r6, r1, STACK_FRAME_OVERHEAD

	/* ftrace_call(r3, r4, r5, r6) */
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop

	/* Load ctr with the possibly modified NIP */
	ld	r3, _NIP(r1)
	mtctr	r3
#ifdef CONFIG_LIVEPATCH
	cmpd	r14,r3		/* has NIP been altered? */
#endif

	/* Restore gprs */
	REST_GPR(0,r1)
	REST_10GPRS(2,r1)
	REST_10GPRS(12,r1)
	REST_10GPRS(22,r1)

	/* Restore callee's TOC */
	ld	r2, 24(r1)

	/* Pop our stack frame */
	addi r1, r1, SWITCH_FRAME_SIZE

	/* Restore original LR for return to B */
	ld	r0, LRSAVE(r1)
	mtlr	r0

#ifdef CONFIG_LIVEPATCH
	/* Based on the cmpd above, if the NIP was altered handle livepatch */
	bne-	livepatch_handler
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	stdu	r1, -112(r1)
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
	addi	r1, r1, 112
#endif

	ld	r0,LRSAVE(r1)	/* restore callee's lr at _mcount site */
	mtlr	r0
	bctr			/* jump after _mcount site */
#endif /* CC_USING_MPROFILE_KERNEL */

_GLOBAL(ftrace_stub)
	blr

#ifdef CONFIG_LIVEPATCH
	/*
	 * This function runs in the mcount context, between two functions. As
	 * such it can only clobber registers which are volatile and used in
	 * function linkage.
	 *
	 * We get here when a function A calls another function B, but B has
	 * been live patched with a new function C.
	 *
	 * On entry:
	 *  - we have no stack frame and can not allocate one
	 *  - LR points back to the original caller (in A)
	 *  - CTR holds the new NIP in C
	 *  - r0 & r12 are free
	 *
	 * r0 can't be used as the base register for a DS-form load or store, so
	 * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
	 */
livepatch_handler:
	CURRENT_THREAD_INFO(r12, r1)

	/* Save stack pointer into r0 */
	mr	r0, r1

	/* Allocate 3 x 8 bytes */
	ld	r1, TI_livepatch_sp(r12)
	addi	r1, r1, 24
	std	r1, TI_livepatch_sp(r12)

	/* Save toc & real LR on livepatch stack */
	std	r2,  -24(r1)
	mflr	r12
	std	r12, -16(r1)

	/* Store stack end marker */
	lis     r12, STACK_END_MAGIC@h
	ori     r12, r12, STACK_END_MAGIC@l
	std	r12, -8(r1)

	/* Restore real stack pointer */
	mr	r1, r0

	/* Put ctr in r12 for global entry and branch there */
	mfctr	r12
	bctrl

	/*
	 * Now we are returning from the patched function to the original
	 * caller A. We are free to use r0 and r12, and we can use r2 until we
	 * restore it.
	 */

	CURRENT_THREAD_INFO(r12, r1)

	/* Save stack pointer into r0 */
	mr	r0, r1

	ld	r1, TI_livepatch_sp(r12)

	/* Check stack marker hasn't been trashed */
	lis     r2,  STACK_END_MAGIC@h
	ori     r2,  r2, STACK_END_MAGIC@l
	ld	r12, -8(r1)
1:	tdne	r12, r2
	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0

	/* Restore LR & toc from livepatch stack */
	ld	r12, -16(r1)
	mtlr	r12
	ld	r2,  -24(r1)

	/* Pop livepatch stack frame */
	CURRENT_THREAD_INFO(r12, r0)
	subi	r1, r1, 24
	std	r1, TI_livepatch_sp(r12)

	/* Restore real stack pointer */
	mr	r1, r0

	/* Return to original caller of live patched function */
	blr
#endif


#else
_GLOBAL_TOC(_mcount)
EXPORT_SYMBOL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)
	ld	r5,0(r5)
	mtctr	r5
	bctrl
	nop


#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifndef CC_USING_MPROFILE_KERNEL
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	ld	r4, 128(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* Grab the LR out of the caller stack frame */
	ld	r11, 112(r1)
	ld	r3, 16(r11)

	bl	prepare_ftrace_return
	nop

	/*
	 * prepare_ftrace_return gives us the address we divert to.
	 * Change the LR in the caller's stack frame to this.
	 */
	ld	r11, 112(r1)
	std	r3, 16(r11)

	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

#else /* CC_USING_MPROFILE_KERNEL */
_GLOBAL(ftrace_graph_caller)
	/* with -mprofile-kernel, parameter regs are still alive at _mcount */
	std	r10, 104(r1)
	std	r9, 96(r1)
	std	r8, 88(r1)
	std	r7, 80(r1)
	std	r6, 72(r1)
	std	r5, 64(r1)
	std	r4, 56(r1)
	std	r3, 48(r1)

	/* Save callee's TOC in the ABI compliant location */
	std	r2, 24(r1)
	ld	r2, PACATOC(r13)	/* get kernel TOC in r2 */

	mfctr	r4		/* ftrace_caller has moved local addr here */
	std	r4, 40(r1)
	mflr	r3		/* ftrace_caller has restored LR from stack */
	subi	r4, r4, MCOUNT_INSN_SIZE

	bl	prepare_ftrace_return
	nop

	/*
	 * prepare_ftrace_return gives us the address we divert to.
	 * Change the LR to this.
	 */
	mtlr	r3

	ld	r0, 40(r1)
	mtctr	r0
	ld	r10, 104(r1)
	ld	r9, 96(r1)
	ld	r8, 88(r1)
	ld	r7, 80(r1)
	ld	r6, 72(r1)
	ld	r5, 64(r1)
	ld	r4, 56(r1)
	ld	r3, 48(r1)

	/* Restore callee's TOC */
	ld	r2, 24(r1)

	addi	r1, r1, 112
	mflr	r0
	std	r0, LRSAVE(r1)
	bctr
#endif /* CC_USING_MPROFILE_KERNEL */

_GLOBAL(return_to_handler)
	/* need to save return values */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We might be called from a module.
	 * Switch to our TOC to run inside the core kernel.
	 */
	ld	r2, PACATOC(r13)

	bl	ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */