1/*
2 * This file contains the 64-bit "server" PowerPC variant
3 * of the low level exception handling including exception
4 * vectors, exception return, part of the slb and stab
5 * handling and other fixed offset specific things.
6 *
7 * This file is meant to be #included from head_64.S due to
8 * position-dependent assembly.
9 *
10 * Most of this originates from head_64.S and thus has the same
11 * copyright history.
12 *
13 */
14
15#include <asm/hw_irq.h>
16#include <asm/exception-64s.h>
17#include <asm/ptrace.h>
18#include <asm/cpuidle.h>
19
20/*
21 * We layout physical memory as follows:
22 * 0x0000 - 0x00ff : Secondary processor spin code
23 * 0x0100 - 0x17ff : pSeries Interrupt prologs
24 * 0x1800 - 0x4000 : common interrupt prologs and other interrupt support
25 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
26 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
27 * 0x7000 - 0x7fff : FWNMI data area
28 * 0x8000 - 0x8fff : Initial (CPU0) segment table
29 * 0x9000 -        : Early init and support code
30 */
31	/* Syscall routine is used twice, in reloc-off and reloc-on paths */
32#define SYSCALL_PSERIES_1 					\
33BEGIN_FTR_SECTION						\
34	cmpdi	r0,0x1ebe ; 					\
35	beq-	1f ;						\
36END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
37	mr	r9,r13 ;					\
38	GET_PACA(r13) ;						\
39	INTERRUPT_TO_KERNEL ;					\
40	mfspr	r11,SPRN_SRR0 ;					\
410:
42
43#define SYSCALL_PSERIES_2_RFID 					\
44	mfspr	r12,SPRN_SRR1 ;					\
45	ld	r10,PACAKBASE(r13) ; 				\
46	LOAD_HANDLER(r10, system_call_entry) ; 			\
47	mtspr	SPRN_SRR0,r10 ; 				\
48	ld	r10,PACAKMSR(r13) ;				\
49	mtspr	SPRN_SRR1,r10 ; 				\
50	RFI_TO_KERNEL ; 							\
51	b	. ;	/* prevent speculative execution */
52
53#define SYSCALL_PSERIES_3					\
54	/* Fast LE/BE switch system call */			\
551:	mfspr	r12,SPRN_SRR1 ;					\
56	xori	r12,r12,MSR_LE ;				\
57	mtspr	SPRN_SRR1,r12 ;					\
58	RFI_TO_USER ;		/* return to userspace */		\
59	b	. ;	/* prevent speculative execution */
60
61#if defined(CONFIG_RELOCATABLE)
62	/*
63	 * We can't branch directly so we do it via the CTR which
64	 * is volatile across system calls.
65	 */
66#define SYSCALL_PSERIES_2_DIRECT				\
67	mflr	r10 ;						\
68	ld	r12,PACAKBASE(r13) ; 				\
69	LOAD_HANDLER(r12, system_call_entry) ;			\
70	mtctr	r12 ;						\
71	mfspr	r12,SPRN_SRR1 ;					\
72	/* Re-use of r13... No spare regs to do this */	\
73	li	r13,MSR_RI ;					\
74	mtmsrd 	r13,1 ;						\
75	GET_PACA(r13) ;	/* get r13 back */			\
76	bctr ;
77#else
78	/* We can branch directly */
79#define SYSCALL_PSERIES_2_DIRECT				\
80	mfspr	r12,SPRN_SRR1 ;					\
81	li	r10,MSR_RI ;					\
82	mtmsrd 	r10,1 ;			/* Set RI (EE=0) */	\
83	b	system_call_common ;
84#endif
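/*
 * A note on the pattern above (a sketch, not a statement of the macro's
 * exact expansion): PACAKBASE holds the runtime base address of the kernel
 * text, LOAD_HANDLER() adds the handler's offset from _stext, and the CTR
 * (volatile across the system call ABI) carries the indirect branch.  The
 * same idiom appears throughout this file whenever a target may be out of
 * reach of a relative branch; "some_handler" below is a placeholder label
 * used only for illustration.
 */
#if 0	/* illustrative sketch, never assembled */
	ld	r12,PACAKBASE(r13)		/* runtime base of the kernel */
	LOAD_HANDLER(r12, some_handler)		/* add some_handler - _stext */
	mtctr	r12
	bctr					/* reach the target at any distance */
#endif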
85
86/*
87 * This is the start of the interrupt handlers for pSeries
88 * This code runs with relocation off.
89 * Code from here to __end_interrupts gets copied down to real
90 * address 0x100 when we are running a relocatable kernel.
91 * Therefore any relative branches in this section must only
92 * branch to labels in this section.
93 */
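/*
 * Concretely: a PC-relative "b label" keeps working after the copy because
 * source and target move down together, while a relative branch to a label
 * outside __start_interrupts..__end_interrupts would still encode the
 * link-time distance and land in the wrong place once this code runs from
 * 0x100.  That is why, in the relocatable case, such targets are reached
 * through an absolute address built from PACAKBASE instead, as sketched
 * below ("external_handler" is a placeholder label for illustration).
 */
#if 0	/* illustrative sketch, never assembled */
	b	system_reset_pSeries		/* fine: target is inside this section */
	ld	r10,PACAKBASE(r13)
	LOAD_HANDLER(r10, external_handler)	/* fine: absolute address of the target */
	mtctr	r10
	bctr
#endif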
94	. = 0x100
95	.globl __start_interrupts
96__start_interrupts:
97
98	.globl system_reset_pSeries;
99system_reset_pSeries:
100	HMT_MEDIUM_PPR_DISCARD
101	SET_SCRATCH0(r13)
102#ifdef CONFIG_PPC_P7_NAP
103BEGIN_FTR_SECTION
104	/* Running native on arch 2.06 or later, check if we are
105	 * waking up from nap/sleep/winkle.
106	 */
107	mfspr	r13,SPRN_SRR1
108	rlwinm.	r13,r13,47-31,30,31
109	beq	9f
110
111	cmpwi	cr3,r13,2
112
113	/*
114	 * Check if last bit of HSPGR0 is set. This indicates whether we are
115	 * waking up from winkle.
116	 */
117	GET_PACA(r13)
118	clrldi	r5,r13,63
119	clrrdi	r13,r13,1
120	cmpwi	cr4,r5,1
121	mtspr	SPRN_HSPRG0,r13
122
123	lbz	r0,PACA_THREAD_IDLE_STATE(r13)
124	cmpwi   cr2,r0,PNV_THREAD_NAP
125	bgt     cr2,8f				/* Either sleep or Winkle */
126
127	/* Waking up from nap should not cause hypervisor state loss */
128	bgt	cr3,.
129
130	/* Waking up from nap */
131	li	r0,PNV_THREAD_RUNNING
132	stb	r0,PACA_THREAD_IDLE_STATE(r13)	/* Clear thread state */
133
134#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
135	li	r0,KVM_HWTHREAD_IN_KERNEL
136	stb	r0,HSTATE_HWTHREAD_STATE(r13)
137	/* Order setting hwthread_state vs. testing hwthread_req */
138	sync
139	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
140	cmpwi	r0,0
141	beq	1f
142	b	kvm_start_guest
1431:
144#endif
145
146	/* Return SRR1 from power7_nap() */
147	mfspr	r3,SPRN_SRR1
148	beq	cr3,2f
149	b	power7_wakeup_noloss
1502:	b	power7_wakeup_loss
151
152	/* Fast Sleep wakeup on PowerNV */
1538:	GET_PACA(r13)
154	b 	power7_wakeup_tb_loss
155
1569:
157END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
158#endif /* CONFIG_PPC_P7_NAP */
159	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
160				 NOTEST, 0x100)
161
162	. = 0x200
163machine_check_pSeries_1:
164	/* This is moved out of line as it can be patched by FW, but
165	 * some code path might still want to branch into the original
166	 * vector
167	 */
168	HMT_MEDIUM_PPR_DISCARD
169	SET_SCRATCH0(r13)		/* save r13 */
170#ifdef CONFIG_PPC_P7_NAP
171BEGIN_FTR_SECTION
172	/* Running native on arch 2.06 or later, check if we are
173	 * waking up from nap. We only handle no state loss and
174	 * supervisor state loss. We do -not- handle hypervisor
175	 * state loss at this time.
176	 */
177	mfspr	r13,SPRN_SRR1
178	rlwinm.	r13,r13,47-31,30,31
179	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
180	beq	9f
181
182	mfspr	r13,SPRN_SRR1
183	rlwinm.	r13,r13,47-31,30,31
184	/* waking up from powersave (nap) state */
185	cmpwi	cr1,r13,2
186	/* Total loss of HV state is fatal. let's just stay stuck here */
187	OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
188	bgt	cr1,.
1899:
190	OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
191END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
192#endif /* CONFIG_PPC_P7_NAP */
193	EXCEPTION_PROLOG_0(PACA_EXMC)
194BEGIN_FTR_SECTION
195	b	machine_check_pSeries_early
196FTR_SECTION_ELSE
197	b	machine_check_pSeries_0
198ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
199
200	. = 0x300
201	.globl data_access_pSeries
202data_access_pSeries:
203	HMT_MEDIUM_PPR_DISCARD
204	SET_SCRATCH0(r13)
205	EXCEPTION_PROLOG_0(PACA_EXGEN)
206	b data_access_pSeries_ool
207
208	. = 0x380
209	.globl data_access_slb_pSeries
210data_access_slb_pSeries:
211	HMT_MEDIUM_PPR_DISCARD
212	SET_SCRATCH0(r13)
213	EXCEPTION_PROLOG_0(PACA_EXSLB)
214	b data_access_slb_pSeries_ool
215
216	. = 0x400
217	.globl instruction_access_pSeries
218instruction_access_pSeries:
219	HMT_MEDIUM_PPR_DISCARD
220	SET_SCRATCH0(r13)
221	EXCEPTION_PROLOG_0(PACA_EXGEN)
222	b instruction_access_pSeries_ool
223
224	. = 0x480
225	.globl instruction_access_slb_pSeries
226instruction_access_slb_pSeries:
227	HMT_MEDIUM_PPR_DISCARD
228	SET_SCRATCH0(r13)
229	EXCEPTION_PROLOG_0(PACA_EXSLB)
230	b instruction_access_slb_pSeries_ool
231
232	/* We open code these as we can't have a ". = x" (even with
233	 * x = ".") within a feature section.
234	 */
235	. = 0x500;
236	.globl hardware_interrupt_pSeries;
237	.globl hardware_interrupt_hv;
238hardware_interrupt_pSeries:
239hardware_interrupt_hv:
240	HMT_MEDIUM_PPR_DISCARD
241	BEGIN_FTR_SECTION
242		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
243					    EXC_HV, SOFTEN_TEST_HV)
244		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
245	FTR_SECTION_ELSE
246		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
247					    EXC_STD, SOFTEN_TEST_HV_201)
248		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
249	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
250
251	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
252	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
253
254	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
255	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
256
257	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
258	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
259
260	. = 0x900
261	.globl decrementer_trampoline
262decrementer_trampoline:
263	SET_SCRATCH0(r13)
264	EXCEPTION_PROLOG_0(PACA_EXGEN)
265	b	decrementer_ool
266
267	. = 0x980
268	.globl hdecrementer_trampoline
269hdecrementer_trampoline:
270	HMT_MEDIUM_PPR_DISCARD;
271	SET_SCRATCH0(r13);
272	EXCEPTION_PROLOG_0(PACA_EXGEN)
273	b hdecrementer_hv
274
275	MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
276	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
277
278	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
279	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
280
281	. = 0xc00
282	.globl	system_call_pSeries
283system_call_pSeries:
284	 /*
285	  * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems
286	  * that support it) before changing to HMT_MEDIUM. That allows the KVM
287	  * code to save that value into the guest state (it is the guest's PPR
288	  * value). Otherwise just change to HMT_MEDIUM as userspace has
289	  * already saved the PPR.
290	  */
291#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
292	SET_SCRATCH0(r13)
293	GET_PACA(r13)
294	std	r9,PACA_EXGEN+EX_R9(r13)
295	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);
296	HMT_MEDIUM;
297	std	r10,PACA_EXGEN+EX_R10(r13)
298	INTERRUPT_TO_KERNEL
299	OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR);
300	mfcr	r9
301	KVMTEST(0xc00)
302	GET_SCRATCH0(r13)
303#else
304	HMT_MEDIUM;
305#endif
306	SYSCALL_PSERIES_1
307	SYSCALL_PSERIES_2_RFID
308	SYSCALL_PSERIES_3
309	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
310
311	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
312	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
313
314	/* At 0xe??? we have a bunch of hypervisor exceptions; we branch
315	 * out of line to handle them.
316	 */
317	. = 0xe00
318hv_data_storage_trampoline:
319	SET_SCRATCH0(r13)
320	EXCEPTION_PROLOG_0(PACA_EXGEN)
321	b	h_data_storage_hv
322
323	. = 0xe20
324hv_instr_storage_trampoline:
325	SET_SCRATCH0(r13)
326	EXCEPTION_PROLOG_0(PACA_EXGEN)
327	b	h_instr_storage_hv
328
329	. = 0xe40
330emulation_assist_trampoline:
331	SET_SCRATCH0(r13)
332	EXCEPTION_PROLOG_0(PACA_EXGEN)
333	b	emulation_assist_hv
334
335	. = 0xe60
336hv_exception_trampoline:
337	SET_SCRATCH0(r13)
338	EXCEPTION_PROLOG_0(PACA_EXGEN)
339	b	hmi_exception_early
340
341	. = 0xe80
342hv_doorbell_trampoline:
343	SET_SCRATCH0(r13)
344	EXCEPTION_PROLOG_0(PACA_EXGEN)
345	b	h_doorbell_hv
346
347	/* We need to deal with the Altivec unavailable exception
348	 * here, which is at 0xf20 and thus in the middle of the
349	 * prolog code of the PerformanceMonitor one. A little
350	 * trickery is thus necessary.
351	 */
352	. = 0xf00
353performance_monitor_pseries_trampoline:
354	SET_SCRATCH0(r13)
355	EXCEPTION_PROLOG_0(PACA_EXGEN)
356	b	performance_monitor_pSeries
357
358	. = 0xf20
359altivec_unavailable_pseries_trampoline:
360	SET_SCRATCH0(r13)
361	EXCEPTION_PROLOG_0(PACA_EXGEN)
362	b	altivec_unavailable_pSeries
363
364	. = 0xf40
365vsx_unavailable_pseries_trampoline:
366	SET_SCRATCH0(r13)
367	EXCEPTION_PROLOG_0(PACA_EXGEN)
368	b	vsx_unavailable_pSeries
369
370	. = 0xf60
371facility_unavailable_trampoline:
372	SET_SCRATCH0(r13)
373	EXCEPTION_PROLOG_0(PACA_EXGEN)
374	b	facility_unavailable_pSeries
375
376	. = 0xf80
377hv_facility_unavailable_trampoline:
378	SET_SCRATCH0(r13)
379	EXCEPTION_PROLOG_0(PACA_EXGEN)
380	b	facility_unavailable_hv
381
382#ifdef CONFIG_CBE_RAS
383	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
384	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
385#endif /* CONFIG_CBE_RAS */
386
387	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
388	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
389
390	. = 0x1500
391	.global denorm_exception_hv
392denorm_exception_hv:
393	HMT_MEDIUM_PPR_DISCARD
394	mtspr	SPRN_SPRG_HSCRATCH0,r13
395	EXCEPTION_PROLOG_0(PACA_EXGEN)
396	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
397
398#ifdef CONFIG_PPC_DENORMALISATION
399	mfspr	r10,SPRN_HSRR1
400	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
401	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
402	addi	r11,r11,-4		/* HSRR0 is next instruction */
403	bne+	denorm_assist
404#endif
405
406	KVMTEST(0x1500)
407	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
408	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
409
410#ifdef CONFIG_CBE_RAS
411	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
412	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
413#endif /* CONFIG_CBE_RAS */
414
415	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
416	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
417
418#ifdef CONFIG_CBE_RAS
419	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
420	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
421#else
422	. = 0x1800
423#endif /* CONFIG_CBE_RAS */
424
425
426/*** Out of line interrupts support ***/
427
428	.align	7
429	/* moved from 0x200 */
430machine_check_pSeries_early:
431BEGIN_FTR_SECTION
432	EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
433	/*
434	 * Register contents:
435	 * R13		= PACA
436	 * R9		= CR
437	 * Original R9 to R13 is saved on PACA_EXMC
438	 *
439	 * Switch to mc_emergency stack and handle re-entrancy (we limit
440	 * the nested MCE up to level 4 to avoid stack overflow).
441	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
442	 *
443	 * We use paca->in_mce to check whether this is the first entry or
444	 * nested machine check. We increment paca->in_mce to track nested
445	 * machine checks.
446	 *
447	 * If this is the first entry then set stack pointer to
448	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
449	 * stack frame on mc_emergency stack.
450	 *
451	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
452	 * checkstop if we get another machine check exception before we do
453	 * rfid with MSR_ME=1.
454	 */
455	mr	r11,r1			/* Save r1 */
456	lhz	r10,PACA_IN_MCE(r13)
457	cmpwi	r10,0			/* Are we in nested machine check */
458	bne	0f			/* Yes, we are. */
459	/* First machine check entry */
460	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
4610:	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
462	addi	r10,r10,1		/* increment paca->in_mce */
463	sth	r10,PACA_IN_MCE(r13)
464	/* Limit nested MCE to level 4 to avoid stack overflow */
465	cmpwi	r10,4
466	bgt	2f			/* Check if we hit limit of 4 */
467	std	r11,GPR1(r1)		/* Save r1 on the stack. */
468	std	r11,0(r1)		/* make stack chain pointer */
469	mfspr	r11,SPRN_SRR0		/* Save SRR0 */
470	std	r11,_NIP(r1)
471	mfspr	r11,SPRN_SRR1		/* Save SRR1 */
472	std	r11,_MSR(r1)
473	mfspr	r11,SPRN_DAR		/* Save DAR */
474	std	r11,_DAR(r1)
475	mfspr	r11,SPRN_DSISR		/* Save DSISR */
476	std	r11,_DSISR(r1)
477	std	r9,_CCR(r1)		/* Save CR in stackframe */
478	/* Save r9 through r13 from EXMC save area to stack frame. */
479	EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
480	mfmsr	r11			/* get MSR value */
481	ori	r11,r11,MSR_ME		/* turn on ME bit */
482	ori	r11,r11,MSR_RI		/* turn on RI bit */
483	ld	r12,PACAKBASE(r13)	/* get high part of &label */
484	LOAD_HANDLER(r12, machine_check_handle_early)
4851:	mtspr	SPRN_SRR0,r12
486	mtspr	SPRN_SRR1,r11
487	RFI_TO_KERNEL
488	b	.	/* prevent speculative execution */
4892:
490	/* Stack overflow. Stay on emergency stack and panic.
491	 * Keep the ME bit off while panicking, so that if we hit
492	 * another machine check we checkstop.
493	 */
494	addi	r1,r1,INT_FRAME_SIZE	/* go back to previous stack frame */
495	ld	r11,PACAKMSR(r13)
496	ld	r12,PACAKBASE(r13)
497	LOAD_HANDLER(r12, unrecover_mce)
498	li	r10,MSR_ME
499	andc	r11,r11,r10		/* Turn off MSR_ME */
500	b	1b
501	b	.	/* prevent speculative execution */
502END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
503
504machine_check_pSeries:
505	.globl machine_check_fwnmi
506machine_check_fwnmi:
507	HMT_MEDIUM_PPR_DISCARD
508	SET_SCRATCH0(r13)		/* save r13 */
509	EXCEPTION_PROLOG_0(PACA_EXMC)
510machine_check_pSeries_0:
511	EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
512	EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
513	KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
514	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
515	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
516	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
517	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
518	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
519	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
520
521/* moved from 0x300 */
522	.globl data_access_pSeries_ool
523data_access_pSeries_ool:
524	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, 0x300)
525	EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD)
526
527	.globl data_access_slb_pSeries_ool
528data_access_slb_pSeries_ool:
529	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
530	std	r3,PACA_EXSLB+EX_R3(r13)
531	mfspr	r3,SPRN_DAR
532#ifdef __DISABLED__
533	/* Keep that around for when we re-implement dynamic VSIDs */
534	cmpdi	r3,0
535	bge	slb_miss_user_pseries
536#endif /* __DISABLED__ */
537	mfspr	r12,SPRN_SRR1
538#ifndef CONFIG_RELOCATABLE
539	b	slb_miss_realmode
540#else
541	/*
542	 * We can't just use a direct branch to slb_miss_realmode
543	 * because the distance from here to there depends on where
544	 * the kernel ends up being put.
545	 */
546	mfctr	r11
547	ld	r10,PACAKBASE(r13)
548	LOAD_HANDLER(r10, slb_miss_realmode)
549	mtctr	r10
550	bctr
551#endif
552
553	.globl instruction_access_pSeries_ool
554instruction_access_pSeries_ool:
555	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, 0x400)
556	EXCEPTION_PROLOG_PSERIES_1(instruction_access_common, EXC_STD)
557
558	.globl instruction_access_slb_pSeries_ool
559instruction_access_slb_pSeries_ool:
560	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
561	std	r3,PACA_EXSLB+EX_R3(r13)
562	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
563#ifdef __DISABLED__
564	/* Keep that around for when we re-implement dynamic VSIDs */
565	cmpdi	r3,0
566	bge	slb_miss_user_pseries
567#endif /* __DISABLED__ */
568	mfspr	r12,SPRN_SRR1
569#ifndef CONFIG_RELOCATABLE
570	b	slb_miss_realmode
571#else
572	mfctr	r11
573	ld	r10,PACAKBASE(r13)
574	LOAD_HANDLER(r10, slb_miss_realmode)
575	mtctr	r10
576	bctr
577#endif
578
579#ifdef CONFIG_PPC_DENORMALISATION
580denorm_assist:
581BEGIN_FTR_SECTION
582/*
583 * To denormalise we need to move a copy of the register to itself.
584 * For POWER6 do that here for all FP regs.
585 */
586	mfmsr	r10
587	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
588	xori	r10,r10,(MSR_FE0|MSR_FE1)
589	mtmsrd	r10
590	sync
591
592#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
593#define FMR4(n)  FMR2(n) ; FMR2(n+2)
594#define FMR8(n)  FMR4(n) ; FMR4(n+4)
595#define FMR16(n) FMR8(n) ; FMR8(n+8)
596#define FMR32(n) FMR16(n) ; FMR16(n+16)
597	FMR32(0)
598
599FTR_SECTION_ELSE
600/*
601 * To denormalise we need to move a copy of the register to itself.
602 * For POWER7 do that here for the first 32 VSX registers only.
603 */
604	mfmsr	r10
605	oris	r10,r10,MSR_VSX@h
606	mtmsrd	r10
607	sync
608
609#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
610#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
611#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
612#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
613#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
614	XVCPSGNDP32(0)
615
616ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
617
618BEGIN_FTR_SECTION
619	b	denorm_done
620END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
621/*
622 * To denormalise we need to move a copy of the register to itself.
623 * For POWER8 we need to do that for all 64 VSX registers
624 */
625	XVCPSGNDP32(32)
626denorm_done:
627	mtspr	SPRN_HSRR0,r11
628	mtcrf	0x80,r9
629	ld	r9,PACA_EXGEN+EX_R9(r13)
630	RESTORE_PPR_PACA(PACA_EXGEN, r10)
631BEGIN_FTR_SECTION
632	ld	r10,PACA_EXGEN+EX_CFAR(r13)
633	mtspr	SPRN_CFAR,r10
634END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
635	ld	r10,PACA_EXGEN+EX_R10(r13)
636	ld	r11,PACA_EXGEN+EX_R11(r13)
637	ld	r12,PACA_EXGEN+EX_R12(r13)
638	ld	r13,PACA_EXGEN+EX_R13(r13)
639	HRFI_TO_UNKNOWN
640	b	.
641#endif
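/*
 * The FMRn/XVCPSGNDPn families above are doubling macro trees: each level
 * pastes two copies of the level below, one at n and one at n+step, so
 * FMR32(0) expands to "fmr 0,0 ; fmr 1,1 ; ... ; fmr 31,31" and
 * XVCPSGNDP32(32) covers VSX registers 32..63.  Copying every register onto
 * itself is the "move a copy of the register to itself" operation the
 * comments above rely on to denormalise the contents.  Expansion sketch:
 */
#if 0	/* never assembled */
	FMR2(0)			/* -> fmr 0,0 ; fmr 1,1 */
	FMR4(4)			/* -> FMR2(4) ; FMR2(6), i.e. FPRs 4-7 */
#endif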
642
643	.align	7
644	/* moved from 0xe00 */
645	MASKABLE_EXCEPTION_OOL(0x900, decrementer)
646	STD_EXCEPTION_HV_OOL(0x982, hdecrementer)
647	STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
648	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
649	STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
650	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
651	STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
652	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
653	MASKABLE_EXCEPTION_HV_OOL(0xe62, hmi_exception)
654	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
655
656	MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
657	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
658
659	/* moved from 0xf00 */
660	STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
661	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
662	STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
663	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
664	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
665	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
666	STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
667	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
668	STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
669	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
670
671/*
672 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
673 * - If it was a decrementer interrupt, we bump the dec to max and return.
674 * - If it was a doorbell we return immediately since doorbells are
675 *   edge-triggered and won't automatically refire.
676 * - If it was an HMI we return immediately since we handled it in realmode
677 *   and it won't refire.
678 * - else we hard disable and return.
679 * This is called with r10 containing the value to OR to the paca field.
680 */
681#define MASKED_INTERRUPT(_H)				\
682masked_##_H##interrupt:					\
683	std	r11,PACA_EXGEN+EX_R11(r13);		\
684	lbz	r11,PACAIRQHAPPENED(r13);		\
685	or	r11,r11,r10;				\
686	stb	r11,PACAIRQHAPPENED(r13);		\
687	cmpwi	r10,PACA_IRQ_DEC;			\
688	bne	1f;					\
689	lis	r10,0x7fff;				\
690	ori	r10,r10,0xffff;				\
691	mtspr	SPRN_DEC,r10;				\
692	b	2f;					\
6931:	cmpwi	r10,PACA_IRQ_DBELL;			\
694	beq	2f;					\
695	cmpwi	r10,PACA_IRQ_HMI;			\
696	beq	2f;					\
697	mfspr	r10,SPRN_##_H##SRR1;			\
698	rldicl	r10,r10,48,1; /* clear MSR_EE */	\
699	rotldi	r10,r10,16;				\
700	mtspr	SPRN_##_H##SRR1,r10;			\
7012:	mtcrf	0x80,r9;				\
702	ld	r9,PACA_EXGEN+EX_R9(r13);		\
703	ld	r10,PACA_EXGEN+EX_R10(r13);		\
704	ld	r11,PACA_EXGEN+EX_R11(r13);		\
705	GET_SCRATCH0(r13);				\
706	##_H##RFI_TO_KERNEL;				\
707	b	.
708
709	MASKED_INTERRUPT()
710	MASKED_INTERRUPT(H)
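/*
 * The bits accumulated in paca->irq_happened above are consumed later, when
 * the kernel soft-enables interrupts again and replays anything that was
 * lost via __replay_interrupt below.  The SRR1 dance in the hard-disable
 * path is a rotate trick for clearing MSR_EE (0x8000) in a 64-bit MSR image
 * without needing an extra scratch register for a mask:
 */
#if 0	/* worked example of the idiom, never assembled */
	rldicl	r10,r10,48,1	/* rotate left 48: EE lands in bit 63, mask clears it */
	rotldi	r10,r10,16	/* rotate 16 more: 48+16=64, every other bit is back */
#endif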
711
712/*
713 * Called from arch_local_irq_enable when an interrupt needs
714 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
715 * which kind of interrupt. MSR:EE is already off. We generate a
716 * stackframe as if a real interrupt had happened.
717 *
718 * Note: While MSR:EE is off, we need to make sure that _MSR
719 * in the generated frame has EE set to 1 or the exception
720 * handler will not properly re-enable interrupts.
721 */
722_GLOBAL(__replay_interrupt)
723	/* We are going to jump to the exception common code which
724	 * will retrieve various register values from the PACA which
725	 * we don't give a damn about, so we don't bother storing them.
726	 */
727	mfmsr	r12
728	mflr	r11
729	mfcr	r9
730	ori	r12,r12,MSR_EE
731	cmpwi	r3,0x900
732	beq	decrementer_common
733	cmpwi	r3,0x500
734	beq	hardware_interrupt_common
735BEGIN_FTR_SECTION
736	cmpwi	r3,0xe80
737	beq	h_doorbell_common
738FTR_SECTION_ELSE
739	cmpwi	r3,0xa00
740	beq	doorbell_super_common
741ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
742	blr
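/*
 * Usage sketch (hypothetical caller, for illustration only): the C side
 * passes the vector of the interrupt recorded in paca->irq_happened, so a
 * lost decrementer would be replayed as:
 */
#if 0	/* never assembled */
	li	r3,0x900		/* vector of the interrupt to replay */
	bl	__replay_interrupt
#endif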
743
744#ifdef CONFIG_PPC_PSERIES
745/*
746 * Vectors for the FWNMI option.  Share common code.
747 */
748	.globl system_reset_fwnmi
749	.align	7
750system_reset_fwnmi:
751	HMT_MEDIUM_PPR_DISCARD
752	SET_SCRATCH0(r13)		/* save r13 */
753	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
754				 NOTEST, 0x100)
755
756#endif /* CONFIG_PPC_PSERIES */
757
758#ifdef __DISABLED__
759/*
760 * This is used when the SLB miss handler has to go virtual,
761 * which doesn't happen at the moment but will once we re-implement
762 * dynamic VSIDs for shared page tables.
763 */
764slb_miss_user_pseries:
765	std	r10,PACA_EXGEN+EX_R10(r13)
766	std	r11,PACA_EXGEN+EX_R11(r13)
767	std	r12,PACA_EXGEN+EX_R12(r13)
768	GET_SCRATCH0(r10)
769	ld	r11,PACA_EXSLB+EX_R9(r13)
770	ld	r12,PACA_EXSLB+EX_R3(r13)
771	std	r10,PACA_EXGEN+EX_R13(r13)
772	std	r11,PACA_EXGEN+EX_R9(r13)
773	std	r12,PACA_EXGEN+EX_R3(r13)
774	clrrdi	r12,r13,32
775	mfmsr	r10
776	mfspr	r11,SRR0			/* save SRR0 */
777	ori	r12,r12,slb_miss_user_common@l	/* virt addr of handler */
778	ori	r10,r10,MSR_IR|MSR_DR|MSR_RI
779	mtspr	SRR0,r12
780	mfspr	r12,SRR1			/* and SRR1 */
781	mtspr	SRR1,r10
782	rfid
783	b	.				/* prevent spec. execution */
784#endif /* __DISABLED__ */
785
786#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
787kvmppc_skip_interrupt:
788	/*
789	 * Here all GPRs are unchanged from when the interrupt happened
790	 * except for r13, which is saved in SPRG_SCRATCH0.
791	 */
792	mfspr	r13, SPRN_SRR0
793	addi	r13, r13, 4
794	mtspr	SPRN_SRR0, r13
795	GET_SCRATCH0(r13)
796	RFI_TO_KERNEL
797	b	.
798
799kvmppc_skip_Hinterrupt:
800	/*
801	 * Here all GPRs are unchanged from when the interrupt happened
802	 * except for r13, which is saved in SPRG_SCRATCH0.
803	 */
804	mfspr	r13, SPRN_HSRR0
805	addi	r13, r13, 4
806	mtspr	SPRN_HSRR0, r13
807	GET_SCRATCH0(r13)
808	HRFI_TO_KERNEL
809	b	.
810#endif
811
812/*
813 * Code from here down to __end_handlers is invoked from the
814 * exception prologs above.  Because the prologs assemble the
815 * addresses of these handlers using the LOAD_HANDLER macro,
816 * which uses an ori instruction, these handlers must be in
817 * the first 64k of the kernel image.
818 */
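/*
 * In other words, LOAD_HANDLER(reg, label) forms the handler address as
 * roughly "kernel base from PACAKBASE plus (label - _stext)", and because
 * that offset goes into a single ori, (label - _stext) must fit in an
 * unsigned 16-bit immediate -- hence the 64k limit.  A sketch with made-up
 * offsets:
 */
#if 0	/* illustration only, never assembled */
	ld	r12,PACAKBASE(r13)	/* kernel base, low 16 bits clear */
	ori	r12,r12,0x1f00		/* handler at _stext + 0x1f00: encodable */
	/* a handler at _stext + 0x12000 would not fit in the ori immediate */
#endif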
819
820/*** Common interrupt handlers ***/
821
822	STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception)
823
824	STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
825	STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt)
826	STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt)
827#ifdef CONFIG_PPC_DOORBELL
828	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception)
829#else
830	STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception)
831#endif
832	STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception)
833	STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception)
834	STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception)
835	STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt)
836	STD_EXCEPTION_COMMON_ASYNC(0xe60, hmi_exception, handle_hmi_exception)
837#ifdef CONFIG_PPC_DOORBELL
838	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception)
839#else
840	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception)
841#endif
842	STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception)
843	STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception)
844	STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception)
845#ifdef CONFIG_ALTIVEC
846	STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception)
847#else
848	STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception)
849#endif
850#ifdef CONFIG_CBE_RAS
851	STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception)
852	STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception)
853	STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception)
854#endif /* CONFIG_CBE_RAS */
855
856	/*
857	 * Relocation-on interrupts: A subset of the interrupts can be delivered
858	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
859	 * them.  Addresses are the same as the original interrupt addresses, but
860	 * offset by 0xc000000000004000.
861	 * It's impossible to receive interrupts below 0x300 via this mechanism.
862	 * KVM: None of these traps are from the guest; anything that escalated
863	 * to HV=1 from HV=0 is delivered via real mode handlers.
864	 */
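	/*
	 * Worked example: with AIL=2 a decrementer, normally taken at 0x900,
	 * is delivered at 0xc000000000004900 instead, i.e. the relocation-on
	 * vector laid out below at ". = 0x4900" plus the kernel's effective
	 * address base of 0xc000000000000000.  That is why every handler in
	 * this block sits at "0x4000 + original vector".
	 */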
865
866	/*
867	 * This uses the standard macro, since the original 0x300 vector
868	 * only has extra guff for STAB-based processors -- which never
869	 * come here.
870	 */
871	STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
872	. = 0x4380
873	.globl data_access_slb_relon_pSeries
874data_access_slb_relon_pSeries:
875	SET_SCRATCH0(r13)
876	EXCEPTION_PROLOG_0(PACA_EXSLB)
877	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
878	std	r3,PACA_EXSLB+EX_R3(r13)
879	mfspr	r3,SPRN_DAR
880	mfspr	r12,SPRN_SRR1
881#ifndef CONFIG_RELOCATABLE
882	b	slb_miss_realmode
883#else
884	/*
885	 * We can't just use a direct branch to slb_miss_realmode
886	 * because the distance from here to there depends on where
887	 * the kernel ends up being put.
888	 */
889	mfctr	r11
890	ld	r10,PACAKBASE(r13)
891	LOAD_HANDLER(r10, slb_miss_realmode)
892	mtctr	r10
893	bctr
894#endif
895
896	STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
897	. = 0x4480
898	.globl instruction_access_slb_relon_pSeries
899instruction_access_slb_relon_pSeries:
900	SET_SCRATCH0(r13)
901	EXCEPTION_PROLOG_0(PACA_EXSLB)
902	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
903	std	r3,PACA_EXSLB+EX_R3(r13)
904	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
905	mfspr	r12,SPRN_SRR1
906#ifndef CONFIG_RELOCATABLE
907	b	slb_miss_realmode
908#else
909	mfctr	r11
910	ld	r10,PACAKBASE(r13)
911	LOAD_HANDLER(r10, slb_miss_realmode)
912	mtctr	r10
913	bctr
914#endif
915
916	. = 0x4500
917	.globl hardware_interrupt_relon_pSeries;
918	.globl hardware_interrupt_relon_hv;
919hardware_interrupt_relon_pSeries:
920hardware_interrupt_relon_hv:
921	BEGIN_FTR_SECTION
922		_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
923	FTR_SECTION_ELSE
924		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
925	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
926	STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
927	STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
928	STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
929
930	. = 0x4900
931	.globl decrementer_relon_trampoline
932decrementer_relon_trampoline:
933	HMT_MEDIUM_PPR_DISCARD
934	SET_SCRATCH0(r13)
935	EXCEPTION_PROLOG_0(PACA_EXGEN)
936	b decrementer_relon_pSeries
937
938	STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
939	MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
940	STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)
941
942	. = 0x4c00
943	.globl system_call_relon_pSeries
944system_call_relon_pSeries:
945	HMT_MEDIUM
946	SYSCALL_PSERIES_1
947	SYSCALL_PSERIES_2_DIRECT
948	SYSCALL_PSERIES_3
949
950	STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
951
952	. = 0x4e00
953	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
954
955	. = 0x4e20
956	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
957
958	. = 0x4e40
959emulation_assist_relon_trampoline:
960	SET_SCRATCH0(r13)
961	EXCEPTION_PROLOG_0(PACA_EXGEN)
962	b	emulation_assist_relon_hv
963
964	. = 0x4e60
965	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
966
967	. = 0x4e80
968h_doorbell_relon_trampoline:
969	SET_SCRATCH0(r13)
970	EXCEPTION_PROLOG_0(PACA_EXGEN)
971	b	h_doorbell_relon_hv
972
973	. = 0x4f00
974performance_monitor_relon_pseries_trampoline:
975	SET_SCRATCH0(r13)
976	EXCEPTION_PROLOG_0(PACA_EXGEN)
977	b	performance_monitor_relon_pSeries
978
979	. = 0x4f20
980altivec_unavailable_relon_pseries_trampoline:
981	SET_SCRATCH0(r13)
982	EXCEPTION_PROLOG_0(PACA_EXGEN)
983	b	altivec_unavailable_relon_pSeries
984
985	. = 0x4f40
986vsx_unavailable_relon_pseries_trampoline:
987	SET_SCRATCH0(r13)
988	EXCEPTION_PROLOG_0(PACA_EXGEN)
989	b	vsx_unavailable_relon_pSeries
990
991	. = 0x4f60
992facility_unavailable_relon_trampoline:
993	SET_SCRATCH0(r13)
994	EXCEPTION_PROLOG_0(PACA_EXGEN)
995	b	facility_unavailable_relon_pSeries
996
997	. = 0x4f80
998hv_facility_unavailable_relon_trampoline:
999	SET_SCRATCH0(r13)
1000	EXCEPTION_PROLOG_0(PACA_EXGEN)
1001	b	hv_facility_unavailable_relon_hv
1002
1003	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
1004#ifdef CONFIG_PPC_DENORMALISATION
1005	. = 0x5500
1006	b	denorm_exception_hv
1007#endif
1008	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
1009
1010	.align	7
1011system_call_entry:
1012	b	system_call_common
1013
1014ppc64_runlatch_on_trampoline:
1015	b	__ppc64_runlatch_on
1016
1017/*
1018 * Here r13 points to the paca, r9 contains the saved CR,
1019 * SRR0 and SRR1 are saved in r11 and r12,
1020 * r9 - r13 are saved in paca->exgen.
1021 */
1022	.align	7
1023	.globl data_access_common
1024data_access_common:
1025	mfspr	r10,SPRN_DAR
1026	std	r10,PACA_EXGEN+EX_DAR(r13)
1027	mfspr	r10,SPRN_DSISR
1028	stw	r10,PACA_EXGEN+EX_DSISR(r13)
1029	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
1030	RECONCILE_IRQ_STATE(r10, r11)
1031	ld	r12,_MSR(r1)
1032	ld	r3,PACA_EXGEN+EX_DAR(r13)
1033	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
1034	li	r5,0x300
1035	b	do_hash_page		/* Try to handle as hpte fault */
1036
1037	.align  7
1038	.globl  h_data_storage_common
1039h_data_storage_common:
1040	mfspr   r10,SPRN_HDAR
1041	std     r10,PACA_EXGEN+EX_DAR(r13)
1042	mfspr   r10,SPRN_HDSISR
1043	stw     r10,PACA_EXGEN+EX_DSISR(r13)
1044	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
1045	bl      save_nvgprs
1046	RECONCILE_IRQ_STATE(r10, r11)
1047	addi    r3,r1,STACK_FRAME_OVERHEAD
1048	bl      unknown_exception
1049	b       ret_from_except
1050
1051	.align	7
1052	.globl instruction_access_common
1053instruction_access_common:
1054	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
1055	RECONCILE_IRQ_STATE(r10, r11)
1056	ld	r12,_MSR(r1)
1057	ld	r3,_NIP(r1)
1058	andis.	r4,r12,0x5820
1059	li	r5,0x400
1060	b	do_hash_page		/* Try to handle as hpte fault */
1061
1062	STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
1063
1064/*
1065 * Here is the common user SLB miss handler that is used when going to
1066 * virtual mode for SLB misses; it is currently not used.
1067 */
1068#ifdef __DISABLED__
1069	.align	7
1070	.globl	slb_miss_user_common
1071slb_miss_user_common:
1072	mflr	r10
1073	std	r3,PACA_EXGEN+EX_DAR(r13)
1074	stw	r9,PACA_EXGEN+EX_CCR(r13)
1075	std	r10,PACA_EXGEN+EX_LR(r13)
1076	std	r11,PACA_EXGEN+EX_SRR0(r13)
1077	bl	slb_allocate_user
1078
1079	ld	r10,PACA_EXGEN+EX_LR(r13)
1080	ld	r3,PACA_EXGEN+EX_R3(r13)
1081	lwz	r9,PACA_EXGEN+EX_CCR(r13)
1082	ld	r11,PACA_EXGEN+EX_SRR0(r13)
1083	mtlr	r10
1084	beq-	slb_miss_fault
1085
1086	andi.	r10,r12,MSR_RI		/* check for unrecoverable exception */
1087	beq-	unrecov_user_slb
1088	mfmsr	r10
1089
1090.machine push
1091.machine "power4"
1092	mtcrf	0x80,r9
1093.machine pop
1094
1095	clrrdi	r10,r10,2		/* clear RI before setting SRR0/1 */
1096	mtmsrd	r10,1
1097
1098	mtspr	SRR0,r11
1099	mtspr	SRR1,r12
1100
1101	ld	r9,PACA_EXGEN+EX_R9(r13)
1102	ld	r10,PACA_EXGEN+EX_R10(r13)
1103	ld	r11,PACA_EXGEN+EX_R11(r13)
1104	ld	r12,PACA_EXGEN+EX_R12(r13)
1105	ld	r13,PACA_EXGEN+EX_R13(r13)
1106	rfid
1107	b	.
1108
1109slb_miss_fault:
1110	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
1111	ld	r4,PACA_EXGEN+EX_DAR(r13)
1112	li	r5,0
1113	std	r4,_DAR(r1)
1114	std	r5,_DSISR(r1)
1115	b	handle_page_fault
1116
1117unrecov_user_slb:
1118	EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1119	RECONCILE_IRQ_STATE(r10, r11)
1120	bl	save_nvgprs
11211:	addi	r3,r1,STACK_FRAME_OVERHEAD
1122	bl	unrecoverable_exception
1123	b	1b
1124
1125#endif /* __DISABLED__ */
1126
1127
1128	/*
1129	 * Machine check is different because we use a different
1130	 * save area: PACA_EXMC instead of PACA_EXGEN.
1131	 */
1132	.align	7
1133	.globl machine_check_common
1134machine_check_common:
1135
1136	mfspr	r10,SPRN_DAR
1137	std	r10,PACA_EXGEN+EX_DAR(r13)
1138	mfspr	r10,SPRN_DSISR
1139	stw	r10,PACA_EXGEN+EX_DSISR(r13)
1140	EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
1141	FINISH_NAP
1142	RECONCILE_IRQ_STATE(r10, r11)
1143	ld	r3,PACA_EXGEN+EX_DAR(r13)
1144	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
1145	std	r3,_DAR(r1)
1146	std	r4,_DSISR(r1)
1147	bl	save_nvgprs
1148	addi	r3,r1,STACK_FRAME_OVERHEAD
1149	bl	machine_check_exception
1150	b	ret_from_except
1151
1152	.align	7
1153	.globl alignment_common
1154alignment_common:
1155	mfspr	r10,SPRN_DAR
1156	std	r10,PACA_EXGEN+EX_DAR(r13)
1157	mfspr	r10,SPRN_DSISR
1158	stw	r10,PACA_EXGEN+EX_DSISR(r13)
1159	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
1160	ld	r3,PACA_EXGEN+EX_DAR(r13)
1161	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
1162	std	r3,_DAR(r1)
1163	std	r4,_DSISR(r1)
1164	bl	save_nvgprs
1165	RECONCILE_IRQ_STATE(r10, r11)
1166	addi	r3,r1,STACK_FRAME_OVERHEAD
1167	bl	alignment_exception
1168	b	ret_from_except
1169
1170	.align	7
1171	.globl program_check_common
1172program_check_common:
1173	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
1174	bl	save_nvgprs
1175	RECONCILE_IRQ_STATE(r10, r11)
1176	addi	r3,r1,STACK_FRAME_OVERHEAD
1177	bl	program_check_exception
1178	b	ret_from_except
1179
1180	.align	7
1181	.globl fp_unavailable_common
1182fp_unavailable_common:
1183	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
1184	bne	1f			/* if from user, just load it up */
1185	bl	save_nvgprs
1186	RECONCILE_IRQ_STATE(r10, r11)
1187	addi	r3,r1,STACK_FRAME_OVERHEAD
1188	bl	kernel_fp_unavailable_exception
1189	BUG_OPCODE
11901:
1191#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1192BEGIN_FTR_SECTION
1193	/* Test if 2 TM state bits are zero.  If non-zero (i.e. userspace was in
1194	 * a transaction), go do TM stuff
1195	 */
1196	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
1197	bne-	2f
1198END_FTR_SECTION_IFSET(CPU_FTR_TM)
1199#endif
1200	bl	load_up_fpu
1201	b	fast_exception_return
1202#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
12032:	/* User process was in a transaction */
1204	bl	save_nvgprs
1205	RECONCILE_IRQ_STATE(r10, r11)
1206	addi	r3,r1,STACK_FRAME_OVERHEAD
1207	bl	fp_unavailable_tm
1208	b	ret_from_except
1209#endif
1210	.align	7
1211	.globl altivec_unavailable_common
1212altivec_unavailable_common:
1213	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
1214#ifdef CONFIG_ALTIVEC
1215BEGIN_FTR_SECTION
1216	beq	1f
1217#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1218  BEGIN_FTR_SECTION_NESTED(69)
1219	/* Test if 2 TM state bits are zero.  If non-zero (i.e. userspace was in
1220	 * a transaction), go do TM stuff
1221	 */
1222	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
1223	bne-	2f
1224  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1225#endif
1226	bl	load_up_altivec
1227	b	fast_exception_return
1228#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
12292:	/* User process was in a transaction */
1230	bl	save_nvgprs
1231	RECONCILE_IRQ_STATE(r10, r11)
1232	addi	r3,r1,STACK_FRAME_OVERHEAD
1233	bl	altivec_unavailable_tm
1234	b	ret_from_except
1235#endif
12361:
1237END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1238#endif
1239	bl	save_nvgprs
1240	RECONCILE_IRQ_STATE(r10, r11)
1241	addi	r3,r1,STACK_FRAME_OVERHEAD
1242	bl	altivec_unavailable_exception
1243	b	ret_from_except
1244
1245	.align	7
1246	.globl vsx_unavailable_common
1247vsx_unavailable_common:
1248	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
1249#ifdef CONFIG_VSX
1250BEGIN_FTR_SECTION
1251	beq	1f
1252#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1253  BEGIN_FTR_SECTION_NESTED(69)
1254	/* Test if 2 TM state bits are zero.  If non-zero (i.e. userspace was in
1255	 * a transaction), go do TM stuff
1256	 */
1257	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
1258	bne-	2f
1259  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1260#endif
1261	b	load_up_vsx
1262#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
12632:	/* User process was in a transaction */
1264	bl	save_nvgprs
1265	RECONCILE_IRQ_STATE(r10, r11)
1266	addi	r3,r1,STACK_FRAME_OVERHEAD
1267	bl	vsx_unavailable_tm
1268	b	ret_from_except
1269#endif
12701:
1271END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1272#endif
1273	bl	save_nvgprs
1274	RECONCILE_IRQ_STATE(r10, r11)
1275	addi	r3,r1,STACK_FRAME_OVERHEAD
1276	bl	vsx_unavailable_exception
1277	b	ret_from_except
1278
1279	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception)
1280	STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception)
1281
1282	.align	7
1283	.globl	__end_handlers
1284__end_handlers:
1285
1286	/* Equivalents to the above handlers for relocation-on interrupt vectors */
1287	MASKABLE_RELON_EXCEPTION_PSERIES_OOL(0x900, decrementer)
1288
1289	STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
1290	MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
1291
1292	STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
1293	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
1294	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
1295	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
1296	STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
1297
1298	/*
1299	 * The __end_interrupts marker must be past the out-of-line (OOL)
1300	 * handlers, so that they are copied to real address 0x100 when running
1301	 * a relocatable kernel. This ensures they can be reached from the short
1302	 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
1303	 * directly, without using LOAD_HANDLER().
1304	 */
1305	.align	7
1306	.globl	__end_interrupts
1307__end_interrupts:
1308
1309#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1310/*
1311 * Data area reserved for FWNMI option.
1312 * This address (0x7000) is fixed by the RPA.
1313 */
1314	. = 0x7000
1315	.globl fwnmi_data_area
1316fwnmi_data_area:
1317
1318	/* pseries and powernv need to keep the whole page from
1319	 * 0x7000 to 0x8000 free for use by the firmware
1320	 */
1321	. = 0x8000
1322#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1323
1324	.globl hmi_exception_early
1325hmi_exception_early:
1326	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0xe60)
1327	mr	r10,r1			/* Save r1			*/
1328	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
1329	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
1330	std	r9,_CCR(r1)		/* save CR in stackframe	*/
1331	mfspr	r11,SPRN_HSRR0		/* Save HSRR0 */
1332	std	r11,_NIP(r1)		/* save HSRR0 in stackframe	*/
1333	mfspr	r12,SPRN_HSRR1		/* Save HSRR1 */
1334	std	r12,_MSR(r1)		/* save HSRR1 in stackframe	*/
1335	std	r10,0(r1)		/* make stack chain pointer	*/
1336	std	r0,GPR0(r1)		/* save r0 in stackframe	*/
1337	std	r10,GPR1(r1)		/* save r1 in stackframe	*/
1338	EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
1339	EXCEPTION_PROLOG_COMMON_3(0xe60)
1340	addi	r3,r1,STACK_FRAME_OVERHEAD
1341	bl	hmi_exception_realmode
1342	/* Windup the stack. */
1343	/* Move original HSRR0 and HSRR1 into the respective regs */
1344	ld	r9,_MSR(r1)
1345	mtspr	SPRN_HSRR1,r9
1346	ld	r3,_NIP(r1)
1347	mtspr	SPRN_HSRR0,r3
1348	ld	r9,_CTR(r1)
1349	mtctr	r9
1350	ld	r9,_XER(r1)
1351	mtxer	r9
1352	ld	r9,_LINK(r1)
1353	mtlr	r9
1354	REST_GPR(0, r1)
1355	REST_8GPRS(2, r1)
1356	REST_GPR(10, r1)
1357	ld	r11,_CCR(r1)
1358	mtcr	r11
1359	REST_GPR(11, r1)
1360	REST_2GPRS(12, r1)
1361	/* restore original r1. */
1362	ld	r1,GPR1(r1)
1363
1364	/*
1365	 * Go to virtual mode and pull the HMI event information from
1366	 * firmware.
1367	 */
1368	.globl hmi_exception_after_realmode
1369hmi_exception_after_realmode:
1370	SET_SCRATCH0(r13)
1371	EXCEPTION_PROLOG_0(PACA_EXGEN)
1372	b	hmi_exception_hv
1373
1374
1375#define MACHINE_CHECK_HANDLER_WINDUP			\
1376	/* Clear MSR_RI before setting SRR0 and SRR1. */\
1377	li	r0,MSR_RI;				\
1378	mfmsr	r9;		/* get MSR value */	\
1379	andc	r9,r9,r0;				\
1380	mtmsrd	r9,1;		/* Clear MSR_RI */	\
1381	/* Move original SRR0 and SRR1 into the respective regs */	\
1382	ld	r9,_MSR(r1);				\
1383	mtspr	SPRN_SRR1,r9;				\
1384	ld	r3,_NIP(r1);				\
1385	mtspr	SPRN_SRR0,r3;				\
1386	ld	r9,_CTR(r1);				\
1387	mtctr	r9;					\
1388	ld	r9,_XER(r1);				\
1389	mtxer	r9;					\
1390	ld	r9,_LINK(r1);				\
1391	mtlr	r9;					\
1392	REST_GPR(0, r1);				\
1393	REST_8GPRS(2, r1);				\
1394	REST_GPR(10, r1);				\
1395	ld	r11,_CCR(r1);				\
1396	mtcr	r11;					\
1397	/* Decrement paca->in_mce. */			\
1398	lhz	r12,PACA_IN_MCE(r13);			\
1399	subi	r12,r12,1;				\
1400	sth	r12,PACA_IN_MCE(r13);			\
1401	REST_GPR(11, r1);				\
1402	REST_2GPRS(12, r1);				\
1403	/* restore original r1. */			\
1404	ld	r1,GPR1(r1)
1405
1406	/*
1407	 * Handle machine check early in real mode. We come here with
1408	 * ME=1, the MMU off (IR=0 and DR=0), and using the MC emergency stack.
1409	 */
1410	.align	7
1411	.globl machine_check_handle_early
1412machine_check_handle_early:
1413	std	r0,GPR0(r1)	/* Save r0 */
1414	EXCEPTION_PROLOG_COMMON_3(0x200)
1415	bl	save_nvgprs
1416	addi	r3,r1,STACK_FRAME_OVERHEAD
1417	bl	machine_check_early
1418	std	r3,RESULT(r1)	/* Save result */
1419	ld	r12,_MSR(r1)
1420#ifdef	CONFIG_PPC_P7_NAP
1421	/*
1422	 * Check if thread was in power saving mode. We come here when any
1423	 * of the following is true:
1424	 * a. thread wasn't in power saving mode
1425	 * b. thread was in power saving mode with no state loss or
1426	 *    supervisor state loss
1427	 *
1428	 * Go back to nap again if (b) is true.
1429	 */
1430	rlwinm.	r11,r12,47-31,30,31	/* Was it in power saving mode? */
1431	beq	4f			/* No, it wasn't */
1432	/* Thread was in power saving mode. Go back to nap again. */
1433	cmpwi	r11,2
1434	bne	3f
1435	/* Supervisor state loss */
1436	li	r0,1
1437	stb	r0,PACA_NAPSTATELOST(r13)
14383:	bl	machine_check_queue_event
1439	MACHINE_CHECK_HANDLER_WINDUP
1440	GET_PACA(r13)
1441	ld	r1,PACAR1(r13)
1442	li	r3,PNV_THREAD_NAP
1443	b	power7_enter_nap_mode
14444:
1445#endif
1446	/*
1447	 * Check if we are coming from hypervisor userspace. If yes, then we
1448	 * continue in the host kernel in V mode to deliver the MC event.
1449	 */
1450	rldicl.	r11,r12,4,63		/* See if MC hit while in HV mode. */
1451	beq	5f
1452	andi.	r11,r12,MSR_PR		/* See if coming from user. */
1453	bne	9f			/* continue in V mode if we are. */
1454
14555:
1456#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1457	/*
1458	 * We are coming from kernel context. Check if we are coming from
1459	 * a guest. If yes, then we can continue. We will fall through
1460	 * do_kvm_200->kvmppc_interrupt to deliver the MC event to the guest.
1461	 */
1462	lbz	r11,HSTATE_IN_GUEST(r13)
1463	cmpwi	r11,0			/* Check if coming from guest */
1464	bne	9f			/* continue if we are. */
1465#endif
1466	/*
1467	 * At this point we are not sure what context we came from.
1468	 * Queue up the MCE event and return from the interrupt.
1469	 * But before that, check if this is an unrecoverable exception.
1470	 * If yes, then stay on emergency stack and panic.
1471	 */
1472	andi.	r11,r12,MSR_RI
1473	bne	2f
14741:	mfspr	r11,SPRN_SRR0
1475	ld	r10,PACAKBASE(r13)
1476	LOAD_HANDLER(r10,unrecover_mce)
1477	mtspr	SPRN_SRR0,r10
1478	ld	r10,PACAKMSR(r13)
1479	/*
1480	 * We are going down. But there is a chance that we get hit by
1481	 * another MCE during the panic path and run into an unstable state
1482	 * with no way out. Hence, turn the ME bit off while going down, so that
1483	 * when another MCE is hit during the panic path, the system will checkstop
1484	 * and the hypervisor will get restarted cleanly by the SP.
1485	 */
1486	li	r3,MSR_ME
1487	andc	r10,r10,r3		/* Turn off MSR_ME */
1488	mtspr	SPRN_SRR1,r10
1489	RFI_TO_KERNEL
1490	b	.
14912:
1492	/*
1493	 * Check if we have successfully handled/recovered from the error; if not,
1494	 * stay on the emergency stack and panic.
1495	 */
1496	ld	r3,RESULT(r1)	/* Load result */
1497	cmpdi	r3,0		/* see if we handled MCE successfully */
1498
1499	beq	1b		/* if !handled then panic */
1500	/*
1501	 * Return from MC interrupt.
1502	 * Queue up the MCE event so that we can log it later, while
1503	 * returning from kernel or opal call.
1504	 */
1505	bl	machine_check_queue_event
1506	MACHINE_CHECK_HANDLER_WINDUP
1507	RFI_TO_USER_OR_KERNEL
15089:
1509	/* Deliver the machine check to host kernel in V mode. */
1510BEGIN_FTR_SECTION
1511	ld	r10,ORIG_GPR3(r1)
1512	mtspr	SPRN_CFAR,r10
1513END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1514	MACHINE_CHECK_HANDLER_WINDUP
1515	b	machine_check_pSeries
1516
1517unrecover_mce:
1518	/* Invoke machine_check_exception to print MCE event and panic. */
1519	addi	r3,r1,STACK_FRAME_OVERHEAD
1520	bl	machine_check_exception
1521	/*
1522	 * We will not reach here. Even if we did, there is no way out. Call
1523	 * unrecoverable_exception and die.
1524	 */
15251:	addi	r3,r1,STACK_FRAME_OVERHEAD
1526	bl	unrecoverable_exception
1527	b	1b
1528/*
1529 * r13 points to the PACA, r9 contains the saved CR,
1530 * r12 contains the saved SRR1, and SRR0 is still ready for return
1531 * r3 has the faulting address
1532 * r9 - r13 are saved in paca->exslb.
1533 * r3 is saved in paca->slb_r3
1534 * We assume we aren't going to take any exceptions during this procedure.
1535 */
1536slb_miss_realmode:
1537	mflr	r10
1538#ifdef CONFIG_RELOCATABLE
1539	mtctr	r11
1540#endif
1541
1542	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
1543	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
1544
1545	bl	slb_allocate_realmode
1546
1547	/* All done -- return from exception. */
1548
1549	ld	r10,PACA_EXSLB+EX_LR(r13)
1550	ld	r3,PACA_EXSLB+EX_R3(r13)
1551	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
1552
1553	mtlr	r10
1554
1555	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
1556	beq-	2f
1557	andi.	r10,r12,MSR_PR	/* check for user mode (PR != 0) */
1558	bne	1f
1559
1560.machine	push
1561.machine	"power4"
1562	mtcrf	0x80,r9
1563	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
1564.machine	pop
1565
1566	RESTORE_PPR_PACA(PACA_EXSLB, r9)
1567	ld	r9,PACA_EXSLB+EX_R9(r13)
1568	ld	r10,PACA_EXSLB+EX_R10(r13)
1569	ld	r11,PACA_EXSLB+EX_R11(r13)
1570	ld	r12,PACA_EXSLB+EX_R12(r13)
1571	ld	r13,PACA_EXSLB+EX_R13(r13)
1572	RFI_TO_KERNEL
1573	b	.	/* prevent speculative execution */
1574
15751:
1576.machine	push
1577.machine	"power4"
1578	mtcrf	0x80,r9
1579	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
1580.machine	pop
1581
1582	RESTORE_PPR_PACA(PACA_EXSLB, r9)
1583	ld	r9,PACA_EXSLB+EX_R9(r13)
1584	ld	r10,PACA_EXSLB+EX_R10(r13)
1585	ld	r11,PACA_EXSLB+EX_R11(r13)
1586	ld	r12,PACA_EXSLB+EX_R12(r13)
1587	ld	r13,PACA_EXSLB+EX_R13(r13)
1588	RFI_TO_USER
1589	b	.	/* prevent speculative execution */
1590
15912:	mfspr	r11,SPRN_SRR0
1592	ld	r10,PACAKBASE(r13)
1593	LOAD_HANDLER(r10,unrecov_slb)
1594	mtspr	SPRN_SRR0,r10
1595	ld	r10,PACAKMSR(r13)
1596	mtspr	SPRN_SRR1,r10
1597	RFI_TO_KERNEL
1598	b	.
1599
1600unrecov_slb:
1601	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
1602	RECONCILE_IRQ_STATE(r10, r11)
1603	bl	save_nvgprs
16041:	addi	r3,r1,STACK_FRAME_OVERHEAD
1605	bl	unrecoverable_exception
1606	b	1b
1607
1608
1609#ifdef CONFIG_PPC_970_NAP
1610power4_fixup_nap:
1611	andc	r9,r9,r10
1612	std	r9,TI_LOCAL_FLAGS(r11)
1613	ld	r10,_LINK(r1)		/* make idle task do the */
1614	std	r10,_NIP(r1)		/* equivalent of a blr */
1615	blr
1616#endif
1617
1618	.balign 16
1619	.globl stf_barrier_fallback
1620stf_barrier_fallback:
1621	std	r9,PACA_EXRFI+EX_R9(r13)
1622	std	r10,PACA_EXRFI+EX_R10(r13)
1623	sync
1624	ld	r9,PACA_EXRFI+EX_R9(r13)
1625	ld	r10,PACA_EXRFI+EX_R10(r13)
1626	ori	31,31,0
1627	.rept 14
1628	b	1f
16291:
1630	.endr
1631	blr
1632
1633
1634/* Clobbers r10, r11, ctr */
1635.macro L1D_DISPLACEMENT_FLUSH
1636	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
1637	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
1638	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
1639	mtctr	r11
1640	DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
1641
1642	/* order ld/st prior to dcbt stop all streams with flushing */
1643	sync
1644
1645	/*
1646	 * The load addresses are at staggered offsets within cachelines,
1647	 * which suits some pipelines better (on others it should not
1648	 * hurt).
1649	 */
16501:
1651	ld	r11,(0x80 + 8)*0(r10)
1652	ld	r11,(0x80 + 8)*1(r10)
1653	ld	r11,(0x80 + 8)*2(r10)
1654	ld	r11,(0x80 + 8)*3(r10)
1655	ld	r11,(0x80 + 8)*4(r10)
1656	ld	r11,(0x80 + 8)*5(r10)
1657	ld	r11,(0x80 + 8)*6(r10)
1658	ld	r11,(0x80 + 8)*7(r10)
1659	addi	r10,r10,0x80*8
1660	bdnz	1b
1661.endm
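/*
 * Sizing sketch: the shift by (7 + 3) turns the flush size into "number of
 * 128-byte lines divided by 8", because each pass of the loop touches 8
 * consecutive lines.  Assuming, for illustration, a 64kB L1D reported in
 * PACA_L1D_FLUSH_SIZE:
 *
 *	65536 >> 10 = 64 iterations x 8 loads = 512 loads,
 *
 * one per 128-byte line, displacing the whole cache.  The extra 8-byte
 * stagger in each offset only moves the load within its line, not which
 * line gets touched.
 */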
1662
1663
1664	.globl rfi_flush_fallback
1665rfi_flush_fallback:
1666	SET_SCRATCH0(r13);
1667	GET_PACA(r13);
1668	std	r9,PACA_EXRFI+EX_R9(r13)
1669	std	r10,PACA_EXRFI+EX_R10(r13)
1670	std	r11,PACA_EXRFI+EX_R11(r13)
1671	mfctr	r9
1672	L1D_DISPLACEMENT_FLUSH
1673	mtctr	r9
1674	ld	r9,PACA_EXRFI+EX_R9(r13)
1675	ld	r10,PACA_EXRFI+EX_R10(r13)
1676	ld	r11,PACA_EXRFI+EX_R11(r13)
1677	GET_SCRATCH0(r13);
1678	rfid
1679
1680	.globl hrfi_flush_fallback
1681hrfi_flush_fallback:
1682	SET_SCRATCH0(r13);
1683	GET_PACA(r13);
1684	std	r9,PACA_EXRFI+EX_R9(r13)
1685	std	r10,PACA_EXRFI+EX_R10(r13)
1686	std	r11,PACA_EXRFI+EX_R11(r13)
1687	mfctr	r9
1688	L1D_DISPLACEMENT_FLUSH
1689	mtctr	r9
1690	ld	r9,PACA_EXRFI+EX_R9(r13)
1691	ld	r10,PACA_EXRFI+EX_R10(r13)
1692	ld	r11,PACA_EXRFI+EX_R11(r13)
1693	GET_SCRATCH0(r13);
1694	hrfid
1695
1696	.globl entry_flush_fallback
1697entry_flush_fallback:
1698	std	r9,PACA_EXRFI+EX_R9(r13)
1699	std	r10,PACA_EXRFI+EX_R10(r13)
1700	std	r11,PACA_EXRFI+EX_R11(r13)
1701	mfctr	r9
1702	L1D_DISPLACEMENT_FLUSH
1703	mtctr	r9
1704	ld	r9,PACA_EXRFI+EX_R9(r13)
1705	ld	r10,PACA_EXRFI+EX_R10(r13)
1706	ld	r11,PACA_EXRFI+EX_R11(r13)
1707	blr
1708
1709
1710/*
1711 * Hash table stuff
1712 */
1713	.align	7
1714do_hash_page:
1715	std	r3,_DAR(r1)
1716	std	r4,_DSISR(r1)
1717
1718	andis.	r0,r4,0xa410		/* weird error? */
1719	bne-	handle_page_fault	/* if so, don't try to insert a HPTE */
1720	andis.  r0,r4,DSISR_DABRMATCH@h
1721	bne-    handle_dabr_fault
1722	CURRENT_THREAD_INFO(r11, r1)
1723	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
1724	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
1725	bne	77f			/* then don't call hash_page now */
1726	/*
1727	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1728	 * accessing a userspace segment (even from the kernel). We assume
1729	 * kernel addresses always have the high bit set.
1730	 */
1731	rlwinm	r4,r4,32-25+9,31-9,31-9	/* DSISR_STORE -> _PAGE_RW */
1732	rotldi	r0,r3,15		/* Move high bit into MSR_PR posn */
1733	orc	r0,r12,r0		/* MSR_PR | ~high_bit */
1734	rlwimi	r4,r0,32-13,30,30	/* becomes _PAGE_USER access bit */
1735	ori	r4,r4,1			/* add _PAGE_PRESENT */
1736	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
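	/*
	 * Summary of how the sequence above builds the access mask in r4
	 * (restating the inline comments): the rlwinm moves the DSISR store
	 * bit into the _PAGE_RW position; the rotldi/orc pair computes
	 * "MSR_PR set or top address bit clear", i.e. a userspace access,
	 * which the first rlwimi turns into _PAGE_USER; the ori adds
	 * _PAGE_PRESENT; and the last rlwimi sets _PAGE_EXEC only when the
	 * trap number in r5 is 0x400 (an instruction access fault).
	 */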
1737
1738	/*
1739	 * r3 contains the faulting address
1740	 * r4 contains the required access permissions
1741	 * r5 contains the trap number
1742	 * r6 contains dsisr
1743	 *
1744	 * at return r3 = 0 for success, 1 for page fault, negative for error
1745	 */
1746	ld      r6,_DSISR(r1)
1747	bl	hash_page		/* build HPTE if possible */
1748	cmpdi	r3,0			/* see if hash_page succeeded */
1749
1750	/* Success */
1751	beq	fast_exc_return_irq	/* Return from exception on success */
1752
1753	/* Error */
1754	blt-	13f
1755
1756/* Here we have a page fault that hash_page can't handle. */
1757handle_page_fault:
175811:	ld	r4,_DAR(r1)
1759	ld	r5,_DSISR(r1)
1760	addi	r3,r1,STACK_FRAME_OVERHEAD
1761	bl	do_page_fault
1762	cmpdi	r3,0
1763	beq+	ret_from_except_lite
1764	bl	save_nvgprs
1765	mr	r5,r3
1766	addi	r3,r1,STACK_FRAME_OVERHEAD
1767	lwz	r4,_DAR(r1)
1768	bl	bad_page_fault
1769	b	ret_from_except
1770
1771/* We have a data breakpoint exception - handle it */
1772handle_dabr_fault:
1773	bl	save_nvgprs
1774	ld      r4,_DAR(r1)
1775	ld      r5,_DSISR(r1)
1776	addi    r3,r1,STACK_FRAME_OVERHEAD
1777	bl      do_break
1778	/*
1779	 * do_break() may have changed the NV GPRS while handling a breakpoint.
1780	 * If so, we need to restore them with their updated values. Don't use
1781	 * ret_from_except_lite here.
1782	 */
1783	b       ret_from_except
1784
1785
1786/* We have a page fault that hash_page could handle but HV refused
1787 * the PTE insertion
1788 */
178913:	bl	save_nvgprs
1790	mr	r5,r3
1791	addi	r3,r1,STACK_FRAME_OVERHEAD
1792	ld	r4,_DAR(r1)
1793	bl	low_hash_fault
1794	b	ret_from_except
1795
1796/*
1797 * We come here as a result of a DSI at a point where we don't want
1798 * to call hash_page, such as when we are accessing memory (possibly
1799 * user memory) inside a PMU interrupt that occurred while interrupts
1800 * were soft-disabled.  We want to invoke the exception handler for
1801 * the access, or panic if there isn't a handler.
1802 */
180377:	bl	save_nvgprs
1804	mr	r4,r3
1805	addi	r3,r1,STACK_FRAME_OVERHEAD
1806	li	r5,SIGSEGV
1807	bl	bad_page_fault
1808	b	ret_from_except
1809
1810/*
1811 * Here we have detected that the kernel stack pointer is bad.
1812 * R9 contains the saved CR, r13 points to the paca,
1813 * r10 contains the (bad) kernel stack pointer,
1814 * r11 and r12 contain the saved SRR0 and SRR1.
1815 * We switch to using an emergency stack, save the registers there,
1816 * and call kernel_bad_stack(), which panics.
1817 */
1818bad_stack:
1819	ld	r1,PACAEMERGSP(r13)
1820	subi	r1,r1,64+INT_FRAME_SIZE
1821	std	r9,_CCR(r1)
1822	std	r10,GPR1(r1)
1823	std	r11,_NIP(r1)
1824	std	r12,_MSR(r1)
1825	mfspr	r11,SPRN_DAR
1826	mfspr	r12,SPRN_DSISR
1827	std	r11,_DAR(r1)
1828	std	r12,_DSISR(r1)
1829	mflr	r10
1830	mfctr	r11
1831	mfxer	r12
1832	std	r10,_LINK(r1)
1833	std	r11,_CTR(r1)
1834	std	r12,_XER(r1)
1835	SAVE_GPR(0,r1)
1836	SAVE_GPR(2,r1)
1837	ld	r10,EX_R3(r3)
1838	std	r10,GPR3(r1)
1839	SAVE_GPR(4,r1)
1840	SAVE_4GPRS(5,r1)
1841	ld	r9,EX_R9(r3)
1842	ld	r10,EX_R10(r3)
1843	SAVE_2GPRS(9,r1)
1844	ld	r9,EX_R11(r3)
1845	ld	r10,EX_R12(r3)
1846	ld	r11,EX_R13(r3)
1847	std	r9,GPR11(r1)
1848	std	r10,GPR12(r1)
1849	std	r11,GPR13(r1)
1850BEGIN_FTR_SECTION
1851	ld	r10,EX_CFAR(r3)
1852	std	r10,ORIG_GPR3(r1)
1853END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1854	SAVE_8GPRS(14,r1)
1855	SAVE_10GPRS(22,r1)
1856	lhz	r12,PACA_TRAP_SAVE(r13)
1857	std	r12,_TRAP(r1)
1858	addi	r11,r1,INT_FRAME_SIZE
1859	std	r11,0(r1)
1860	li	r12,0
1861	std	r12,0(r11)
1862	ld	r2,PACATOC(r13)
1863	ld	r11,exception_marker@toc(r2)
1864	std	r12,RESULT(r1)
1865	std	r11,STACK_FRAME_OVERHEAD-16(r1)
18661:	addi	r3,r1,STACK_FRAME_OVERHEAD
1867	bl	kernel_bad_stack
1868	b	1b
1869
1870_KPROBE(do_uaccess_flush)
1871	UACCESS_FLUSH_FIXUP_SECTION
1872	nop
1873	nop
1874	nop
1875	blr
1876	L1D_DISPLACEMENT_FLUSH
1877	blr
1878