/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

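// Byte offset of general-purpose register x within the saved CPU context:
// CPU_GP_REGS and CPU_USER_PT_REGS are asm-offsets constants, and each
// x register occupies 8 bytes.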
#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)

	.text
	.pushsection	.hyp.text, "ax"

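// Save/restore the callee-saved registers x19-x29 and lr to/from the
// context pointed to by \ctxt. Only these registers need to be preserved
// for the host: __guest_enter is reached by an ordinary function call, so
// the AAPCS already treats the caller-saved registers as clobbered.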
.macro save_callee_saved_regs ctxt
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
 */
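// The return value in x0 is the exit code passed back by __guest_exit, or
// ARM_EXCEPTION_IRQ if guest entry is deferred because an exception is
// already pending.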
ENTRY(__guest_enter)
	// x0: vcpu
	// x1: host context
	// x2-x17: clobbered by macros
	// x18: guest context

	// Store the host regs
	save_callee_saved_regs x1

	// Now that the host state is stored, a pending RAS SError must
	// affect the host. If any asynchronous exception is pending we defer
	// the guest entry. The DSB isn't necessary before v8.2 as any SError
	// would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
	dsb	nshst
	isb
alternative_else_nop_endif
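	// ISR_EL1 is non-zero if an IRQ, FIQ or SError is pending; bail out
	// and let the host deal with it before we enter the guest.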
	mrs	x1, isr_el1
	cbz	x1,  1f
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
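	// Point x18 at the guest context embedded in the vcpu (x0)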
	add	x18, x0, #VCPU_CONTEXT

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x19-x29, lr
	restore_callee_saved_regs x18

	// Restore guest reg x18
	ldr	x18,      [x18, #CPU_XREG_OFFSET(18)]

	// Do not touch any register after this!
	eret
ENDPROC(__guest_enter)

ENTRY(__guest_exit)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack
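	// (the hyp exception vectors push the guest's x0/x1 there before
	// branching here, so that they have scratch registers to work with)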

	add	x1, x1, #VCPU_CONTEXT

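	// Ensure PSTATE.PAN is set while we run at EL2, on CPUs that
	// implement it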
	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x18
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]
	str	x18,      [x1, #CPU_XREG_OFFSET(18)]

	// Store the guest regs x19-x29, lr
	save_callee_saved_regs x1

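	// Locate this CPU's host context; x2 ends up pointing at it, x3 is
	// used by the macro as a scratch register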
	get_host_ctxt	x2, x3

	// Now restore the host regs
	restore_callee_saved_regs x2

alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without an unmask-SError and isb.
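	// The ESB consumes any pending SError here (aborts are still masked);
	// its syndrome is latched into DISR_EL1, which we record in the vcpu's
	// fault information and clear if an error was in fact deferred.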
	esb
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	// If we have a pending asynchronous abort, now is the
	// time to find out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one!  For I speak with
	// the power of DEC, and I command thee to show thyself!"
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0

	dsb	sy		// Synchronize against in-flight ld/st
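	// The nop below keeps this sequence the same size as the RAS sequence
	// above, as the alternatives framework requires.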
	nop
	msr	daifclr, #4	// Unmask aborts
alternative_endif

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
abort_guest_exit_start:

	isb

abort_guest_exit_end:
	msr	daifset, #4	// Mask aborts
	ret

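	// If an exception is taken from EL2 anywhere between the two labels
	// above, the hyp vectors look the faulting address up in the KVM
	// exception table and resume execution at 9997 below instead of
	// panicking.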
	_kvm_extable	abort_guest_exit_start, 9997f
	_kvm_extable	abort_guest_exit_end, 9997f
9997:
	msr	daifset, #4	// Mask aborts
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

	// Restore the EL1 exception context so that we can report some
	// information. Merge the exception code with the SError pending bit.
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
ENDPROC(__guest_exit)