• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (C) 2020 - Google Inc
4 * Author: Andrew Scull <ascull@google.com>
5 */
6
7#include <linux/linkage.h>
8
9#include <asm/assembler.h>
10#include <asm/kvm_asm.h>
11#include <asm/kvm_mmu.h>
12
13	.text
14
/*
 * void __host_exit(void)
 *
 * Save the host's general purpose registers into the per-CPU host
 * kvm_cpu_context, hand the exception over to the C handler
 * (handle_trap), then restore the host registers and eret back to
 * the host.
 *
 * Entered from the host vectors with every GP register still holding
 * its host value (host_el1_sync_vect rebalances the stack before
 * branching here).
 */
SYM_FUNC_START(__host_exit)
	/* Free up x0/x1 as scratch; their host values are saved properly below. */
	stp	x0, x1, [sp, #-16]!

	/* x0 = host cpu context pointer; x1 is scratch for the macro */
	get_host_ctxt	x0, x1

	/* Store the host regs x2 and x3 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]

	/* Retrieve the host regs x0-x1 from the stack */
	ldp	x2, x3, [sp], #16	// x2, x3 = host's x0, x1

	/* Store the host regs x0-x1 and x4-x17 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]

	/* Store the host regs x18-x29, lr */
	save_callee_saved_regs x0

	/* Save the host context pointer in x29 across the function call */
	mov	x29, x0
	bl	handle_trap

	/* Restore host regs x0-x17 */
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]

	/*
	 * x0-x7 are used for panic arguments: __hyp_do_panic sets them up
	 * and jumps in here (with x29 = host context, possibly NULL-checked
	 * by the caller), so only x8 onwards are reloaded on that path.
	 */
__host_enter_for_panic:
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	/* Restore host regs x18-x29, lr */
	restore_callee_saved_regs x29

	/* Do not touch any register after this! */
__host_enter_without_restoring:
	eret
	sb
SYM_FUNC_END(__host_exit)
65
66/*
67 * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
68 * 				  u64 elr, u64 par);
69 */
/*
 * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 * 				  u64 elr, u64 par);
 *
 * Exit to the host's panic() with a fault report. A NULL host_ctxt
 * means no host context was saved, so nothing is restored on the way
 * out (see the cbz at the bottom).
 */
SYM_FUNC_START(__hyp_do_panic)
	/* Keep the host context pointer where __host_enter_for_panic expects it. */
	mov	x29, x0

	/* Load the format string into x0 and arguments into x1-7 */
	ldr	x0, =__hyp_panic_string

	/* x1 (spsr) and x2 (elr) are already in place; move par to x6 */
	mov	x6, x3
	get_vcpu_ptr x7, x3

	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2

	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr

	/* Enter the host, conditionally restoring the host context. */
	cbz	x29, __host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)
94
/*
 * Host EL1 synchronous exception entry (fills one 128-byte vector slot).
 *
 * HVC64 calls with a function ID below HVC_STUB_HCALL_NR are forwarded
 * to __kvm_handle_stub_hvc at its idmap (physical) address; everything
 * else is routed to __host_exit.
 */
.macro host_el1_sync_vect
	.align 7
.L__vect_start\@:
	stp	x0, x1, [sp, #-16]!
	mrs	x0, esr_el2
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
	cmp	x0, #ESR_ELx_EC_HVC64
	/* ldp does not affect flags, so the EC comparison result survives it */
	ldp	x0, x1, [sp], #16
	b.ne	__host_exit

	/* Check for a stub HVC call (x0 holds the host's function ID again) */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	__host_exit

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5
.L__vect_end\@:
/* Build-time check that this entry still fits in one 0x80-byte vector slot. */
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
	.error "host_el1_sync_vect larger than vector entry"
.endif
.endm
128
/*
 * Entry for exceptions taken from hyp (EL2) itself — always fatal.
 */
.macro invalid_host_el2_vect
	.align 7
	/* If a guest is loaded, panic out of it. */
	stp	x0, x1, [sp, #-16]!
	get_loaded_vcpu x0, x1
	cbnz	x0, __guest_exit_panic
	/* No guest loaded: discard the stacked x0/x1 and panic from hyp. */
	add	sp, sp, #16

	/*
	 * The panic may not be clean if the exception is taken before the host
	 * context has been saved by __host_exit or after the hyp context has
	 * been partially clobbered by __host_enter.
	 */
	b	hyp_panic
.endm
144
/*
 * Entry for unexpected exceptions from the host (EL1): panic via
 * __hyp_do_panic with host_ctxt = NULL, i.e. nothing was saved and
 * nothing will be restored on the way out.
 */
.macro invalid_host_el1_vect
	.align 7
	mov	x0, xzr		/* host_ctxt = NULL */
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, par_el1
	b	__hyp_do_panic
.endm
153
/*
 * The host vector does not use an ESB instruction in order to avoid consuming
 * SErrors that should only be consumed by the host. Guest entry is deferred by
 * __guest_enter if there are any pending asynchronous exceptions so hyp will
 * always return to the host without having consumed host SErrors.
 *
 * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
 * host knows about the EL2 vectors already, and there is no point in hiding
 * them.
 */
	/*
	 * EL2 host vector table: 16 entries of 128 bytes (.align 7 in each
	 * macro), base aligned to 2KB (.align 11) as the architecture
	 * requires for a vector base address.
	 */
	.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
	invalid_host_el2_vect			// Synchronous EL2t
	invalid_host_el2_vect			// IRQ EL2t
	invalid_host_el2_vect			// FIQ EL2t
	invalid_host_el2_vect			// Error EL2t

	invalid_host_el2_vect			// Synchronous EL2h
	invalid_host_el2_vect			// IRQ EL2h
	invalid_host_el2_vect			// FIQ EL2h
	invalid_host_el2_vect			// Error EL2h

	host_el1_sync_vect			// Synchronous 64-bit EL1
	invalid_host_el1_vect			// IRQ 64-bit EL1
	invalid_host_el1_vect			// FIQ 64-bit EL1
	invalid_host_el1_vect			// Error 64-bit EL1

	invalid_host_el1_vect			// Synchronous 32-bit EL1
	invalid_host_el1_vect			// IRQ 32-bit EL1
	invalid_host_el1_vect			// FIQ 32-bit EL1
	invalid_host_el1_vect			// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_host_vector)
186