/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/linkage.h>
#include <linux/const.h>
#include <asm/unified.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/vfpmacros.h>
#include "interrupts_head.S"

	.text

__kvm_hyp_code_start:
	.globl __kvm_hyp_code_start

/********************************************************************
 * Flush per-VMID TLBs
 *
 * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 *
 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
 * inside the inner-shareable domain (which is the case for all v7
 * implementations).  If we come across a non-IS SMP implementation, we'll
 * have to use an IPI based mechanism. Until then, we stick to the simple
 * hardware assisted version.
 *
 * As v7 does not support flushing per IPA, just nuke the whole TLB
 * instead, ignoring the ipa value.
 */
ENTRY(__kvm_tlb_flush_vmid_ipa)
	push	{r2, r3}

	dsb	ishst
	add	r0, r0, #KVM_VTTBR
	ldrd	r2, r3, [r0]
	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ Write VTTBR
	isb
	mcr	p15, 0, r0, c8, c3, 0	@ TLBIALLIS (rt ignored)
	dsb	ish
	isb
	mov	r2, #0
	mov	r3, #0
	mcrr	p15, 6, r2, r3, c2	@ Back to VMID #0
	isb				@ Not necessary if followed by eret

	pop	{r2, r3}
	bx	lr
ENDPROC(__kvm_tlb_flush_vmid_ipa)
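
/*
 * A sketch of how the host side reaches this function (illustrative call
 * shown here; the real call sites live in the C part of KVM/ARM, e.g.
 * arch/arm/kvm/mmu.c):
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *
 * kvm_call_hyp() issues HVC #0, which lands in hyp_hvc below; the handler
 * shifts the remaining arguments into r0-r2 and branches to the function
 * pointer while still in Hyp mode.
 */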

/**
 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
 *
 * Reuses __kvm_tlb_flush_vmid_ipa() for ARMv7, without passing the address
 * parameter.
 */

ENTRY(__kvm_tlb_flush_vmid)
	b	__kvm_tlb_flush_vmid_ipa
ENDPROC(__kvm_tlb_flush_vmid)

/********************************************************************
 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
 * domain, for all VMIDs
 *
 * void __kvm_flush_vm_context(void);
 */
ENTRY(__kvm_flush_vm_context)
	mov	r0, #0			@ rn parameter for CP15 flushes is SBZ

	/* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
	mcr	p15, 4, r0, c8, c3, 4
	/* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
	mcr	p15, 0, r0, c7, c1, 0
	dsb	ish
	isb				@ Not necessary if followed by eret

	bx	lr
ENDPROC(__kvm_flush_vm_context)
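
/*
 * Reached via kvm_call_hyp(__kvm_flush_vm_context); the expected caller is
 * the VMID management code in arch/arm/kvm/arm.c, which must make sure no
 * stale TLB entries or icache lines survive when the 8-bit VMID space wraps
 * and VMIDs start being reused.
 */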


/********************************************************************
 *  Hypervisor world-switch code
 *
 *
 * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 */
ENTRY(__kvm_vcpu_run)
	@ Save the vcpu pointer
	mcr	p15, 4, vcpu, c13, c0, 2	@ HTPIDR
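	@ (vcpu is an alias for r0 set up in interrupts_head.S; stashing it
	@ in HTPIDR lets the exception handlers below recover it with
	@ load_vcpu after the guest has clobbered the GPRs)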

	save_host_regs

	restore_vgic_state
	restore_timer_state

	@ Store hardware CP15 state and load guest state
	read_cp15_state store_to_vcpu = 0
	write_cp15_state read_from_vcpu = 1

	@ If the host kernel has not been configured with VFPv3 support,
	@ then it is safer to deny guests the use of it as well.
#ifdef CONFIG_VFPv3
	@ Set FPEXC_EN so the guest doesn't trap floating point instructions
	VFPFMRX r2, FPEXC		@ VMRS
	push	{r2}
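	@ (the host's FPEXC stays on the Hyp stack until the matching pop at
	@ after_vfp_restore on the exit path)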
	orr	r2, r2, #FPEXC_EN
	VFPFMXR FPEXC, r2		@ VMSR
#endif

	@ Configure Hyp-role
	configure_hyp_role vmentry

	@ Trap coprocessor CRx accesses
	set_hstr vmentry
	set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
	set_hdcr vmentry

	@ Write configured ID register into MIDR alias
	ldr	r1, [vcpu, #VCPU_MIDR]
	mcr	p15, 4, r1, c0, c0, 0

	@ Write guest view of MPIDR into VMPIDR
	ldr	r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
	mcr	p15, 4, r1, c0, c0, 5

	@ Set up guest memory translation
	ldr	r1, [vcpu, #VCPU_KVM]
	add	r1, r1, #KVM_VTTBR
	ldrd	r2, r3, [r1]
	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ Write VTTBR

	@ We're all done, just restore the GPRs and go to the guest
	restore_guest_regs
	clrex				@ Clear exclusive monitor
	eret

__kvm_vcpu_return:
	/*
	 * return convention:
	 * guest r0, r1, r2 saved on the stack
	 * r0: vcpu pointer
	 * r1: exception code
	 */
	save_guest_regs

	@ Set VMID == 0
	mov	r2, #0
	mov	r3, #0
	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR

	@ Don't trap coprocessor accesses for host kernel
	set_hstr vmexit
	set_hdcr vmexit
	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore

#ifdef CONFIG_VFPv3
	@ Switch VFP/NEON hardware state to the host's
	add	r7, vcpu, #VCPU_VFP_GUEST
	store_vfp_state r7
	add	r7, vcpu, #VCPU_VFP_HOST
	ldr	r7, [r7]
	restore_vfp_state r7

after_vfp_restore:
	@ Restore FPEXC_EN which we clobbered on entry
	pop	{r2}
	VFPFMXR FPEXC, r2
#else
after_vfp_restore:
#endif

	@ Reset Hyp-role
	configure_hyp_role vmexit

	@ Let host read hardware MIDR
	mrc	p15, 0, r2, c0, c0, 0
	mcr	p15, 4, r2, c0, c0, 0

	@ Back to hardware MPIDR
	mrc	p15, 0, r2, c0, c0, 5
	mcr	p15, 4, r2, c0, c0, 5

	@ Store guest CP15 state and restore host state
	read_cp15_state store_to_vcpu = 1
	write_cp15_state read_from_vcpu = 0

	save_timer_state
	save_vgic_state

	restore_host_regs
	clrex				@ Clear exclusive monitor
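	/*
	 * kvm_call_hyp() returns a u64 in the r0/r1 pair. Per the AAPCS a
	 * doubleword is returned as if loaded with LDM, so the low word
	 * (the exception code) belongs in r0 on little-endian and in r1
	 * on BE8.
	 */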
#ifndef CONFIG_CPU_ENDIAN_BE8
	mov	r0, r1			@ Return the return code
	mov	r1, #0			@ Clear upper bits in return value
#else
	@ r1 already has return code
	mov	r0, #0			@ Clear upper bits in return value
#endif /* CONFIG_CPU_ENDIAN_BE8 */
	bx	lr			@ return to IOCTL

/********************************************************************
 *  Call function in Hyp mode
 *
 *
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C sense, and care
 * must be taken when calling this to ensure parameters are passed in
 * registers only, since the stack will change between the caller and the
 * callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed in r0, r1, and r2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed).  The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
 * passed in r0 and r1.
 *
 * A function pointer with a value of 0xffffffff has a special meaning,
 * and is used to implement __hyp_get_vectors in the same way as in
 * arch/arm/kernel/hyp_stub.S.
 *
 * The calling convention follows the standard AAPCS:
 *   r0 - r3: caller save
 *   r12:     caller save
 *   rest:    callee save
 */
ENTRY(kvm_call_hyp)
	hvc	#0
	bx	lr
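
/*
 * Typical use from host C code (illustrative; the actual call sites live
 * in arch/arm/kvm/*.c):
 *
 *	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * The world switch above is entered exactly this way: the HVC trap lands
 * in hyp_hvc, which spots VMID 0, shifts vcpu into r0 and branches to
 * __kvm_vcpu_run.
 */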

/********************************************************************
 * Hypervisor exception vector and handlers
 *
 *
 * The KVM/ARM Hypervisor ABI is defined as follows:
 *
 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
 * instruction is issued since all traps are disabled when running the host
 * kernel as per the Hyp-mode initialization at boot time.
 *
 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
 * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
 * instructions are called from within Hyp-mode.
 *
 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
 *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
 *    exception vector code will check that the HVC comes from VMID==0 and if
 *    so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
 *    - r0 contains a pointer to a HYP function
 *    - r1, r2, and r3 contain arguments to the above function.
 *    - The HYP function will be called with its arguments in r0, r1 and r2.
 *    On HYP function return, we return directly to SVC.
 *
 * Note that the above is used to execute code in Hyp-mode from a host-kernel
 * point of view, and is a different concept from performing a world-switch and
 * executing guest code in SVC mode (with a VMID != 0).
 */

/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
.macro bad_exception exception_code, panic_str
	push	{r0-r2}
	mrrc	p15, 6, r0, r1, c2	@ Read VTTBR
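	@ VTTBR.VMID occupies bits [55:48] of the 64-bit register, i.e. bits
	@ [23:16] of the high word in r1; a zero VMID means the exception was
	@ taken while running the host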
	lsr	r1, r1, #16
	ands	r1, r1, #0xff
	beq	99f

	load_vcpu			@ Load VCPU pointer
	.if \exception_code == ARM_EXCEPTION_DATA_ABORT
	mrc	p15, 4, r2, c5, c2, 0	@ HSR
	mrc	p15, 4, r1, c6, c0, 0	@ HDFAR
	str	r2, [vcpu, #VCPU_HSR]
	str	r1, [vcpu, #VCPU_HxFAR]
	.endif
	.if \exception_code == ARM_EXCEPTION_PREF_ABORT
	mrc	p15, 4, r2, c5, c2, 0	@ HSR
	mrc	p15, 4, r1, c6, c0, 2	@ HIFAR
	str	r2, [vcpu, #VCPU_HSR]
	str	r1, [vcpu, #VCPU_HxFAR]
	.endif
	mov	r1, #\exception_code
	b	__kvm_vcpu_return

	@ We were in the host already. Let's craft a panicking return to SVC.
99:	mrs	r2, cpsr
	bic	r2, r2, #MODE_MASK
	orr	r2, r2, #SVC_MODE
THUMB(	orr	r2, r2, #PSR_T_BIT	)
	msr	spsr_cxsf, r2
	mrs	r1, ELR_hyp
	ldr	r2, =panic
	msr	ELR_hyp, r2
	ldr	r0, =\panic_str
	clrex				@ Clear exclusive monitor
	eret
.endm

	.text

	.align 5
__kvm_hyp_vector:
	.globl __kvm_hyp_vector

	@ Hyp-mode exception vector
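	@ (each W(b) below is 4 bytes wide, so the hyp_hvc entry sits at
	@ offset 0x14, matching the Hyp-ABI description above)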
	W(b)	hyp_reset
	W(b)	hyp_undef
	W(b)	hyp_svc
	W(b)	hyp_pabt
	W(b)	hyp_dabt
	W(b)	hyp_hvc
	W(b)	hyp_irq
	W(b)	hyp_fiq

	.align
hyp_reset:
	b	hyp_reset

	.align
hyp_undef:
	bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str

	.align
hyp_svc:
	bad_exception ARM_EXCEPTION_HVC, svc_die_str

	.align
hyp_pabt:
	bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str

	.align
hyp_dabt:
	bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str

	.align
hyp_hvc:
	/*
	 * Getting here is either because of a trap from a guest or from
	 * calling HVC from the host kernel, which means "switch to Hyp mode".
	 */
	push	{r0, r1, r2}

	@ Check syndrome register
	mrc	p15, 4, r1, c5, c2, 0	@ HSR
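	@ (the exception class lives in HSR[31:26]; HSR_EC_SHIFT is 26)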
	lsr	r0, r1, #HSR_EC_SHIFT
	cmp	r0, #HSR_EC_HVC
	bne	guest_trap		@ Not HVC instr.

	/*
	 * Let's check if the HVC came from VMID 0 and allow simple
	 * switch to Hyp mode
	 */
	mrrc	p15, 6, r0, r2, c2
	lsr	r2, r2, #16
	and	r2, r2, #0xff
	cmp	r2, #0
	bne	guest_trap		@ Guest called HVC

	/*
	 * Getting here means host called HVC, we shift parameters and branch
	 * to Hyp function.
	 */
	pop	{r0, r1, r2}

	/* Check for __hyp_get_vectors */
	cmp	r0, #-1
	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
	beq	1f

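	/*
	 * Save lr_usr and SPSR_hyp around the call: a Hyp function that
	 * world-switches (e.g. __kvm_vcpu_run) will clobber both before
	 * control returns here.
	 */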
	push	{lr}
	mrs	lr, SPSR
	push	{lr}

	mov	lr, r0
	mov	r0, r1
	mov	r1, r2
	mov	r2, r3

THUMB(	orr	lr, #1)
	blx	lr			@ Call the HYP function

	pop	{lr}
	msr	SPSR_csxf, lr
	pop	{lr}
1:	eret

guest_trap:
	load_vcpu			@ Load VCPU pointer to r0
	str	r1, [vcpu, #VCPU_HSR]

	@ Check if we need the fault information
	lsr	r1, r1, #HSR_EC_SHIFT
#ifdef CONFIG_VFPv3
	cmp	r1, #HSR_EC_CP_0_13
	beq	switch_to_guest_vfp
#endif
	cmp	r1, #HSR_EC_IABT
	mrceq	p15, 4, r2, c6, c0, 2	@ HIFAR
	beq	2f
	cmp	r1, #HSR_EC_DABT
	bne	1f
	mrc	p15, 4, r2, c6, c0, 0	@ HDFAR

2:	str	r2, [vcpu, #VCPU_HxFAR]

	/*
	 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
	 *
	 * Abort on the stage 2 translation for a memory access from a
	 * Non-secure PL1 or PL0 mode:
	 *
	 * For any Access flag fault or Translation fault, and also for any
	 * Permission fault on the stage 2 translation of a memory access
	 * made as part of a translation table walk for a stage 1 translation,
	 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
	 * is UNKNOWN.
	 */

	/* Check for permission fault, and S1PTW */
	mrc	p15, 4, r1, c5, c2, 0	@ HSR
	and	r0, r1, #HSR_FSC_TYPE
	cmp	r0, #FSC_PERM
	tsteq	r1, #(1 << 7)		@ S1PTW
	mrcne	p15, 4, r2, c6, c0, 4	@ HPFAR
	bne	3f

	/* Preserve PAR */
	mrrc	p15, 0, r0, r1, c7	@ PAR
	push	{r0, r1}

	/* Resolve IPA using the xFAR */
	mcr	p15, 0, r2, c7, c8, 0	@ ATS1CPR
	isb
	mrrc	p15, 0, r0, r1, c7	@ PAR
	tst	r0, #1
	bne	4f			@ Failed translation
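	/*
	 * PAR now holds the IPA: PA[31:12] in r0[31:12] and PA[39:32] in
	 * r1[7:0]. Repack it into HPFAR format, IPA[39:12] at bits [31:4].
	 */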
	ubfx	r2, r0, #12, #20
	lsl	r2, r2, #4
	orr	r2, r2, r1, lsl #24

	/* Restore PAR */
	pop	{r0, r1}
	mcrr	p15, 0, r0, r1, c7	@ PAR

3:	load_vcpu			@ Load VCPU pointer to r0
	str	r2, [r0, #VCPU_HPFAR]

1:	mov	r1, #ARM_EXCEPTION_HVC
	b	__kvm_vcpu_return

4:	pop	{r0, r1}		@ Failed translation, return to guest
	mcrr	p15, 0, r0, r1, c7	@ PAR
	clrex
	pop	{r0, r1, r2}
	eret

/*
 * If VFPv3 support is not available, then we will not switch the VFP
 * registers; however cp10 and cp11 accesses will still trap and fall back
 * to the regular coprocessor emulation code, which currently will
 * inject an undefined exception to the guest.
 */
#ifdef CONFIG_VFPv3
switch_to_guest_vfp:
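	/*
	 * First guest FP/NEON access since entry traps here: stop trapping
	 * cp10/cp11, swap in the guest's VFP state and resume the guest so
	 * the faulting instruction is retried.
	 */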
	push	{r3-r7}

	@ NEON/VFP used.  Turn on VFP access.
	set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))

	@ Switch VFP/NEON hardware state to the guest's
	add	r7, r0, #VCPU_VFP_HOST
	ldr	r7, [r7]
	store_vfp_state r7
	add	r7, r0, #VCPU_VFP_GUEST
	restore_vfp_state r7

	pop	{r3-r7}
	pop	{r0-r2}
	clrex
	eret
#endif

	.align
hyp_irq:
	push	{r0, r1, r2}
	mov	r1, #ARM_EXCEPTION_IRQ
	load_vcpu			@ Load VCPU pointer to r0
	b	__kvm_vcpu_return

	.align
hyp_fiq:
	b	hyp_fiq

	.ltorg

__kvm_hyp_code_end:
	.globl	__kvm_hyp_code_end

	.section ".rodata"

und_die_str:
	.ascii	"unexpected undefined exception in Hyp mode at: %#08x\n"
pabt_die_str:
	.ascii	"unexpected prefetch abort in Hyp mode at: %#08x\n"
dabt_die_str:
	.ascii	"unexpected data abort in Hyp mode at: %#08x\n"
svc_die_str:
	.ascii	"unexpected HVC/SVC trap in Hyp mode at: %#08x\n"