/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/linkage.h>
#include <linux/const.h>
#include <asm/unified.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/vfpmacros.h>
#include "interrupts_head.S"

	.text

__kvm_hyp_code_start:
	.globl __kvm_hyp_code_start

/********************************************************************
 * Flush per-VMID TLBs
 *
 * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 *
 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
 * inside the inner-shareable domain (which is the case for all v7
 * implementations).  If we come across a non-IS SMP implementation, we'll
 * have to use an IPI-based mechanism. Until then, we stick to the simple
 * hardware-assisted version.
 *
 * As v7 does not support flushing per IPA, just nuke the whole TLB
 * instead, ignoring the ipa value.
 */
ENTRY(__kvm_tlb_flush_vmid_ipa)
	push	{r2, r3}

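	@ Make the caller's stage-2 page-table updates visible to the table
	@ walker before the invalidate (dsb ishst orders prior stores only)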
	dsb	ishst
	add	r0, r0, #KVM_VTTBR
	ldrd	r2, r3, [r0]
	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ Write VTTBR
	isb
	mcr     p15, 0, r0, c8, c3, 0	@ TLBIALLIS (rt ignored)
	dsb	ish
	isb
	mov	r2, #0
	mov	r3, #0
	mcrr	p15, 6, r2, r3, c2	@ Back to VMID #0
	isb				@ Not necessary if followed by eret

	pop	{r2, r3}
	bx	lr
ENDPROC(__kvm_tlb_flush_vmid_ipa)
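/*
 * Usage sketch: the host does not branch here directly; it enters Hyp mode
 * via kvm_call_hyp() (see below). E.g. the stage-2 MMU code does:
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 */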

/********************************************************************
 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
 * domain, for all VMIDs
 *
 * void __kvm_flush_vm_context(void);
 */
ENTRY(__kvm_flush_vm_context)
	mov	r0, #0			@ Rt parameter for the flushes below is SBZ

	/* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
	mcr     p15, 4, r0, c8, c3, 4
	/* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
	mcr     p15, 0, r0, c7, c1, 0
	dsb	ish
	isb				@ Not necessary if followed by eret

	bx	lr
ENDPROC(__kvm_flush_vm_context)
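/*
 * Usage sketch: invoked via kvm_call_hyp() when the 8-bit VMID space is
 * exhausted and a new VMID generation is started, e.g.:
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 */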


/********************************************************************
 *  Hypervisor world-switch code
 *
 *
 * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 */
ENTRY(__kvm_vcpu_run)
	@ Save the vcpu pointer
	mcr	p15, 4, vcpu, c13, c0, 2	@ HTPIDR

	save_host_regs

	restore_vgic_state
	restore_timer_state

	@ Store hardware CP15 state and load guest state
	read_cp15_state store_to_vcpu = 0
	write_cp15_state read_from_vcpu = 1

	@ If the host kernel has not been configured with VFPv3 support,
	@ then it is safer to deny the guest the use of it as well.
#ifdef CONFIG_VFPv3
	@ Set FPEXC_EN so the guest doesn't trap floating point instructions
	VFPFMRX r2, FPEXC		@ VMRS
	push	{r2}
	orr	r2, r2, #FPEXC_EN
	VFPFMXR FPEXC, r2		@ VMSR
#endif

	@ Configure Hyp-role
	configure_hyp_role vmentry

	@ Trap coprocessor CRx accesses
	set_hstr vmentry
	set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
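	@ Trapping cp10/cp11 above is what makes the VFP/NEON switch lazy:
	@ the guest's first FP access faults into switch_to_guest_vfp below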
	set_hdcr vmentry

	@ Write configured ID register into MIDR alias
	ldr	r1, [vcpu, #VCPU_MIDR]
	mcr	p15, 4, r1, c0, c0, 0

	@ Write guest view of MPIDR into VMPIDR
	ldr	r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
	mcr	p15, 4, r1, c0, c0, 5

	@ Set up guest memory translation
	ldr	r1, [vcpu, #VCPU_KVM]
	add	r1, r1, #KVM_VTTBR
	ldrd	r2, r3, [r1]
	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ Write VTTBR

	@ We're all done, just restore the GPRs and go to the guest
	restore_guest_regs
	clrex				@ Clear exclusive monitor
	eret

__kvm_vcpu_return:
	/*
	 * return convention:
	 * guest r0, r1, r2 saved on the stack
	 * r0: vcpu pointer
	 * r1: exception code
	 */
	save_guest_regs

	@ Set VMID == 0
	mov	r2, #0
	mov	r3, #0
	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR

	@ Don't trap coprocessor accesses for host kernel
	set_hstr vmexit
	set_hdcr vmexit
	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore

#ifdef CONFIG_VFPv3
	@ Switch VFP/NEON hardware state to the host's
	add	r7, vcpu, #VCPU_VFP_GUEST
	store_vfp_state r7
	add	r7, vcpu, #VCPU_VFP_HOST
	ldr	r7, [r7]
	restore_vfp_state r7

after_vfp_restore:
	@ Restore FPEXC_EN which we clobbered on entry
	pop	{r2}
	VFPFMXR FPEXC, r2
#else
after_vfp_restore:
#endif

	@ Reset Hyp-role
	configure_hyp_role vmexit

	@ Let host read hardware MIDR
	mrc	p15, 0, r2, c0, c0, 0
	mcr	p15, 4, r2, c0, c0, 0

	@ Back to hardware MPIDR
	mrc	p15, 0, r2, c0, c0, 5
	mcr	p15, 4, r2, c0, c0, 5

	@ Store guest CP15 state and restore host state
	read_cp15_state store_to_vcpu = 1
	write_cp15_state read_from_vcpu = 0

	save_timer_state
	save_vgic_state

	restore_host_regs
	clrex				@ Clear exclusive monitor
#ifndef CONFIG_CPU_ENDIAN_BE8
	mov	r0, r1			@ Return the return code
	mov	r1, #0			@ Clear upper bits in return value
#else
	@ r1 already has return code
	mov	r0, #0			@ Clear upper bits in return value
#endif /* CONFIG_CPU_ENDIAN_BE8 */
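	@ The exception code returned here becomes kvm_call_hyp()'s return
	@ value, which the run loop in arch/arm/kvm/arm.c dispatches on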
	bx	lr			@ return to IOCTL

/********************************************************************
 *  Call function in Hyp mode
 *
 *
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C sense, and care
 * must be taken when calling this to ensure parameters are passed in
 * registers only, since the stack will change between the caller and the
 * callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed in r0, r1, and r2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed).  The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
 * passed in r0 and r1.
 *
 * A function pointer with a value of 0xffffffff has a special meaning,
 * and is used to implement __hyp_get_vectors in the same way as in
 * arch/arm/kernel/hyp_stub.S.
 *
 * The calling convention follows the standard AAPCS:
 *   r0 - r3: caller save
 *   r12:     caller save
 *   rest:    callee save
 */
ENTRY(kvm_call_hyp)
	hvc	#0
	bx	lr
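/*
 * Usage sketch (host side): pass the Hyp function pointer first, then up
 * to three register-sized arguments, e.g. (as done in arch/arm/kvm/arm.c):
 *
 *	kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *
 * The hvc #0 above traps to hyp_hvc, which calls the function in Hyp mode
 * and then returns directly to the caller in SVC mode.
 */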

/********************************************************************
 * Hypervisor exception vector and handlers
 *
 *
 * The KVM/ARM Hypervisor ABI is defined as follows:
 *
 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
 * instruction is issued since all traps are disabled when running the host
 * kernel as per the Hyp-mode initialization at boot time.
 *
 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
 * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
 * instructions are called from within Hyp-mode.
 *
 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
 *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
 *    exception vector code will check that the HVC comes from VMID==0 and if
 *    so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
 *    - r0 contains a pointer to a HYP function
 *    - r1, r2, and r3 contain arguments to the above function.
 *    - The HYP function will be called with its arguments in r0, r1 and r2.
 *    On HYP function return, we return directly to SVC.
 *
 * Note that the above is used to execute code in Hyp-mode from a host-kernel
 * point of view, and is a different concept from performing a world-switch and
 * executing guest code in SVC mode (with a VMID != 0).
 */

/*
 * Handle undef, svc, pabt, or dabt: report the exception back to the host
 * if it interrupted a guest (VMID != 0), otherwise panic with a notice.
 */
.macro bad_exception exception_code, panic_str
	push	{r0-r2}
	mrrc	p15, 6, r0, r1, c2	@ Read VTTBR
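	@ The VMID lives in VTTBR[55:48], i.e. bits [23:16] of the high
	@ word (r1); extract it to tell guest context from host context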
	lsr	r1, r1, #16
	ands	r1, r1, #0xff
	beq	99f

	load_vcpu			@ Load VCPU pointer
	.if \exception_code == ARM_EXCEPTION_DATA_ABORT
	mrc	p15, 4, r2, c5, c2, 0	@ HSR
	mrc	p15, 4, r1, c6, c0, 0	@ HDFAR
	str	r2, [vcpu, #VCPU_HSR]
	str	r1, [vcpu, #VCPU_HxFAR]
	.endif
	.if \exception_code == ARM_EXCEPTION_PREF_ABORT
	mrc	p15, 4, r2, c5, c2, 0	@ HSR
	mrc	p15, 4, r1, c6, c0, 2	@ HIFAR
	str	r2, [vcpu, #VCPU_HSR]
	str	r1, [vcpu, #VCPU_HxFAR]
	.endif
	mov	r1, #\exception_code
	b	__kvm_vcpu_return

	@ We were in the host already. Let's craft a panicking return to SVC.
99:	mrs	r2, cpsr
	bic	r2, r2, #MODE_MASK
	orr	r2, r2, #SVC_MODE
THUMB(	orr	r2, r2, #PSR_T_BIT	)
	msr	spsr_cxsf, r2
	mrs	r1, ELR_hyp
	ldr	r2, =BSYM(panic)
	msr	ELR_hyp, r2
	ldr	r0, =\panic_str
	clrex				@ Clear exclusive monitor
	eret
.endm

	.text

	.align 5
__kvm_hyp_vector:
	.globl __kvm_hyp_vector

	@ Hyp-mode exception vector
	W(b)	hyp_reset
	W(b)	hyp_undef
	W(b)	hyp_svc
	W(b)	hyp_pabt
	W(b)	hyp_dabt
	W(b)	hyp_hvc
	W(b)	hyp_irq
	W(b)	hyp_fiq

	.align
hyp_reset:
	b	hyp_reset

	.align
hyp_undef:
	bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str

	.align
hyp_svc:
	bad_exception ARM_EXCEPTION_HVC, svc_die_str

	.align
hyp_pabt:
	bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str

	.align
hyp_dabt:
	bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str

	.align
hyp_hvc:
	/*
	 * Getting here is either because of a trap from a guest or from
	 * calling HVC from the host kernel, which means "switch to Hyp mode".
	 */
	push	{r0, r1, r2}

	@ Check syndrome register
	mrc	p15, 4, r1, c5, c2, 0	@ HSR
	lsr	r0, r1, #HSR_EC_SHIFT
#ifdef CONFIG_VFPv3
	cmp	r0, #HSR_EC_CP_0_13
	beq	switch_to_guest_vfp
#endif
	cmp	r0, #HSR_EC_HVC
	bne	guest_trap		@ Not HVC instr.

	/*
	 * Let's check if the HVC came from VMID 0 and allow simple
	 * switch to Hyp mode
	 */
	mrrc    p15, 6, r0, r2, c2
	lsr     r2, r2, #16
	and     r2, r2, #0xff
	cmp     r2, #0
	bne	guest_trap		@ Guest called HVC

host_switch_to_hyp:
	pop	{r0, r1, r2}

	/* Check for __hyp_get_vectors */
	cmp	r0, #-1
	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
	beq	1f

	push	{lr}
	mrs	lr, SPSR
	push	{lr}

	mov	lr, r0
	mov	r0, r1
	mov	r1, r2
	mov	r2, r3

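	@ On Thumb-2 kernels, set bit 0 so blx enters the HYP function in
	@ Thumb state (the THUMB() macro expands to nothing on ARM builds)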
THUMB(	orr	lr, #1)
	blx	lr			@ Call the HYP function

	pop	{lr}
	msr	SPSR_csxf, lr
	pop	{lr}
1:	eret

guest_trap:
	load_vcpu			@ Load VCPU pointer to r0
	str	r1, [vcpu, #VCPU_HSR]

	@ Check if we need the fault information
	lsr	r1, r1, #HSR_EC_SHIFT
	cmp	r1, #HSR_EC_IABT
	mrceq	p15, 4, r2, c6, c0, 2	@ HIFAR
	beq	2f
	cmp	r1, #HSR_EC_DABT
	bne	1f
	mrc	p15, 4, r2, c6, c0, 0	@ HDFAR

2:	str	r2, [vcpu, #VCPU_HxFAR]

	/*
	 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
	 *
	 * Abort on the stage 2 translation for a memory access from a
	 * Non-secure PL1 or PL0 mode:
	 *
	 * For any Access flag fault or Translation fault, and also for any
	 * Permission fault on the stage 2 translation of a memory access
	 * made as part of a translation table walk for a stage 1 translation,
	 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
	 * is UNKNOWN.
	 */

	/* Check for permission fault, and S1PTW */
	mrc	p15, 4, r1, c5, c2, 0	@ HSR
	and	r0, r1, #HSR_FSC_TYPE
	cmp	r0, #FSC_PERM
	tsteq	r1, #(1 << 7)		@ S1PTW
	mrcne	p15, 4, r2, c6, c0, 4	@ HPFAR
	bne	3f

	/* Preserve PAR */
	mrrc	p15, 0, r0, r1, c7	@ PAR
	push	{r0, r1}

	/* Resolve IPA using the xFAR */
	mcr	p15, 0, r2, c7, c8, 0	@ ATS1CPR
	isb
	mrrc	p15, 0, r0, r1, c7	@ PAR
	tst	r0, #1
	bne	4f			@ Failed translation
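	@ Repack the 64-bit PAR into HPFAR layout: IPA[31:12] (r0[31:12])
	@ goes to bits [23:4], IPA[39:32] (low byte of r1) to bits [31:24]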
	ubfx	r2, r0, #12, #20
	lsl	r2, r2, #4
	orr	r2, r2, r1, lsl #24

	/* Restore PAR */
	pop	{r0, r1}
	mcrr	p15, 0, r0, r1, c7	@ PAR

3:	load_vcpu			@ Load VCPU pointer to r0
	str	r2, [r0, #VCPU_HPFAR]

1:	mov	r1, #ARM_EXCEPTION_HVC
	b	__kvm_vcpu_return

4:	pop	{r0, r1}		@ Failed translation, return to guest
	mcrr	p15, 0, r0, r1, c7	@ PAR
	clrex
	pop	{r0, r1, r2}
	eret

/*
 * If VFPv3 support is not available, then we will not switch the VFP
 * registers; however cp10 and cp11 accesses will still trap and fall back
 * to the regular coprocessor emulation code, which currently will
 * inject an undefined exception to the guest.
 */
#ifdef CONFIG_VFPv3
switch_to_guest_vfp:
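	@ Lazy VFP/NEON switch: the guest's first cp10/cp11 access trapped
	@ here; after re-enabling access and swapping register state, the
	@ eret below re-executes the faulting instruction natively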
	load_vcpu			@ Load VCPU pointer to r0
	push	{r3-r7}

	@ NEON/VFP used.  Turn on VFP access.
	set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))

	@ Switch VFP/NEON hardware state to the guest's
	add	r7, r0, #VCPU_VFP_HOST
	ldr	r7, [r7]
	store_vfp_state r7
	add	r7, r0, #VCPU_VFP_GUEST
	restore_vfp_state r7

	pop	{r3-r7}
	pop	{r0-r2}
	clrex
	eret
#endif

	.align
hyp_irq:
	push	{r0, r1, r2}
	mov	r1, #ARM_EXCEPTION_IRQ
	load_vcpu			@ Load VCPU pointer to r0
	b	__kvm_vcpu_return

	.align
hyp_fiq:
	b	hyp_fiq

	.ltorg

__kvm_hyp_code_end:
	.globl	__kvm_hyp_code_end

	.section ".rodata"

und_die_str:
	.ascii	"unexpected undefined exception in Hyp mode at: %#08x\n"
pabt_die_str:
	.ascii	"unexpected prefetch abort in Hyp mode at: %#08x\n"
dabt_die_str:
	.ascii	"unexpected data abort in Hyp mode at: %#08x\n"
svc_die_str:
	.ascii	"unexpected HVC/SVC trap in Hyp mode at: %#08x\n"