/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#ifndef __ARM64_KVM_NVHE_PKVM_H__
#define __ARM64_KVM_NVHE_PKVM_H__

#include <asm/kvm_pkvm.h>

#include <nvhe/gfp.h>
#include <nvhe/spinlock.h>

/*
 * A container for the vcpu state that hyp needs to maintain for protected VMs.
 */
struct shadow_vcpu_state {
	struct kvm_shadow_vm *vm;
	struct kvm_vcpu vcpu;
};

/*
 * Holds the relevant data for running a protected vm.
 */
struct kvm_shadow_vm {
	/* A unique id for the shadow structs in the hyp shadow area. */
	int shadow_handle;

	/* Number of vcpus for the vm. */
	int created_vcpus;

	/* Pointers to the shadow vcpus of the shadow vm. */
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

	/* Primary vCPU pending entry to the pvmfw. */
	struct kvm_vcpu *pvmfw_entry_vcpu;

	/* The host's kvm structure. */
	struct kvm *host_kvm;

	/* The total size of the donated shadow area. */
	size_t shadow_area_size;

	/*
	 * The number of vcpus initialized and ready to run in the shadow vm.
	 * Modifying this is protected by shadow_lock.
	 */
	unsigned int nr_vcpus;

	struct kvm_arch arch;
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	struct hyp_pool pool;
	hyp_spinlock_t lock;

	/* Array of the shadow state pointers per vcpu. */
	struct shadow_vcpu_state *shadow_vcpus[0];
};

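/*
 * A vcpu is considered protected only if pKVM is enabled and its shadow VM
 * was created with protection enabled.
 */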
static inline bool vcpu_is_protected(struct kvm_vcpu *vcpu)
{
	if (!is_protected_kvm_enabled())
		return false;

	return vcpu->arch.pkvm.shadow_vm->arch.pkvm.enabled;
}

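/*
 * Physical base address and size of the pVM firmware (pvmfw) image, if one
 * was provided to the hypervisor.
 */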
extern phys_addr_t pvmfw_base;
extern phys_addr_t pvmfw_size;

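/*
 * Creation and teardown of the hyp shadow state backing a protected VM.
 * hyp_shadow_table_init() installs the host-donated shadow table,
 * __pkvm_init_shadow() and __pkvm_init_shadow_vcpu() initialize the shadow
 * VM and its vcpus from host-donated memory, and __pkvm_teardown_shadow()
 * releases that state again. A shadow vcpu obtained via get_shadow_vcpu()
 * is expected to be released with a matching put_shadow_vcpu(), e.g.:
 *
 *	vcpu = get_shadow_vcpu(shadow_handle, vcpu_idx);
 *	if (vcpu) {
 *		...
 *		put_shadow_vcpu(vcpu);
 *	}
 */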
void hyp_shadow_table_init(void *tbl);
int __pkvm_init_shadow(struct kvm *kvm, void *shadow_va, size_t size, void *pgd);
int __pkvm_init_shadow_vcpu(unsigned int shadow_handle,
			    struct kvm_vcpu *host_vcpu,
			    void *shadow_vcpu_hva);
int __pkvm_teardown_shadow(int shadow_handle);
struct kvm_vcpu *get_shadow_vcpu(int shadow_handle, unsigned int vcpu_idx);
void put_shadow_vcpu(struct kvm_vcpu *vcpu);

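/*
 * System register handling for protected VMs: pvm_read_id_reg() returns the
 * restricted view of an ID register that a protected guest is allowed to
 * see, the kvm_handle_pvm_*() hooks handle the corresponding guest traps,
 * kvm_reset_pvm_sys_regs() resets a protected vcpu's system registers, and
 * kvm_check_pvm_sysreg_table() sanity-checks the handler table at init time.
 */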
u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
void kvm_reset_pvm_sys_regs(struct kvm_vcpu *vcpu);
int kvm_check_pvm_sysreg_table(void);

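/* Reset a protected vcpu's state, e.g. when it is (re)started via PSCI. */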
void pkvm_reset_vcpu(struct kvm_vcpu *vcpu);

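/* Handle a hypercall (HVC) issued by a protected guest. */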
bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code);

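/* Look up the shadow vcpu of @vm whose MPIDR matches @mpidr. */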
struct kvm_vcpu *pvm_mpidr_to_vcpu(struct kvm_shadow_vm *vm, unsigned long mpidr);

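/* True if a pvmfw load address has been configured for this VM. */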
static inline bool pvm_has_pvmfw(struct kvm_shadow_vm *vm)
{
	return vm->arch.pkvm.pvmfw_load_addr != PVMFW_INVALID_LOAD_ADDR;
}

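/*
 * Check whether @ipa falls within the pvmfw region of @vm. The unsigned
 * subtraction folds the lower and upper bound checks into a single compare.
 */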
static inline bool ipa_in_pvmfw_region(struct kvm_shadow_vm *vm, u64 ipa)
{
	struct kvm_protected_vm *pkvm = &vm->arch.pkvm;

	if (!pvm_has_pvmfw(vm))
		return false;

	return ipa - pkvm->pvmfw_load_addr < pvmfw_size;
}

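/*
 * pkvm_load_pvmfw_pages() copies the pvmfw image into a protected guest at
 * the given IPA; pkvm_clear_pvmfw_pages() wipes the firmware image from
 * memory.
 */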
int pkvm_load_pvmfw_pages(struct kvm_shadow_vm *vm, u64 ipa, phys_addr_t phys,
			  u64 size);
void pkvm_clear_pvmfw_pages(void);

#endif /* __ARM64_KVM_NVHE_PKVM_H__ */