// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for the hypercall interface exposed to protected guests by
 * pKVM.
 *
 * Author: Will Deacon <will@kernel.org>
 * Copyright (C) 2024 Google LLC
 */

#include <linux/arm-smccc.h>
#include <linux/array_size.h>
#include <linux/io.h>
#include <linux/mem_encrypt.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pgtable.h>
#include <linux/virtio_balloon.h>

#include <asm/hypervisor.h>

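/*
 * Protection granule advertised by the hypervisor via HYP_MEMINFO, and
 * whether it also accepts the range variants of the memory hypercalls.
 */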
static size_t pkvm_granule;
static bool pkvm_func_range;

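/* Invoke func_id once per protection granule over the given range. */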
static int __arm_smccc_do(u32 func_id, phys_addr_t phys, int numgranules)
{
	while (numgranules--) {
		struct arm_smccc_res res;

		arm_smccc_1_1_invoke(func_id, phys, 0, 0, &res);
		if (res.a0 != SMCCC_RET_SUCCESS)
			return -EPERM;

		phys += pkvm_granule;
	}

	return 0;
}

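/*
 * Invoke the range variant of func_id: the hypervisor reports in a1 how
 * many granules it handled, so keep going until the whole range is done.
 */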
static int __arm_smccc_do_range(u32 func_id, phys_addr_t phys, int numgranules)
{
	while (numgranules) {
		struct arm_smccc_res res;

		arm_smccc_1_1_invoke(func_id, phys, numgranules, 0, &res);
		if (res.a0 != SMCCC_RET_SUCCESS)
			return -EPERM;

		phys += pkvm_granule * res.a1;
		numgranules -= res.a1;
	}

	return 0;
}

/*
 * Apply func_id to the range [phys, phys + numpages * PAGE_SIZE), which
 * must be aligned to the hypervisor's protection granule.
 */
static int arm_smccc_do_range(u32 func_id, phys_addr_t phys, int numpages,
			      bool func_has_range)
{
	size_t size = numpages * PAGE_SIZE;
	int numgranules;

	if (!IS_ALIGNED(phys, PAGE_SIZE))
		return -EINVAL;

	if (!IS_ALIGNED(phys | size, pkvm_granule))
		return -EINVAL;

	numgranules = size / pkvm_granule;

	if (func_has_range)
		return __arm_smccc_do_range(func_id, phys, numgranules);

	return __arm_smccc_do(func_id, phys, numgranules);
}

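/*
 * arm64 set_memory_{encrypted,decrypted}() backends: unshare or share the
 * pages with the host via the pKVM MEM_UNSHARE/MEM_SHARE hypercalls.
 */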
static int pkvm_set_memory_encrypted(unsigned long addr, int numpages)
{
	return arm_smccc_do_range(ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID,
				  virt_to_phys((void *)addr), numpages, pkvm_func_range);
}

static int pkvm_set_memory_decrypted(unsigned long addr, int numpages)
{
	return arm_smccc_do_range(ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID,
				  virt_to_phys((void *)addr), numpages, pkvm_func_range);
}

static const struct arm64_mem_crypt_ops pkvm_crypt_ops = {
	.encrypt = pkvm_set_memory_encrypted,
	.decrypt = pkvm_set_memory_decrypted,
};

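/*
 * ioremap() hook: advertise device mappings to the hypervisor's MMIO guard
 * so that it knows which regions the guest expects to be handled as MMIO.
 */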
static int mmio_guard_ioremap_hook(phys_addr_t phys, size_t size,
				   pgprot_t *prot)
{
	pteval_t protval = pgprot_val(*prot);
	u32 func_id = pkvm_func_range ?
		      ARM_SMCCC_VENDOR_HYP_KVM_MMIO_RGUARD_MAP_FUNC_ID :
		      ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_MAP_FUNC_ID;
	phys_addr_t end;

	/*
	 * We only expect MMIO emulation for regions mapped with device
	 * attributes.
	 */
	if (protval != PROT_DEVICE_nGnRE && protval != PROT_DEVICE_nGnRnE)
		return 0;

	end = ALIGN(phys + size, PAGE_SIZE);
	phys = ALIGN_DOWN(phys, PAGE_SIZE);
	size = end - phys;

	/*
	 * It is fine to overshoot MMIO guard requests: their sole purpose is
	 * to tell the hypervisor where the MMIO regions are, and we have
	 * already validated the alignment of the memory regions.
	 */
	end = ALIGN(phys + size, pkvm_granule);
	phys = ALIGN_DOWN(phys, pkvm_granule);

	WARN_ON_ONCE(arm_smccc_do_range(func_id, phys, (end - phys) >> PAGE_SHIFT,
					pkvm_func_range));
	return 0;
}

#ifdef CONFIG_VIRTIO_BALLOON_HYP_OPS

static bool mem_relinquish_available;

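/*
 * Single-page relinquish is not possible when the hypervisor's protection
 * granule is larger than PAGE_SIZE.
 */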
static bool pkvm_page_relinquish_disallowed(void)
{
	return mem_relinquish_available && (pkvm_granule > PAGE_SIZE);
}

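/*
 * Return @nr pages starting at @page to the hypervisor, one protection
 * granule at a time.
 */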
static void pkvm_page_relinquish(struct page *page, unsigned int nr)
{
	phys_addr_t phys, end;
	u32 func_id = ARM_SMCCC_VENDOR_HYP_KVM_MEM_RELINQUISH_FUNC_ID;

	if (!mem_relinquish_available)
		return;

	phys = page_to_phys(page);
	end = phys + PAGE_SIZE * nr;

	while (phys < end) {
		struct arm_smccc_res res;

		arm_smccc_1_1_invoke(func_id, phys, 0, 0, &res);
		BUG_ON(res.a0 != SMCCC_RET_SUCCESS);

		phys += pkvm_granule;
	}
}

static struct virtio_balloon_hyp_ops pkvm_virtio_balloon_hyp_ops = {
	.page_relinquish_disallowed = pkvm_page_relinquish_disallowed,
	.page_relinquish = pkvm_page_relinquish,
};

#endif

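/*
 * Check that every memblock memory region starts and ends on a protection
 * granule boundary (boundaries between contiguous regions don't matter).
 * This is what allows the MMIO guard hook above to round its requests up
 * to the granule without covering guest memory.
 */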
static bool __dram_is_aligned(size_t pkvm_granule)
{
	struct memblock_region *region, *prev = NULL;

	for_each_mem_region(region) {
		phys_addr_t prev_end;

		if (!prev)
			goto discontinuous;

		prev_end = prev->base + prev->size;
		if (prev_end == region->base)
			goto contiguous;

		if (!IS_ALIGNED(prev_end, pkvm_granule))
			return false;
discontinuous:
		if (!IS_ALIGNED(region->base, pkvm_granule))
			return false;
contiguous:
		prev = region;
	}

	/* 'region' points past the last entry here, so check the end of 'prev'. */
	return IS_ALIGNED(prev->base + prev->size, pkvm_granule);
}

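/*
 * Probe the pKVM hypercall interface and register the guest-side hooks
 * (memory encryption ops, MMIO guard, balloon page relinquish) for the
 * services that the hypervisor advertises.
 */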
void pkvm_init_hyp_services(void)
{
	struct arm_smccc_res res;

	if (!kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_HYP_MEMINFO))
		return;

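	/*
	 * HYP_MEMINFO returns the protection granule in a0; a1 is treated as
	 * a flag for the range variants of the calls (see pkvm_func_range).
	 */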
	arm_smccc_1_1_invoke(ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID,
			     0, 0, 0, &res);
	if ((long)res.a0 < 0)
		return;

	pkvm_granule = res.a0;
	pkvm_func_range = !!res.a1;

	if (kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MEM_SHARE) &&
	    kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MEM_UNSHARE))
		arm64_mem_crypt_ops_register(&pkvm_crypt_ops);

	if (kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MMIO_GUARD_MAP) &&
	    __dram_is_aligned(pkvm_granule))
		arm64_ioremap_prot_hook_register(&mmio_guard_ioremap_hook);

#ifdef CONFIG_VIRTIO_BALLOON_HYP_OPS
	virtio_balloon_hyp_ops = &pkvm_virtio_balloon_hyp_ops;
	if (kvm_arm_hyp_service_available(ARM_SMCCC_KVM_FUNC_MEM_RELINQUISH))
		mem_relinquish_available = true;
#endif
}