// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;

struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;

static u64 __io_map_base;
static DEFINE_PER_CPU(void *, hyp_fixmap_base);

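/*
 * Wrapper around kvm_pgtable_hyp_map() which serializes updates to the
 * hypervisor page-table behind pkvm_pgd_lock.
 */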
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
				  unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return err;
}

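/*
 * Allocate a page-aligned chunk of the private VA range by bumping
 * __io_map_base. The caller must hold pkvm_pgd_lock. Returns an ERR_PTR()
 * encoded address if the allocation would run into the hyp vmemmap.
 */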
static unsigned long hyp_alloc_private_va_range(size_t size)
{
	unsigned long addr = __io_map_base;

	hyp_assert_lock_held(&pkvm_pgd_lock);
	__io_map_base += PAGE_ALIGN(size);

	/* Are we overflowing on the vmemmap? */
	if (__io_map_base > __hyp_vmemmap) {
		__io_map_base = addr;
		addr = (unsigned long)ERR_PTR(-ENOMEM);
	}

	return addr;
}

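/*
 * Map @size bytes at physical address @phys into the private VA range and
 * return the virtual address of the mapping (adjusted for the offset of
 * @phys within its page), or an ERR_PTR() encoded value on failure.
 */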
unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
					    enum kvm_pgtable_prot prot)
{
	unsigned long addr;
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);

	size = size + offset_in_page(phys);
	addr = hyp_alloc_private_va_range(size);
	if (IS_ERR((void *)addr))
		goto out;

	err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, size, phys, prot);
	if (err) {
		addr = (unsigned long)ERR_PTR(err);
		goto out;
	}

	addr = addr + offset_in_page(phys);
out:
	hyp_spin_unlock(&pkvm_pgd_lock);

	return addr;
}

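/*
 * Map the hyp VA range [from, to) to the physical pages it linearly
 * corresponds to, one page at a time. The caller must hold pkvm_pgd_lock.
 */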
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
	unsigned long start = (unsigned long)from;
	unsigned long end = (unsigned long)to;
	unsigned long virt_addr;
	phys_addr_t phys;

	hyp_assert_lock_held(&pkvm_pgd_lock);

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys = hyp_virt_to_phys((void *)virt_addr);
		err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
					  phys, prot);
		if (err)
			return err;
	}

	return 0;
}

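/* Same as pkvm_create_mappings_locked(), but taking pkvm_pgd_lock itself. */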
int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);
	ret = pkvm_create_mappings_locked(from, to, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}

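/*
 * Back the portions of the hyp vmemmap corresponding to each hyp memblock
 * with pages taken from the physically contiguous range starting at @back,
 * zeroing each newly mapped chunk.
 */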
int hyp_back_vmemmap(phys_addr_t back)
{
	unsigned long i, start, size, end = 0;
	int ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		start = hyp_memory[i].base;
		start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
		/*
		 * The beginning of the hyp_vmemmap region for the current
		 * memblock may already be backed by the page backing the end
		 * of the previous region, so avoid mapping it twice.
		 */
		start = max(start, end);

		end = hyp_memory[i].base + hyp_memory[i].size;
		end = PAGE_ALIGN((u64)hyp_phys_to_page(end));
		if (start >= end)
			continue;

		size = end - start;
		ret = __pkvm_create_mappings(start, size, back, PAGE_HYP);
		if (ret)
			return ret;

		memset(hyp_phys_to_virt(back), 0, size);
		back += size;
	}

	return 0;
}

static void *__hyp_bp_vect_base;
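
/*
 * Select the EL2 exception vector slot used by this CPU, depending on the
 * Spectre mitigation in effect. Indirect slots rely on the vectors mapped by
 * hyp_map_vectors() below.
 */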
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
	void *vector;

	switch (slot) {
	case HYP_VECTOR_DIRECT: {
		vector = __kvm_hyp_vector;
		break;
	}
	case HYP_VECTOR_SPECTRE_DIRECT: {
		vector = __bp_harden_hyp_vecs;
		break;
	}
	case HYP_VECTOR_INDIRECT:
	case HYP_VECTOR_SPECTRE_INDIRECT: {
		vector = (void *)__hyp_bp_vect_base;
		break;
	}
	default:
		return -EINVAL;
	}

	vector = __kvm_vector_slot2addr(vector, slot);
	*this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;

	return 0;
}

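/*
 * Make the Spectre BP-hardening vectors available at EL2. When idmapped
 * vectors are required, map them into the private VA range; otherwise the
 * hyp alias of __bp_harden_hyp_vecs is used directly.
 */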
int hyp_map_vectors(void)
{
	phys_addr_t phys;
	void *bp_base;

	if (!kvm_system_needs_idmapped_vectors()) {
		__hyp_bp_vect_base = __bp_harden_hyp_vecs;
		return 0;
	}

	phys = __hyp_pa(__bp_harden_hyp_vecs);
	bp_base = (void *)__pkvm_create_private_mapping(phys,
							__BP_HARDEN_HYP_VECS_SZ,
							PAGE_HYP_EXEC);
	if (IS_ERR_OR_NULL(bp_base))
		return PTR_ERR(bp_base);

	__hyp_bp_vect_base = bp_base;

	return 0;
}

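/*
 * Map a single page at @phys in this CPU's fixmap slot and return its
 * virtual address, or NULL if the mapping could not be installed.
 */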
void *hyp_fixmap_map(phys_addr_t phys)
{
	void *addr = *this_cpu_ptr(&hyp_fixmap_base);
	int ret = kvm_pgtable_hyp_map(&pkvm_pgtable, (u64)addr, PAGE_SIZE,
				      phys, PAGE_HYP);

	return ret ? NULL : addr;
}

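/* Tear down the mapping currently installed in this CPU's fixmap slot. */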
int hyp_fixmap_unmap(void)
{
	void *addr = *this_cpu_ptr(&hyp_fixmap_base);
	int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, (u64)addr, PAGE_SIZE);

	return (ret != PAGE_SIZE) ? -EINVAL : 0;
}

static int __pin_pgtable_cb(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			    enum kvm_pgtable_walk_flags flag, void * const arg)
{
	if (!kvm_pte_valid(*ptep) || level != KVM_PGTABLE_MAX_LEVELS - 1)
		return -EINVAL;

	hyp_page_ref_inc(hyp_virt_to_page(ptep));

	return 0;
}

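/*
 * Take a reference on the last-level page-table page covering @addr so that
 * it is never freed, keeping the fixmap slot's PTE around at all times.
 */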
static int hyp_pin_pgtable_pages(u64 addr)
{
	struct kvm_pgtable_walker walker = {
		.cb	= __pin_pgtable_cb,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
}

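/*
 * Allocate one private VA page per CPU for the fixmap, and pre-allocate and
 * pin the page-table pages covering it so that hyp_fixmap_map() never needs
 * to allocate page-table memory at runtime.
 */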
int hyp_create_pcpu_fixmap(void)
{
	unsigned long i;
	int ret = 0;
	u64 addr;

	hyp_spin_lock(&pkvm_pgd_lock);

	for (i = 0; i < hyp_nr_cpus; i++) {
		addr = hyp_alloc_private_va_range(PAGE_SIZE);
		if (IS_ERR((void *)addr)) {
			ret = -ENOMEM;
			goto unlock;
		}

		/*
		 * Create a dummy mapping, to get the intermediate page-table
		 * pages allocated, then take a reference on the last level
		 * page to keep it around at all times.
		 */
		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE,
					  __hyp_pa(__hyp_bss_start), PAGE_HYP);
		if (ret) {
			ret = -EINVAL;
			goto unlock;
		}

		ret = hyp_pin_pgtable_pages(addr);
		if (ret)
			goto unlock;

		ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, PAGE_SIZE);
		if (ret != PAGE_SIZE) {
			ret = -EINVAL;
			goto unlock;
		} else {
			ret = 0;
		}

		*per_cpu_ptr(&hyp_fixmap_base, i) = (void *)addr;
	}
unlock:
	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}

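/*
 * Identity-map the hyp idmap text section and carve up the rest of the VA
 * space for the private (IO) mappings and the hyp vmemmap.
 */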
int hyp_create_idmap(u32 hyp_va_bits)
{
	unsigned long start, end;

	start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
	start = ALIGN_DOWN(start, PAGE_SIZE);

	end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * One half of the VA space is reserved to linearly map portions of
	 * memory -- see va_layout.c for more details. The other half of the VA
	 * space contains the trampoline page, and needs some care. Split that
	 * second half in two and find the quarter of VA space not conflicting
	 * with the idmap to place the IOs and the vmemmap. IOs use the lower
	 * half of the quarter and the vmemmap the upper half.
	 */
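	/*
	 * For example, with hyp_va_bits == 48 and an idmap whose bit 46 is
	 * clear, the IO range starts at BIT(46) and the vmemmap at
	 * BIT(46) | BIT(45).
	 */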
	__io_map_base = start & BIT(hyp_va_bits - 2);
	__io_map_base ^= BIT(hyp_va_bits - 2);
	__hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);

	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}

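/*
 * __topup_hyp_memcache() callback: donate one page from the host-provided
 * memcache to the hypervisor and return its hyp virtual address, or NULL if
 * the memcache is empty or the donation fails.
 */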
static void *admit_host_page(void *arg)
{
	struct kvm_hyp_memcache *host_mc = arg;

	if (!host_mc->nr_pages)
		return NULL;

	/*
	 * The host still owns the pages in its memcache, so we need to go
	 * through a full host-to-hyp donation cycle to change it. Fortunately,
	 * __pkvm_host_donate_hyp() takes care of races for us, so if it
	 * succeeds we're good to go.
	 */
	if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(host_mc->head), 1))
		return NULL;

	return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
}

/* Refill our local memcache by popping pages from the one provided by the host. */
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
		    struct kvm_hyp_memcache *host_mc)
{
	struct kvm_hyp_memcache tmp = *host_mc;
	int ret;

	ret = __topup_hyp_memcache(mc, min_pages, admit_host_page,
				   hyp_virt_to_phys, &tmp);
	*host_mc = tmp;

	return ret;
}