// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/alloc.h>
#include <nvhe/early_alloc.h>
#include <nvhe/ffa.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/serial.h>
#include <nvhe/trace.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;

phys_addr_t pvmfw_base;
phys_addr_t pvmfw_size;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
			 (unsigned long)__per_cpu_start)

u64 hyp_lm_size_mb;

static void *vmemmap_base;
static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *selftest_base;
static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;

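/*
 * Carve up the memory pool donated by the host into the contiguous regions
 * needed before the buddy allocator is up: selftest pages, the hyp vmemmap,
 * the VM table, the hyp stage-1 and host stage-2 page-tables, the FF-A proxy
 * buffers, and a single page for hyp_ppages, which is presumably declared in
 * one of the nvhe headers included above.
 */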
static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long nr_pages;

	hyp_early_alloc_init(virt, size);

	nr_pages = pkvm_selftest_pages();
	selftest_base = hyp_early_alloc_contig(nr_pages);
	if (nr_pages && !selftest_base)
		return -ENOMEM;

	nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
	vmemmap_base = hyp_early_alloc_contig(nr_pages);
	if (!vmemmap_base)
		return -ENOMEM;

	nr_pages = hyp_vm_table_pages();
	vm_table_base = hyp_early_alloc_contig(nr_pages);
	if (!vm_table_base)
		return -ENOMEM;

	nr_pages = hyp_s1_pgtable_pages();
	hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!hyp_pgt_base)
		return -ENOMEM;

	nr_pages = host_s2_pgtable_pages();
	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!host_s2_pgt_base)
		return -ENOMEM;

	nr_pages = hyp_ffa_proxy_pages();
	ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
	if (!ffa_proxy_pages)
		return -ENOMEM;

	hyp_ppages = hyp_early_alloc_contig(1);
	if (!hyp_ppages)
		return -ENOMEM;

	return 0;
}

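/*
 * Map each CPU's host SVE state buffer into the hyp stage-1, presumably so
 * the hypervisor can save/restore the host's SVE registers directly.
 * Nothing to do on systems without SVE.
 */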
static int pkvm_create_host_sve_mappings(void)
{
	void *start, *end;
	int ret, i;

	if (!system_supports_sve())
		return 0;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_host_data *host_data = per_cpu_ptr(&kvm_host_data, i);
		struct cpu_sve_state *sve_state = host_data->sve_state;

		start = kern_hyp_va(sve_state);
		end = start + PAGE_ALIGN(pkvm_host_sve_state_size());
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;
	}

	return 0;
}

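/*
 * Rebuild the hyp stage-1 page-table with the early allocator, covering the
 * idmap, the vectors, the vmemmap backing, the hyp text/data/rodata/bss
 * sections, the donated memory pool, the per-CPU regions and stacks, the
 * host SVE state, and a read-only, hyp-owned mapping of the pvmfw image.
 */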
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
				 unsigned long *per_cpu_base,
				 u32 hyp_va_bits)
{
	void *start, *end, *virt = hyp_phys_to_virt(phys);
	unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
	enum kvm_pgtable_prot prot;
	int ret, i;

	/* Recreate the hyp page-table using the early page allocator */
	hyp_early_alloc_init(hyp_pgt_base, pgt_size);
	ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
				   &hyp_early_alloc_mm_ops);
	if (ret)
		return ret;

	ret = hyp_create_idmap(hyp_va_bits);
	if (ret)
		return ret;

	ret = hyp_map_vectors();
	if (ret)
		return ret;

	ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_data_start, __hyp_data_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
	if (ret)
		return ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);

		start = (void *)kern_hyp_va(per_cpu_base[i]);
		end = start + PAGE_ALIGN(hyp_percpu_size);
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;

		ret = pkvm_create_stack(params->stack_pa, &params->stack_hyp_va);
		if (ret)
			return ret;
	}

	ret = pkvm_create_host_sve_mappings();
	if (ret)
		return ret;

	/*
	 * Map the pvmfw section RO in the hypervisor, but transfer its
	 * ownership from the host to the hypervisor itself so that it can't
	 * be donated or shared with another entity.
	 *
	 * The ownership transition requires matching changes in the host
	 * stage-2. This is done later (see fix_host_ownership()) once the
	 * hyp_vmemmap is addressable.
	 */
	start = hyp_phys_to_virt(pvmfw_base);
	end = start + pvmfw_size;
	prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_OWNED);
	ret = pkvm_create_mappings(start, end, prot);
	if (ret)
		return ret;

	return 0;
}

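/*
 * Advertise the new PGD to each CPU's init parameters. The clean to PoC is
 * presumably needed because secondary CPUs read these parameters before
 * their MMU, and hence their data cache, is enabled.
 */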
static void update_nvhe_init_params(void)
{
	struct kvm_nvhe_init_params *params;
	unsigned long i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		params = per_cpu_ptr(&kvm_init_params, i);
		params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
		dcache_clean_inval_poc((unsigned long)params,
				    (unsigned long)params + sizeof(*params));
	}
}

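/* Trivial adapters binding the pgtable mm_ops callbacks to the hyp pool. */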
static void *hyp_zalloc_hyp_page(void *arg)
{
	return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
	hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
	hyp_put_page(&hpool, addr);
}

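/*
 * Leaf walker used by fix_host_ownership(): propagate the page-state
 * annotations found in the hyp stage-1 PTEs into the host's view, either by
 * transferring ownership of hyp-owned pages or by recording the shared state
 * in the hyp vmemmap.
 */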
static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
				     enum kvm_pgtable_walk_flags visit)
{
	enum pkvm_page_state state;
	phys_addr_t phys;

	if (!kvm_pte_valid(ctx->old))
		return 0;

	if (ctx->level != KVM_PGTABLE_LAST_LEVEL)
		return -EINVAL;

	phys = kvm_pte_to_phys(ctx->old);
	if (!addr_is_memory(phys))
		return -EINVAL;

	/*
	 * Adjust the host stage-2 mappings to match the ownership attributes
	 * configured in the hypervisor stage-1.
	 */
	state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
	switch (state) {
	case PKVM_PAGE_OWNED:
		return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
	case PKVM_PAGE_SHARED_OWNED:
		hyp_phys_to_page(phys)->host_state = PKVM_PAGE_SHARED_BORROWED;
		break;
	case PKVM_PAGE_SHARED_BORROWED:
		hyp_phys_to_page(phys)->host_state = PKVM_PAGE_SHARED_OWNED;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx,
					 enum kvm_pgtable_walk_flags visit)
{
	/*
	 * Fix up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
	if (kvm_pte_valid(ctx->old))
		ctx->mm_ops->get_page(ctx->ptep);

	return 0;
}

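/*
 * TABLE_POST walker used by pin_host_tables(): take a reference on every
 * valid host stage-2 table page so the page-table pages themselves stay
 * pinned.
 */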
static int pin_table_walker(const struct kvm_pgtable_visit_ctx *ctx,
			    enum kvm_pgtable_walk_flags visit)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
	kvm_pte_t pte = *(ctx->ptep);

	if (kvm_pte_valid(pte))
		mm_ops->get_page(kvm_pte_follow(pte, mm_ops));

	return 0;
}

static int pin_host_tables(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= pin_table_walker,
		.flags	= KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= &host_mmu.mm_ops,
	};

	return kvm_pgtable_walk(&host_mmu.pgt, 0, BIT(host_mmu.pgt.ia_bits), &walker);
}

static int fix_host_ownership(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_host_ownership_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};
	int i, ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		struct memblock_region *reg = &hyp_memory[i];
		u64 start = (u64)hyp_phys_to_virt(reg->base);

		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
		if (ret)
			return ret;
	}

	return 0;
}

static int fix_hyp_pgtable_refcnt(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_hyp_pgtable_refcnt_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pkvm_pgtable.mm_ops,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
				&walker);
}

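/*
 * Transfer ownership of every PKVM_MREG_PROTECTED_RANGE moveable region to
 * PKVM_ID_PROTECTED, which removes it from the host stage-2.
 */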
static int unmap_protected_regions(void)
{
	struct pkvm_moveable_reg *reg;
	int i, ret;

	for (i = 0; i < pkvm_moveable_regs_nr; i++) {
		reg = &pkvm_moveable_regs[i];
		if (reg->type != PKVM_MREG_PROTECTED_RANGE)
			continue;

		ret = host_stage2_set_owner_locked(reg->start, reg->size,
						   PKVM_ID_PROTECTED);
		if (ret)
			return ret;
	}

	return 0;
}

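/*
 * Second stage of initialisation, tail-called from handle___pkvm_init()
 * once the new page-tables are live (see the comment at the 'out' label):
 * switch to the buddy allocator, prepare the host stage-2, reconcile
 * ownership and refcounts, then return to the host via __host_enter().
 */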
void __noreturn __pkvm_init_finalise(void)
{
	struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
	unsigned long nr_pages, reserved_pages, pfn;
	int ret;

	/* Now that the vmemmap is backed, install the full-fledged allocator */
	pfn = hyp_virt_to_pfn(hyp_pgt_base);
	nr_pages = hyp_s1_pgtable_pages();
	reserved_pages = hyp_early_alloc_nr_used_pages();
	ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
	if (ret)
		goto out;

	ret = kvm_host_prepare_stage2(host_s2_pgt_base);
	if (ret)
		goto out;

	pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_page = hyp_zalloc_hyp_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.get_page = hpool_get_page,
		.put_page = hpool_put_page,
		.page_count = hyp_page_count,
	};
	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

	ret = fix_hyp_pgtable_refcnt();
	if (ret)
		goto out;

	ret = hyp_create_fixmap();
	if (ret)
		goto out;

	ret = pkvm_timer_init();
	if (ret)
		goto out;

	hyp_ftrace_setup_core();

	ret = fix_host_ownership();
	if (ret)
		goto out;

	ret = unmap_protected_regions();
	if (ret)
		goto out;

	ret = pin_host_tables();
	if (ret)
		goto out;

	ret = hyp_ffa_init(ffa_proxy_pages);
	if (ret)
		goto out;

	pkvm_hyp_vm_table_init(vm_table_base);

	pkvm_ownership_selftest(selftest_base);
out:
	/*
	 * We tail-called here from handle___pkvm_init() and will not return,
	 * so make sure to propagate the return value to the host.
	 */
	cpu_reg(host_ctxt, 1) = ret;

	__host_enter(host_ctxt);
}

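/*
 * First stage of initialisation, running on the calling CPU with the memory
 * range [phys, phys + size) donated by the host: carve up the pool, recreate
 * the hyp mappings, then branch to the physical address of
 * __pkvm_init_switch_pgd() to install the new page-tables and continue in
 * __pkvm_init_finalise().
 */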
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params;
	void *virt = hyp_phys_to_virt(phys);
	typeof(__pkvm_init_switch_pgd) *fn;
	int ret;

	BUG_ON(kvm_check_pvm_sysreg_table());

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return -EINVAL;

	hyp_spin_lock_init(&pkvm_pgd_lock);
	hyp_nr_cpus = nr_cpus;

	ret = divide_memory_pool(virt, size);
	if (ret)
		return ret;

	ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
	if (ret)
		return ret;

	ret = hyp_alloc_init(SZ_128M);
	if (ret)
		return ret;

	update_nvhe_init_params();

	/* Jump into the idmap page to switch to the new page-tables */
	params = this_cpu_ptr(&kvm_init_params);
	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(params->pgd_pa, params->stack_hyp_va, __pkvm_init_finalise);

	unreachable();
}
455