// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/early_alloc.h>
#include <nvhe/ffa.h>
#include <nvhe/gfp.h>
#include <nvhe/iommu.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>

unsigned long hyp_nr_cpus;

phys_addr_t pvmfw_base;
phys_addr_t pvmfw_size;

#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
			 (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *shadow_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;

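/*
 * Carve the host-donated memory pool (@virt, @size) into the contiguous
 * regions the hypervisor needs: the hyp vmemmap, the shadow VM table, the
 * hyp stage-1 and host stage-2 page-table pools, and the FF-A proxy pages.
 * All allocations come from the early allocator; returns -ENOMEM if the
 * donated pool is too small.
 */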
static int divide_memory_pool(void *virt, unsigned long size)
{
	unsigned long nr_pages;

	hyp_early_alloc_init(virt, size);

	nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
	vmemmap_base = hyp_early_alloc_contig(nr_pages);
	if (!vmemmap_base)
		return -ENOMEM;

	nr_pages = hyp_shadow_table_pages(sizeof(struct kvm_shadow_vm));
	shadow_table_base = hyp_early_alloc_contig(nr_pages);
	if (!shadow_table_base)
		return -ENOMEM;

	nr_pages = hyp_s1_pgtable_pages();
	hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!hyp_pgt_base)
		return -ENOMEM;

	nr_pages = host_s2_pgtable_pages();
	host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
	if (!host_s2_pgt_base)
		return -ENOMEM;

	nr_pages = hyp_ffa_proxy_pages();
	ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
	if (!ffa_proxy_pages)
		return -ENOMEM;

	return 0;
}

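/*
 * Rebuild the hypervisor stage-1 page-table from scratch using the early
 * allocator, and map everything EL2 needs once it becomes self-sufficient:
 * the idmap, the vectors, the vmemmap backing, the hyp text/data/rodata/bss
 * sections, the donated memory pool, each CPU's per-cpu area and stack, and
 * finally the vgic global state (shared, read-only) and the pvmfw image
 * (hyp-owned).
 */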
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
				 unsigned long *per_cpu_base,
				 u32 hyp_va_bits)
{
	void *start, *end, *virt = hyp_phys_to_virt(phys);
	unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
	enum kvm_pgtable_prot prot;
	int ret, i;

	/* Recreate the hyp page-table using the early page allocator */
	hyp_early_alloc_init(hyp_pgt_base, pgt_size);
	ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
				   &hyp_early_alloc_mm_ops);
	if (ret)
		return ret;

	ret = hyp_create_idmap(hyp_va_bits);
	if (ret)
		return ret;

	ret = hyp_map_vectors();
	if (ret)
		return ret;

	ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_data_start, __hyp_data_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
	if (ret)
		return ret;

	ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
	if (ret)
		return ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		start = (void *)kern_hyp_va(per_cpu_base[i]);
		end = start + PAGE_ALIGN(hyp_percpu_size);
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;

		end = (void *)per_cpu_ptr(&kvm_init_params, i)->stack_hyp_va;
		start = end - PAGE_SIZE;
		ret = pkvm_create_mappings(start, end, PAGE_HYP);
		if (ret)
			return ret;
	}

	/*
	 * Map the host sections RO in the hypervisor, but transfer the
	 * ownership from the host to the hypervisor itself to make sure they
	 * can't be donated or shared with another entity.
	 *
	 * The ownership transition requires matching changes in the host
	 * stage-2. This will be done later (see fix_host_ownership()) once
	 * the hyp_vmemmap is addressable.
	 */
	prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
	ret = pkvm_create_mappings(&kvm_vgic_global_state, &kvm_vgic_global_state + 1, prot);
	if (ret)
		return ret;

	start = hyp_phys_to_virt(pvmfw_base);
	end = start + pvmfw_size;
	prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
	ret = pkvm_create_mappings(start, end, prot);
	if (ret)
		return ret;

	return 0;
}

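/*
 * Point each CPU's kvm_nvhe_init_params at the newly built hyp PGD and
 * clean+invalidate the structure to the PoC so the update is visible to
 * CPUs that will read it with the MMU off.
 */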
static void update_nvhe_init_params(void)
{
	struct kvm_nvhe_init_params *params;
	unsigned long i;

	for (i = 0; i < hyp_nr_cpus; i++) {
		params = per_cpu_ptr(&kvm_init_params, i);
		params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
		dcache_clean_inval_poc((unsigned long)params,
				    (unsigned long)params + sizeof(*params));
	}
}

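/* Thin wrappers adapting the hyp buddy allocator to kvm_pgtable_mm_ops. */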
static void *hyp_zalloc_hyp_page(void *arg)
{
	return hyp_alloc_pages(&hpool, 0);
}

static void hpool_get_page(void *addr)
{
	hyp_get_page(&hpool, addr);
}

static void hpool_put_page(void *addr)
{
	hyp_put_page(&hpool, addr);
}

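/*
 * Page-table walker callback: for each valid leaf mapping in the hyp
 * stage-1, read the pkvm ownership state encoded in the PTE and mirror it
 * into the host stage-2, either transferring ownership of the page to the
 * hypervisor (PKVM_PAGE_OWNED) or annotating the host mapping with the
 * matching shared state.
 */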
static int fix_host_ownership_walker(u64 addr, u64 end, u32 level,
				     kvm_pte_t *ptep,
				     enum kvm_pgtable_walk_flags flag,
				     void * const arg)
{
	enum kvm_pgtable_prot prot;
	enum pkvm_page_state state;
	kvm_pte_t pte = *ptep;
	phys_addr_t phys;

	if (!kvm_pte_valid(pte))
		return 0;

	if (level != (KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	phys = kvm_pte_to_phys(pte);
	if (!addr_is_memory(phys))
		return -EINVAL;

	/*
	 * Adjust the host stage-2 mappings to match the ownership attributes
	 * configured in the hypervisor stage-1.
	 */
	state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
	switch (state) {
	case PKVM_PAGE_OWNED:
		return host_stage2_set_owner_locked(phys, PAGE_SIZE, pkvm_hyp_id);
	case PKVM_PAGE_SHARED_OWNED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
		break;
	case PKVM_PAGE_SHARED_BORROWED:
		prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
		break;
	default:
		return -EINVAL;
	}

	return host_stage2_idmap_locked(phys, PAGE_SIZE, prot, false);
}

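/*
 * Page-table walker callback: take a reference on the page containing each
 * valid PTE so that hyp page-table pages end up with an accurate refcount.
 */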
static int fix_hyp_pgtable_refcnt_walker(u64 addr, u64 end, u32 level,
					 kvm_pte_t *ptep,
					 enum kvm_pgtable_walk_flags flag,
					 void * const arg)
{
	struct kvm_pgtable_mm_ops *mm_ops = arg;
	kvm_pte_t pte = *ptep;

	/*
	 * Fix-up the refcount for the page-table pages as the early allocator
	 * was unable to access the hyp_vmemmap and so the buddy allocator has
	 * initialised the refcount to '1'.
	 */
	if (kvm_pte_valid(pte))
		mm_ops->get_page(ptep);

	return 0;
}

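/*
 * Walk the hyp stage-1 mappings of every memblock region and propagate the
 * hypervisor's ownership annotations into the host stage-2.
 */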
static int fix_host_ownership(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_host_ownership_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};
	int i, ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		struct memblock_region *reg = &hyp_memory[i];
		u64 start = (u64)hyp_phys_to_virt(reg->base);

		ret = kvm_pgtable_walk(&pkvm_pgtable, start, reg->size, &walker);
		if (ret)
			return ret;
	}

	return 0;
}

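/*
 * Walk the entire hyp stage-1 address range to fix up the refcounts of its
 * page-table pages, now that the full-fledged allocator is in place.
 */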
static int fix_hyp_pgtable_refcnt(void)
{
	struct kvm_pgtable_walker walker = {
		.cb	= fix_hyp_pgtable_refcnt_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pkvm_pgtable.mm_ops,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
				&walker);
}

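/*
 * Second half of pKVM initialisation, entered on the new page-table: install
 * the buddy allocator over the hyp stage-1 pool, prepare the host stage-2,
 * switch pkvm_pgtable to the new mm_ops, fix up page ownership and
 * refcounts, then set up the per-cpu fixmap, the FF-A proxy and the shadow
 * table. Never returns: the return value is propagated to the host and
 * control goes back via __host_enter().
 */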
void __noreturn __pkvm_init_finalise(void)
{
	struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
	struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
	unsigned long nr_pages, reserved_pages, pfn;
	int ret;

	/* Now that the vmemmap is backed, install the full-fledged allocator */
	pfn = hyp_virt_to_pfn(hyp_pgt_base);
	nr_pages = hyp_s1_pgtable_pages();
	reserved_pages = hyp_early_alloc_nr_used_pages();
	ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
	if (ret)
		goto out;

	ret = kvm_host_prepare_stage2(host_s2_pgt_base);
	if (ret)
		goto out;

	pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_page = hyp_zalloc_hyp_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.get_page = hpool_get_page,
		.put_page = hpool_put_page,
		.page_count = hyp_page_count,
	};
	pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

	ret = fix_host_ownership();
	if (ret)
		goto out;

	ret = fix_hyp_pgtable_refcnt();
	if (ret)
		goto out;

	ret = hyp_create_pcpu_fixmap();
	if (ret)
		goto out;

	ret = hyp_ffa_init(ffa_proxy_pages);
	if (ret)
		goto out;

	hyp_shadow_table_init(shadow_table_base);
out:
	/*
	 * We tail-called to here from handle___pkvm_init() and will not return,
	 * so make sure to propagate the return value to the host.
	 */
	cpu_reg(host_ctxt, 1) = ret;

	__host_enter(host_ctxt);
}

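/*
 * Handle the host's __pkvm_init hypercall: carve up the donated memory pool,
 * recreate the hyp mappings with the early allocator, update the per-cpu
 * init parameters, then jump through the idmap to __pkvm_init_switch_pgd(),
 * which installs the new page-table and branches to __pkvm_init_finalise().
 */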
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
		unsigned long *per_cpu_base, u32 hyp_va_bits)
{
	struct kvm_nvhe_init_params *params;
	void *virt = hyp_phys_to_virt(phys);
	void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
	int ret;

	BUG_ON(kvm_check_pvm_sysreg_table());

	if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
		return -EINVAL;

	hyp_spin_lock_init(&pkvm_pgd_lock);
	hyp_nr_cpus = nr_cpus;

	ret = divide_memory_pool(virt, size);
	if (ret)
		return ret;

	ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
	if (ret)
		return ret;

	update_nvhe_init_params();

	/* Jump in the idmap page to switch to the new page-tables */
	params = this_cpu_ptr(&kvm_init_params);
	fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
	fn(__hyp_pa(params), __pkvm_init_finalise);

	unreachable();
}