// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

#include <hyp/switch.h>

#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>

#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)

extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;

static struct hyp_pool host_s2_mem;
static struct hyp_pool host_s2_dev;

/*
 * Copies of the host's CPU features registers holding sanitized values.
 */
u64 id_aa64mmfr0_el1_sys_val;
u64 id_aa64mmfr1_el1_sys_val;

static const u8 pkvm_hyp_id = 1;
/*
 * mm_ops callback: allocate an exact-size, zeroed chunk from the host
 * stage-2 memory pool.
 *
 * @size must be a power-of-two multiple of PAGE_SIZE (the concatenated
 * PGD case) — the WARN_ON below documents that assumption instead of
 * freeing tail pages. Returns NULL if the pool is exhausted.
 */
static void *host_s2_zalloc_pages_exact(size_t size)
{
	void *addr = hyp_alloc_pages(&host_s2_mem, get_order(size));

	/* Don't touch page metadata for a failed allocation. */
	if (!addr)
		return NULL;

	hyp_split_page(hyp_virt_to_page(addr));

	/*
	 * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
	 * so there should be no need to free any of the tail pages to make the
	 * allocation exact.
	 */
	WARN_ON(size != (PAGE_SIZE << get_order(size)));

	return addr;
}
52 
/* mm_ops callback: hand out a single (order-0) page from @pool. */
static void *host_s2_zalloc_page(void *pool)
{
	struct hyp_pool *p = pool;

	return hyp_alloc_pages(p, 0);
}
57 
prepare_s2_pools(void * mem_pgt_pool,void * dev_pgt_pool)58 static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool)
59 {
60 	unsigned long nr_pages, pfn;
61 	int ret;
62 
63 	pfn = hyp_virt_to_pfn(mem_pgt_pool);
64 	nr_pages = host_s2_mem_pgtable_pages();
65 	ret = hyp_pool_init(&host_s2_mem, pfn, nr_pages, 0);
66 	if (ret)
67 		return ret;
68 
69 	pfn = hyp_virt_to_pfn(dev_pgt_pool);
70 	nr_pages = host_s2_dev_pgtable_pages();
71 	ret = hyp_pool_init(&host_s2_dev, pfn, nr_pages, 0);
72 	if (ret)
73 		return ret;
74 
75 	host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
76 		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
77 		.zalloc_page = host_s2_zalloc_page,
78 		.phys_to_virt = hyp_phys_to_virt,
79 		.virt_to_phys = hyp_virt_to_phys,
80 		.page_count = hyp_page_count,
81 		.get_page = hyp_get_page,
82 		.put_page = hyp_put_page,
83 	};
84 
85 	return 0;
86 }
87 
prepare_host_vtcr(void)88 static void prepare_host_vtcr(void)
89 {
90 	u32 parange, phys_shift;
91 
92 	/* The host stage 2 is id-mapped, so use parange for T0SZ */
93 	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
94 	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);
95 
96 	host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
97 					  id_aa64mmfr1_el1_sys_val, phys_shift);
98 }
99 
kvm_host_prepare_stage2(void * mem_pgt_pool,void * dev_pgt_pool)100 int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool)
101 {
102 	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
103 	int ret;
104 
105 	prepare_host_vtcr();
106 	hyp_spin_lock_init(&host_kvm.lock);
107 
108 	ret = prepare_s2_pools(mem_pgt_pool, dev_pgt_pool);
109 	if (ret)
110 		return ret;
111 
112 	ret = kvm_pgtable_stage2_init_flags(&host_kvm.pgt, &host_kvm.arch,
113 					    &host_kvm.mm_ops, KVM_HOST_S2_FLAGS);
114 	if (ret)
115 		return ret;
116 
117 	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
118 	mmu->arch = &host_kvm.arch;
119 	mmu->pgt = &host_kvm.pgt;
120 	mmu->vmid.vmid_gen = 0;
121 	mmu->vmid.vmid = 0;
122 
123 	return 0;
124 }
125 
/*
 * Enable the host stage-2 translation on the calling CPU.
 *
 * Records the stage-2 configuration (VTTBR, VTCR, HCR_EL2.VM) in this
 * CPU's init parameters, then loads the stage-2 context and invalidates
 * any stale TLB state. Always returns 0.
 */
int __pkvm_prot_finalize(void)
{
	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

	params->vttbr = kvm_get_vttbr(mmu);
	params->vtcr = host_kvm.arch.vtcr;
	params->hcr_el2 |= HCR_VM;
	/* Push the updated params to PoC so non-cacheable readers see them. */
	kvm_flush_dcache_to_poc(params, sizeof(*params));

	write_sysreg(params->hcr_el2, hcr_el2);
	__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);

	/*
	 * Make sure to have an ISB before the TLB maintenance below but only
	 * when __load_stage2() doesn't include one already.
	 */
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));

	/* Invalidate stale HCR bits that may be cached in TLBs */
	__tlbi(vmalls12e1);
	dsb(nsh);
	isb();

	return 0;
}
152 
/*
 * Unmap every non-memory (MMIO) range from the host stage-2 so the
 * page-table pages backing those mappings are released and can be
 * reused by the allocator.
 *
 * Walks the sorted hyp_memory memblock list, unmapping the gap before
 * each region and finally the tail up to the end of the IPA space.
 * Note the loop increment reads @reg, which was assigned by the
 * previous iteration's body; on the first iteration @addr is 0 and the
 * increment has not yet run.
 *
 * Returns 0 on success or the first error from the page-table walker.
 */
static int host_stage2_unmap_dev_all(void)
{
	struct kvm_pgtable *pgt = &host_kvm.pgt;
	struct memblock_region *reg;
	u64 addr = 0;
	int i, ret;

	/* Unmap all non-memory regions to recycle the pages */
	for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
		reg = &hyp_memory[i];
		ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
		if (ret)
			return ret;
	}
	/* Unmap the tail: from the end of the last region to the IPA limit. */
	return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}
169 
find_mem_range(phys_addr_t addr,struct kvm_mem_range * range)170 static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
171 {
172 	int cur, left = 0, right = hyp_memblock_nr;
173 	struct memblock_region *reg;
174 	phys_addr_t end;
175 
176 	range->start = 0;
177 	range->end = ULONG_MAX;
178 
179 	/* The list of memblock regions is sorted, binary search it */
180 	while (left < right) {
181 		cur = (left + right) >> 1;
182 		reg = &hyp_memory[cur];
183 		end = reg->base + reg->size;
184 		if (addr < reg->base) {
185 			right = cur;
186 			range->end = reg->base;
187 		} else if (addr >= end) {
188 			left = cur + 1;
189 			range->start = end;
190 		} else {
191 			range->start = reg->base;
192 			range->end = end;
193 			return true;
194 		}
195 	}
196 
197 	return false;
198 }
199 
range_is_memory(u64 start,u64 end)200 static bool range_is_memory(u64 start, u64 end)
201 {
202 	struct kvm_mem_range r1, r2;
203 
204 	if (!find_mem_range(start, &r1) || !find_mem_range(end - 1, &r2))
205 		return false;
206 	if (r1.start != r2.start)
207 		return false;
208 
209 	return true;
210 }
211 
/*
 * Install an identity mapping for [start, end) in the host stage-2 with
 * protections @prot, drawing page-table pages from @pool.
 */
static inline int __host_stage2_idmap(u64 start, u64 end,
				      enum kvm_pgtable_prot prot,
				      struct hyp_pool *pool)
{
	u64 size = end - start;

	return kvm_pgtable_stage2_map(&host_kvm.pgt, start, size, start,
				      prot, pool);
}
219 
/*
 * Lazily install the host stage-2 identity mapping covering @addr
 * (called from the host mem-abort path).
 *
 * Memory is mapped RWX from the memory pool; everything else (assumed
 * MMIO) is mapped RW from the device pool. The candidate range is first
 * narrowed by kvm_pgtable_stage2_find_range() before mapping. If the
 * device pool runs dry, all MMIO mappings are torn down to recycle their
 * page-table pages and the mapping is retried once.
 *
 * Returns 0 on success or a negative error code.
 */
static int host_stage2_idmap(u64 addr)
{
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
	struct kvm_mem_range range;
	bool is_memory = find_mem_range(addr, &range);
	struct hyp_pool *pool = is_memory ? &host_s2_mem : &host_s2_dev;
	int ret;

	if (is_memory)
		prot |= KVM_PGTABLE_PROT_X;

	hyp_spin_lock(&host_kvm.lock);
	ret = kvm_pgtable_stage2_find_range(&host_kvm.pgt, addr, prot, &range);
	if (ret)
		goto unlock;

	ret = __host_stage2_idmap(range.start, range.end, prot, pool);
	if (is_memory || ret != -ENOMEM)
		goto unlock;

	/*
	 * host_s2_mem has been provided with enough pages to cover all of
	 * memory with page granularity, so we should never hit the ENOMEM case.
	 * However, it is difficult to know how much of the MMIO range we will
	 * need to cover upfront, so we may need to 'recycle' the pages if we
	 * run out.
	 */
	ret = host_stage2_unmap_dev_all();
	if (ret)
		goto unlock;

	ret = __host_stage2_idmap(range.start, range.end, prot, pool);

unlock:
	hyp_spin_unlock(&host_kvm.lock);

	return ret;
}
258 
/*
 * Mark the memory range [start, end) as owned by the hypervisor in the
 * host stage-2. The range must fall entirely within one memory region.
 *
 * Returns -EINVAL if the range is not memory, otherwise the result of
 * kvm_pgtable_stage2_set_owner() with -EAGAIN filtered to 0.
 */
int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
{
	int err;

	/*
	 * host_stage2_unmap_dev_all() currently relies on MMIO mappings being
	 * non-persistent, so don't allow changing page ownership in MMIO range.
	 */
	if (!range_is_memory(start, end))
		return -EINVAL;

	hyp_spin_lock(&host_kvm.lock);
	err = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
					   &host_s2_mem, pkvm_hyp_id);
	hyp_spin_unlock(&host_kvm.lock);

	return err == -EAGAIN ? 0 : err;
}
277 
handle_host_mem_abort(struct kvm_cpu_context * host_ctxt)278 void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
279 {
280 	struct kvm_vcpu_fault_info fault;
281 	u64 esr, addr;
282 	int ret = 0;
283 
284 	esr = read_sysreg_el2(SYS_ESR);
285 	if (!__get_fault_info(esr, &fault))
286 		hyp_panic();
287 
288 	addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
289 	ret = host_stage2_idmap(addr);
290 	if (ret && ret != -EAGAIN)
291 		hyp_panic();
292 }
293