// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 code
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include <linux/compiler.h>

#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"

#define KVM_GUEST_PAGE_TABLE_MIN_PADDR		0x180000
#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN	0xac0000

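/*
 * Round v up to a vm->page_size boundary (page_size is assumed to be
 * a power of two).
 */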
static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
	return (v + vm->page_size - 1) & ~(vm->page_size - 1);
}

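/*
 * Index helpers for the software page table walk. Each translation
 * level resolves (page_shift - 3) bits of the VA, since a page holds
 * 2^(page_shift - 3) eight-byte descriptors; the top level covers
 * whatever remains of va_bits above the lower levels.
 */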
static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

	return (gva >> shift) & mask;
}

static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels == 4,
		"Mode %d does not have 4 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

	TEST_ASSERT(vm->pgtable_levels >= 3,
		"Mode %d does not have >= 3 page table levels", vm->mode);

	return (gva >> shift) & mask;
}

static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
	return (gva >> vm->page_shift) & mask;
}

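/*
 * Extract the output address from a descriptor by masking off the low
 * attribute bits and any bits above the supported address range.
 */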
static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
	uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;
	return entry & mask;
}

static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
	unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
	return 1 << (vm->va_bits - shift);
}

static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
	return 1 << (vm->page_shift - 3);
}

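/*
 * Allocate the top-level page table on first use. It needs room for
 * ptrs_per_pgd() eight-byte descriptors, rounded up to whole pages.
 */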
void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
{
	if (!vm->pgd_created) {
		vm_paddr_t paddr = vm_phy_pages_alloc(vm,
			page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
			KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
		vm->pgd = paddr;
		vm->pgd_created = true;
	}
}

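/*
 * Map a single page, allocating intermediate table pages as needed.
 * Descriptor encoding: bits [1:0] = 0b11 mark a valid table entry
 * (or a valid page descriptor at the final level), the MAIR index
 * goes in AttrIndx (bits [4:2]), and bit 10 is the Access Flag, set
 * here so the guest does not fault on first access.
 */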
void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		  uint32_t pgd_memslot, uint64_t flags)
{
	uint8_t attr_idx = flags & 7;
	uint64_t *ptep;

	TEST_ASSERT((vaddr % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		" vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(vaddr >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		" paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		" paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		paddr, vm->max_gfn, vm->page_size);

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
	if (!*ptep) {
		*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
		*ptep |= 3;
	}

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
		if (!*ptep) {
			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
			*ptep |= 3;
		}
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
		if (!*ptep) {
			*ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
			*ptep |= 3;
		}
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	*ptep = paddr | 3;
	*ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
}

void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		 uint32_t pgd_memslot)
{
	uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */

	_virt_pg_map(vm, vaddr, paddr, pgd_memslot, attr_idx);
}

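/*
 * Translate a guest virtual address to a guest physical address by
 * walking the guest's page tables from the host. Fails the test if
 * the address is unmapped.
 */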
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint64_t *ptep;

	if (!vm->pgd_created)
		goto unmapped_gva;

	ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
	if (!*ptep)
		goto unmapped_gva;

	switch (vm->pgtable_levels) {
	case 4:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 3:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		/* fall through */
	case 2:
		ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
		if (!*ptep)
			goto unmapped_gva;
		break;
	default:
		TEST_FAIL("Page table levels must be 2, 3, or 4");
	}

	return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(1);
}

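/*
 * Recursively dump a page table subtree. The body is compiled only
 * when DEBUG is defined, since the output is just a debugging aid.
 */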
static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
#ifdef DEBUG
	static const char * const type[] = { "", "pud", "pmd", "pte" };
	uint64_t pte, *ptep;

	if (level == 4)
		return;

	for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
		ptep = addr_gpa2hva(vm, pte);
		if (!*ptep)
			continue;
		fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
	}
#endif
}

void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	int level = 4 - (vm->pgtable_levels - 1);
	uint64_t pgd, *ptep;

	if (!vm->pgd_created)
		return;

	for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
		ptep = addr_gpa2hva(vm, pgd);
		if (!*ptep)
			continue;
		fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
		pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
	}
}

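/*
 * Create a VM with the test binary loaded and one vCPU ready to run
 * guest_code. extra_pg_pages is a rough upper bound on the page table
 * pages needed to map extra_mem_pages: one PTE page per 512 mapped 4K
 * pages, doubled to leave slack for intermediate levels.
 */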
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
				 void *guest_code)
{
	uint64_t ptrs_per_4k_pte = 512;
	uint64_t extra_pg_pages = (extra_mem_pages / ptrs_per_4k_pte) * 2;
	struct kvm_vm *vm;

	vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);

	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
	vm_vcpu_add_default(vm, vcpuid, guest_code);

	return vm;
}

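/*
 * Initialize a vCPU and program its EL1 state: enable FP/ASIMD in
 * CPACR_EL1, configure TCR_EL1/MAIR_EL1/TTBR0_EL1 for the VM's page
 * table geometry, and enable the MMU and caches in SCTLR_EL1.
 */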
void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *init)
{
	struct kvm_vcpu_init default_init = { .target = -1, };
	uint64_t sctlr_el1, tcr_el1;

	if (!init)
		init = &default_init;

	if (init->target == -1) {
		struct kvm_vcpu_init preferred;
		vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
		init->target = preferred.target;
	}

	vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, init);

	/*
	 * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
	 * registers, which the variable argument list macros do.
	 */
	set_reg(vm, vcpuid, ARM64_SYS_REG(CPACR_EL1), 3 << 20);

	get_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), &sctlr_el1);
	get_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), &tcr_el1);

	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with 52-bit physical address ranges");
	case VM_MODE_PXXV48_4K:
		TEST_FAIL("AArch64 does not support 4K sized pages "
			  "with ANY-bit physical address ranges");
	case VM_MODE_P52V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
		break;
	case VM_MODE_P48V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
		break;
	case VM_MODE_P48V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
		break;
	case VM_MODE_P40V48_4K:
		tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	case VM_MODE_P40V48_64K:
		tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
		tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
	/* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */
	tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
	tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;

	set_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), sctlr_el1);
	set_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), tcr_el1);
	set_reg(vm, vcpuid, ARM64_SYS_REG(MAIR_EL1), DEFAULT_MAIR_EL1);
	set_reg(vm, vcpuid, ARM64_SYS_REG(TTBR0_EL1), vm->pgd);
}

void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
	uint64_t pstate, pc;

	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
	get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);

	fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
		indent, "", pstate, pc);
}

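/*
 * Add a vCPU, run the default setup, and point its stack pointer and
 * PC at a freshly allocated guest stack and at guest_code. 4K-page
 * configurations get DEFAULT_STACK_PGS pages of stack; 64K-page
 * configurations get a single page.
 */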
void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
			      struct kvm_vcpu_init *init, void *guest_code)
{
	size_t stack_size = vm->page_size == 4096 ?
					DEFAULT_STACK_PGS * vm->page_size :
					vm->page_size;
	uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
					      DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, 0, 0);

	vm_vcpu_add(vm, vcpuid);
	aarch64_vcpu_setup(vm, vcpuid, init);

	set_reg(vm, vcpuid, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
	set_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
}

void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
	aarch64_vcpu_add_default(vm, vcpuid, NULL, guest_code);
}

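/*
 * Pass up to eight integer arguments to the guest in x0-x7, matching
 * the AAPCS64 convention guest_code is entered under. For example
 * (with hypothetical values):
 *
 *	vcpu_args_set(vm, 0, 2, (uint64_t)gva, 42ul);
 */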
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
	va_list ap;
	int i;

	TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
		    " num: %u\n", num);

	va_start(ap, num);

	for (i = 0; i < num; i++) {
		set_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[i]),
			va_arg(ap, uint64_t));
	}

	va_end(ap);
}

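/*
 * Intentionally empty: this library does not wire up guest exception
 * reporting for aarch64, so there is never an unhandled exception to
 * assert on.
 */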
void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
}