1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Check for KVM_GET_REG_LIST regressions.
4 *
5 * Copyright (C) 2020, Red Hat, Inc.
6 *
7 * When attempting to migrate from a host with an older kernel to a host
8 * with a newer kernel we allow the newer kernel on the destination to
9 * list new registers with get-reg-list. We assume they'll be unused, at
10 * least until the guest reboots, and so they're relatively harmless.
11 * However, if the destination host with the newer kernel is missing
12 * registers which the source host with the older kernel has, then that's
13 * a regression in get-reg-list. This test checks for that regression by
14 * checking the current list against a blessed list. We should never have
15 * missing registers, but if new ones appear then they can probably be
16 * added to the blessed list. A completely new blessed list can be created
17 * by running the test with the --list command line argument.
18 *
19 * Note, the blessed list should be created from the oldest possible
20 * kernel. We can't go older than v4.15, though, because that's the first
21 * release to expose the ID system registers in KVM_GET_REG_LIST, see
22 * commit 93390c0a1b20 ("arm64: KVM: Hide unsupported AArch64 CPU features
23 * from guests"). Also, one must use the --core-reg-fixup command line
24 * option when running on an older kernel that doesn't include df205b5c6328
25 * ("KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST")
26 */
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include "kvm_util.h"
31 #include "test_util.h"
32 #include "processor.h"
33
34 #ifdef REG_LIST_SVE
35 #define reg_list_sve() (true)
36 #else
37 #define reg_list_sve() (false)
38 #endif
39
40 #define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)
41
42 #define for_each_reg(i) \
43 for ((i) = 0; (i) < reg_list->n; ++(i))
44
45 #define for_each_missing_reg(i) \
46 for ((i) = 0; (i) < blessed_n; ++(i)) \
47 if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i]))
48
49 #define for_each_new_reg(i) \
50 for ((i) = 0; (i) < reg_list->n; ++(i)) \
51 if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
52
53
54 static struct kvm_reg_list *reg_list;
55
56 static __u64 base_regs[], vregs[], sve_regs[], rejects_set[];
57 static __u64 base_regs_n, vregs_n, sve_regs_n, rejects_set_n;
58 static __u64 *blessed_reg, blessed_n;
59
/*
 * Return true if @reg appears in the first @nr_regs entries of @regs.
 *
 * Fix: the index was an 'int' compared against a '__u64' count, a
 * signed/unsigned mismatch (and a truncation hazard for huge counts);
 * use __u64 for the index as well.
 */
static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
{
	__u64 i;

	for (i = 0; i < nr_regs; ++i)
		if (reg == regs[i])
			return true;
	return false;
}
69
/*
 * Return a heap-allocated copy of @template with the first "##"
 * replaced by the decimal value of @index.  The caller never frees the
 * result; that leak is deliberate in this short-lived test binary.
 *
 * Fix: the previous implementation sprintf'd the index into a
 * strdup() of the template, overflowing the allocation whenever the
 * index needed more than the two characters that "##" occupied.
 * Allocate room for the largest possible 64-bit decimal instead.
 */
static const char *str_with_index(const char *template, __u64 index)
{
	const char *hash = strstr(template, "##");
	char *str, *p;

	/* Callers must pass a template containing "##". */
	if (!hash)
		return template;

	/* +20 covers any 64-bit value in decimal; "##" and NUL give slack. */
	str = malloc(strlen(template) + 20);
	if (!str)
		return template;

	p = str;
	memcpy(p, template, hash - template);
	p += hash - template;
	p += sprintf(p, "%lld", index);
	strcpy(p, hash + 2);

	return (const char *)str;
}
82
83 #define CORE_REGS_XX_NR_WORDS 2
84 #define CORE_SPSR_XX_NR_WORDS 2
85 #define CORE_FPREGS_XX_NR_WORDS 4
86
/*
 * Map a KVM_REG_ARM_CORE register id onto the source expression that
 * names it in the blessed lists below.  Any id that doesn't land on a
 * known core register fails the test via TEST_FAIL().
 */
static const char *core_id_to_str(__u64 id)
{
	__u64 core_off = id & ~REG_MASK, idx;

	/*
	 * core_off is the offset into struct kvm_regs
	 */
	switch (core_off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
		/* Each GPR spans CORE_REGS_XX_NR_WORDS index words. */
		idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
		TEST_ASSERT(idx < 31, "Unexpected regs.regs index: %lld", idx);
		return str_with_index("KVM_REG_ARM_CORE_REG(regs.regs[##])", idx);
	case KVM_REG_ARM_CORE_REG(regs.sp):
		return "KVM_REG_ARM_CORE_REG(regs.sp)";
	case KVM_REG_ARM_CORE_REG(regs.pc):
		return "KVM_REG_ARM_CORE_REG(regs.pc)";
	case KVM_REG_ARM_CORE_REG(regs.pstate):
		return "KVM_REG_ARM_CORE_REG(regs.pstate)";
	case KVM_REG_ARM_CORE_REG(sp_el1):
		return "KVM_REG_ARM_CORE_REG(sp_el1)";
	case KVM_REG_ARM_CORE_REG(elr_el1):
		return "KVM_REG_ARM_CORE_REG(elr_el1)";
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		/* Each SPSR spans CORE_SPSR_XX_NR_WORDS index words. */
		idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
		TEST_ASSERT(idx < KVM_NR_SPSR, "Unexpected spsr index: %lld", idx);
		return str_with_index("KVM_REG_ARM_CORE_REG(spsr[##])", idx);
	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		/* 128-bit vector regs span CORE_FPREGS_XX_NR_WORDS index words. */
		idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
		TEST_ASSERT(idx < 32, "Unexpected fp_regs.vregs index: %lld", idx);
		return str_with_index("KVM_REG_ARM_CORE_REG(fp_regs.vregs[##])", idx);
	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
	}

	TEST_FAIL("Unknown core reg id: 0x%llx", id);
	return NULL;
}
129
/*
 * Map an SVE register id onto the source expression that names it in
 * the blessed lists below.  The low 5 bits of ZREG/PREG ids encode the
 * slice index; only slice 0 is expected from current kernels.
 */
static const char *sve_id_to_str(__u64 id)
{
	__u64 sve_off, n, i;

	if (id == KVM_REG_ARM64_SVE_VLS)
		return "KVM_REG_ARM64_SVE_VLS";

	/* Strip the 5-bit slice field to get the register base. */
	sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
	i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);

	TEST_ASSERT(i == 0, "Currently we don't expect slice > 0, reg id 0x%llx", id);

	switch (sve_off) {
	case KVM_REG_ARM64_SVE_ZREG_BASE ...
	     KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
		n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
			    "Unexpected bits set in SVE ZREG id: 0x%llx", id);
		return str_with_index("KVM_REG_ARM64_SVE_ZREG(##, 0)", n);
	case KVM_REG_ARM64_SVE_PREG_BASE ...
	     KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
		n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
			    "Unexpected bits set in SVE PREG id: 0x%llx", id);
		return str_with_index("KVM_REG_ARM64_SVE_PREG(##, 0)", n);
	case KVM_REG_ARM64_SVE_FFR_BASE:
		TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
			    "Unexpected bits set in SVE FFR id: 0x%llx", id);
		return "KVM_REG_ARM64_SVE_FFR(0)";
	}

	/*
	 * Fix: fail loudly here, consistent with core_id_to_str().  The
	 * old silent 'return NULL' was handed straight to printf("%s")
	 * by print_reg(), which is undefined behavior.
	 */
	TEST_FAIL("Unexpected SVE reg id: 0x%llx", id);
	return NULL;
}
163
/*
 * Print a register id as a line suitable for pasting into the blessed
 * lists below, after sanity-checking that only the expected bit-fields
 * are set.  An unknown size or coproc type fails the test.
 */
static void print_reg(__u64 id)
{
	unsigned op0, op1, crn, crm, op2;
	const char *reg_size = NULL;

	TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
		    "KVM_REG_ARM64 missing in reg id: 0x%llx", id);

	/* Decode the size field into its symbolic name. */
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U8:
		reg_size = "KVM_REG_SIZE_U8";
		break;
	case KVM_REG_SIZE_U16:
		reg_size = "KVM_REG_SIZE_U16";
		break;
	case KVM_REG_SIZE_U32:
		reg_size = "KVM_REG_SIZE_U32";
		break;
	case KVM_REG_SIZE_U64:
		reg_size = "KVM_REG_SIZE_U64";
		break;
	case KVM_REG_SIZE_U128:
		reg_size = "KVM_REG_SIZE_U128";
		break;
	case KVM_REG_SIZE_U256:
		reg_size = "KVM_REG_SIZE_U256";
		break;
	case KVM_REG_SIZE_U512:
		reg_size = "KVM_REG_SIZE_U512";
		break;
	case KVM_REG_SIZE_U1024:
		reg_size = "KVM_REG_SIZE_U1024";
		break;
	case KVM_REG_SIZE_U2048:
		reg_size = "KVM_REG_SIZE_U2048";
		break;
	default:
		TEST_FAIL("Unexpected reg size: 0x%llx in reg id: 0x%llx",
			  (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
	}

	switch (id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:
		printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(id));
		break;
	case KVM_REG_ARM_DEMUX:
		TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
			    "Unexpected bits set in DEMUX reg id: 0x%llx", id);
		printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
		       reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
		break;
	case KVM_REG_ARM64_SYSREG:
		/*
		 * Extract the encoding fields, then re-encode and compare
		 * to catch any stray bits outside the defined fields.
		 */
		op0 = (id & KVM_REG_ARM64_SYSREG_OP0_MASK) >> KVM_REG_ARM64_SYSREG_OP0_SHIFT;
		op1 = (id & KVM_REG_ARM64_SYSREG_OP1_MASK) >> KVM_REG_ARM64_SYSREG_OP1_SHIFT;
		crn = (id & KVM_REG_ARM64_SYSREG_CRN_MASK) >> KVM_REG_ARM64_SYSREG_CRN_SHIFT;
		crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
		op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
		TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
			    "Unexpected bits set in SYSREG reg id: 0x%llx", id);
		printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
		break;
	case KVM_REG_ARM_FW:
		TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
			    "Unexpected bits set in FW reg id: 0x%llx", id);
		printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
		break;
	case KVM_REG_ARM64_SVE:
		/* SVE ids are only valid when built with REG_LIST_SVE. */
		if (reg_list_sve())
			printf("\t%s,\n", sve_id_to_str(id));
		else
			TEST_FAIL("KVM_REG_ARM64_SVE is an unexpected coproc type in reg id: 0x%llx", id);
		break;
	default:
		TEST_FAIL("Unexpected coproc type: 0x%llx in reg id: 0x%llx",
			  (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
	}
}
241
/*
 * Older kernels listed each 32-bit word of CORE registers separately.
 * For 64 and 128-bit registers we need to ignore the extra words. We
 * also need to fixup the sizes, because the older kernels stated all
 * registers were 64-bit, even when they weren't.
 */
static void core_reg_fixup(void)
{
	struct kvm_reg_list *tmp;
	__u64 id, core_off;
	int i;

	/* NOTE(review): calloc() result is not checked; an allocation
	 * failure here would crash below instead of failing cleanly. */
	tmp = calloc(1, sizeof(*tmp) + reg_list->n * sizeof(__u64));

	for (i = 0; i < reg_list->n; ++i) {
		id = reg_list->reg[i];

		/* Non-core registers are copied through untouched. */
		if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM_CORE) {
			tmp->reg[tmp->n++] = id;
			continue;
		}

		core_off = id & ~REG_MASK;

		switch (core_off) {
		case 0x52: case 0xd2: case 0xd6:
			/*
			 * These offsets are pointing at padding.
			 * We need to ignore them too.
			 */
			continue;
		case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
		     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
			/*
			 * Keep only the first of the four words of each
			 * 128-bit vreg, and correct its stated size.
			 */
			if (core_off & 3)
				continue;
			id &= ~KVM_REG_SIZE_MASK;
			id |= KVM_REG_SIZE_U128;
			tmp->reg[tmp->n++] = id;
			continue;
		case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
		case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
			/* fpsr/fpcr are really 32-bit registers. */
			id &= ~KVM_REG_SIZE_MASK;
			id |= KVM_REG_SIZE_U32;
			tmp->reg[tmp->n++] = id;
			continue;
		default:
			/* Keep only the first word of each 64-bit register. */
			if (core_off & 1)
				continue;
			tmp->reg[tmp->n++] = id;
			break;
		}
	}

	free(reg_list);
	reg_list = tmp;
}
298
prepare_vcpu_init(struct kvm_vcpu_init * init)299 static void prepare_vcpu_init(struct kvm_vcpu_init *init)
300 {
301 if (reg_list_sve())
302 init->features[0] |= 1 << KVM_ARM_VCPU_SVE;
303 }
304
finalize_vcpu(struct kvm_vm * vm,uint32_t vcpuid)305 static void finalize_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
306 {
307 int feature;
308
309 if (reg_list_sve()) {
310 feature = KVM_ARM_VCPU_SVE;
311 vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_FINALIZE, &feature);
312 }
313 }
314
check_supported(void)315 static void check_supported(void)
316 {
317 if (reg_list_sve() && !kvm_check_cap(KVM_CAP_ARM_SVE)) {
318 fprintf(stderr, "SVE not available, skipping tests\n");
319 exit(KSFT_SKIP);
320 }
321 }
322
main(int ac,char ** av)323 int main(int ac, char **av)
324 {
325 struct kvm_vcpu_init init = { .target = -1, };
326 int new_regs = 0, missing_regs = 0, i;
327 int failed_get = 0, failed_set = 0, failed_reject = 0;
328 bool print_list = false, fixup_core_regs = false;
329 struct kvm_vm *vm;
330 __u64 *vec_regs;
331
332 check_supported();
333
334 for (i = 1; i < ac; ++i) {
335 if (strcmp(av[i], "--core-reg-fixup") == 0)
336 fixup_core_regs = true;
337 else if (strcmp(av[i], "--list") == 0)
338 print_list = true;
339 else
340 fprintf(stderr, "Ignoring unknown option: %s\n", av[i]);
341 }
342
343 vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
344 prepare_vcpu_init(&init);
345 aarch64_vcpu_add_default(vm, 0, &init, NULL);
346 finalize_vcpu(vm, 0);
347
348 reg_list = vcpu_get_reg_list(vm, 0);
349
350 if (fixup_core_regs)
351 core_reg_fixup();
352
353 if (print_list) {
354 putchar('\n');
355 for_each_reg(i)
356 print_reg(reg_list->reg[i]);
357 putchar('\n');
358 return 0;
359 }
360
361 /*
362 * We only test that we can get the register and then write back the
363 * same value. Some registers may allow other values to be written
364 * back, but others only allow some bits to be changed, and at least
365 * for ID registers set will fail if the value does not exactly match
366 * what was returned by get. If registers that allow other values to
367 * be written need to have the other values tested, then we should
368 * create a new set of tests for those in a new independent test
369 * executable.
370 */
371 for_each_reg(i) {
372 uint8_t addr[2048 / 8];
373 struct kvm_one_reg reg = {
374 .id = reg_list->reg[i],
375 .addr = (__u64)&addr,
376 };
377 int ret;
378
379 ret = _vcpu_ioctl(vm, 0, KVM_GET_ONE_REG, ®);
380 if (ret) {
381 puts("Failed to get ");
382 print_reg(reg.id);
383 putchar('\n');
384 ++failed_get;
385 }
386
387 /* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
388 if (find_reg(rejects_set, rejects_set_n, reg.id)) {
389 ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, ®);
390 if (ret != -1 || errno != EPERM) {
391 printf("Failed to reject (ret=%d, errno=%d) ", ret, errno);
392 print_reg(reg.id);
393 putchar('\n');
394 ++failed_reject;
395 }
396 continue;
397 }
398
399 ret = _vcpu_ioctl(vm, 0, KVM_SET_ONE_REG, ®);
400 if (ret) {
401 puts("Failed to set ");
402 print_reg(reg.id);
403 putchar('\n');
404 ++failed_set;
405 }
406 }
407
408 if (reg_list_sve()) {
409 blessed_n = base_regs_n + sve_regs_n;
410 vec_regs = sve_regs;
411 } else {
412 blessed_n = base_regs_n + vregs_n;
413 vec_regs = vregs;
414 }
415
416 blessed_reg = calloc(blessed_n, sizeof(__u64));
417 for (i = 0; i < base_regs_n; ++i)
418 blessed_reg[i] = base_regs[i];
419 for (i = 0; i < blessed_n - base_regs_n; ++i)
420 blessed_reg[base_regs_n + i] = vec_regs[i];
421
422 for_each_new_reg(i)
423 ++new_regs;
424
425 for_each_missing_reg(i)
426 ++missing_regs;
427
428 if (new_regs || missing_regs) {
429 printf("Number blessed registers: %5lld\n", blessed_n);
430 printf("Number registers: %5lld\n", reg_list->n);
431 }
432
433 if (new_regs) {
434 printf("\nThere are %d new registers.\n"
435 "Consider adding them to the blessed reg "
436 "list with the following lines:\n\n", new_regs);
437 for_each_new_reg(i)
438 print_reg(reg_list->reg[i]);
439 putchar('\n');
440 }
441
442 if (missing_regs) {
443 printf("\nThere are %d missing registers.\n"
444 "The following lines are missing registers:\n\n", missing_regs);
445 for_each_missing_reg(i)
446 print_reg(blessed_reg[i]);
447 putchar('\n');
448 }
449
450 TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
451 "There are %d missing registers; "
452 "%d registers failed get; %d registers failed set; %d registers failed reject",
453 missing_regs, failed_get, failed_set, failed_reject);
454
455 return 0;
456 }
457
458 /*
459 * The current blessed list was primed with the output of kernel version
460 * v4.15 with --core-reg-fixup and then later updated with new registers.
461 */
462 static __u64 base_regs[] = {
463 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[0]),
464 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[1]),
465 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[2]),
466 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[3]),
467 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[4]),
468 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[5]),
469 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[6]),
470 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[7]),
471 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[8]),
472 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[9]),
473 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[10]),
474 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[11]),
475 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[12]),
476 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[13]),
477 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[14]),
478 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[15]),
479 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[16]),
480 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[17]),
481 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[18]),
482 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[19]),
483 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[20]),
484 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[21]),
485 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[22]),
486 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[23]),
487 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[24]),
488 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[25]),
489 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[26]),
490 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[27]),
491 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[28]),
492 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[29]),
493 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.regs[30]),
494 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.sp),
495 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pc),
496 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(regs.pstate),
497 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(sp_el1),
498 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(elr_el1),
499 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[0]),
500 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[1]),
501 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[2]),
502 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[3]),
503 KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
504 KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
505 KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
506 KVM_REG_ARM_FW_REG(0),
507 KVM_REG_ARM_FW_REG(1),
508 KVM_REG_ARM_FW_REG(2),
509 ARM64_SYS_REG(3, 3, 14, 3, 1), /* CNTV_CTL_EL0 */
510 ARM64_SYS_REG(3, 3, 14, 3, 2), /* CNTV_CVAL_EL0 */
511 ARM64_SYS_REG(3, 3, 14, 0, 2),
512 ARM64_SYS_REG(3, 0, 0, 0, 0), /* MIDR_EL1 */
513 ARM64_SYS_REG(3, 0, 0, 0, 6), /* REVIDR_EL1 */
514 ARM64_SYS_REG(3, 1, 0, 0, 1), /* CLIDR_EL1 */
515 ARM64_SYS_REG(3, 1, 0, 0, 7), /* AIDR_EL1 */
516 ARM64_SYS_REG(3, 3, 0, 0, 1), /* CTR_EL0 */
517 ARM64_SYS_REG(2, 0, 0, 0, 4),
518 ARM64_SYS_REG(2, 0, 0, 0, 5),
519 ARM64_SYS_REG(2, 0, 0, 0, 6),
520 ARM64_SYS_REG(2, 0, 0, 0, 7),
521 ARM64_SYS_REG(2, 0, 0, 1, 4),
522 ARM64_SYS_REG(2, 0, 0, 1, 5),
523 ARM64_SYS_REG(2, 0, 0, 1, 6),
524 ARM64_SYS_REG(2, 0, 0, 1, 7),
525 ARM64_SYS_REG(2, 0, 0, 2, 0), /* MDCCINT_EL1 */
526 ARM64_SYS_REG(2, 0, 0, 2, 2), /* MDSCR_EL1 */
527 ARM64_SYS_REG(2, 0, 0, 2, 4),
528 ARM64_SYS_REG(2, 0, 0, 2, 5),
529 ARM64_SYS_REG(2, 0, 0, 2, 6),
530 ARM64_SYS_REG(2, 0, 0, 2, 7),
531 ARM64_SYS_REG(2, 0, 0, 3, 4),
532 ARM64_SYS_REG(2, 0, 0, 3, 5),
533 ARM64_SYS_REG(2, 0, 0, 3, 6),
534 ARM64_SYS_REG(2, 0, 0, 3, 7),
535 ARM64_SYS_REG(2, 0, 0, 4, 4),
536 ARM64_SYS_REG(2, 0, 0, 4, 5),
537 ARM64_SYS_REG(2, 0, 0, 4, 6),
538 ARM64_SYS_REG(2, 0, 0, 4, 7),
539 ARM64_SYS_REG(2, 0, 0, 5, 4),
540 ARM64_SYS_REG(2, 0, 0, 5, 5),
541 ARM64_SYS_REG(2, 0, 0, 5, 6),
542 ARM64_SYS_REG(2, 0, 0, 5, 7),
543 ARM64_SYS_REG(2, 0, 0, 6, 4),
544 ARM64_SYS_REG(2, 0, 0, 6, 5),
545 ARM64_SYS_REG(2, 0, 0, 6, 6),
546 ARM64_SYS_REG(2, 0, 0, 6, 7),
547 ARM64_SYS_REG(2, 0, 0, 7, 4),
548 ARM64_SYS_REG(2, 0, 0, 7, 5),
549 ARM64_SYS_REG(2, 0, 0, 7, 6),
550 ARM64_SYS_REG(2, 0, 0, 7, 7),
551 ARM64_SYS_REG(2, 0, 0, 8, 4),
552 ARM64_SYS_REG(2, 0, 0, 8, 5),
553 ARM64_SYS_REG(2, 0, 0, 8, 6),
554 ARM64_SYS_REG(2, 0, 0, 8, 7),
555 ARM64_SYS_REG(2, 0, 0, 9, 4),
556 ARM64_SYS_REG(2, 0, 0, 9, 5),
557 ARM64_SYS_REG(2, 0, 0, 9, 6),
558 ARM64_SYS_REG(2, 0, 0, 9, 7),
559 ARM64_SYS_REG(2, 0, 0, 10, 4),
560 ARM64_SYS_REG(2, 0, 0, 10, 5),
561 ARM64_SYS_REG(2, 0, 0, 10, 6),
562 ARM64_SYS_REG(2, 0, 0, 10, 7),
563 ARM64_SYS_REG(2, 0, 0, 11, 4),
564 ARM64_SYS_REG(2, 0, 0, 11, 5),
565 ARM64_SYS_REG(2, 0, 0, 11, 6),
566 ARM64_SYS_REG(2, 0, 0, 11, 7),
567 ARM64_SYS_REG(2, 0, 0, 12, 4),
568 ARM64_SYS_REG(2, 0, 0, 12, 5),
569 ARM64_SYS_REG(2, 0, 0, 12, 6),
570 ARM64_SYS_REG(2, 0, 0, 12, 7),
571 ARM64_SYS_REG(2, 0, 0, 13, 4),
572 ARM64_SYS_REG(2, 0, 0, 13, 5),
573 ARM64_SYS_REG(2, 0, 0, 13, 6),
574 ARM64_SYS_REG(2, 0, 0, 13, 7),
575 ARM64_SYS_REG(2, 0, 0, 14, 4),
576 ARM64_SYS_REG(2, 0, 0, 14, 5),
577 ARM64_SYS_REG(2, 0, 0, 14, 6),
578 ARM64_SYS_REG(2, 0, 0, 14, 7),
579 ARM64_SYS_REG(2, 0, 0, 15, 4),
580 ARM64_SYS_REG(2, 0, 0, 15, 5),
581 ARM64_SYS_REG(2, 0, 0, 15, 6),
582 ARM64_SYS_REG(2, 0, 0, 15, 7),
583 ARM64_SYS_REG(2, 4, 0, 7, 0), /* DBGVCR32_EL2 */
584 ARM64_SYS_REG(3, 0, 0, 0, 5), /* MPIDR_EL1 */
585 ARM64_SYS_REG(3, 0, 0, 1, 0), /* ID_PFR0_EL1 */
586 ARM64_SYS_REG(3, 0, 0, 1, 1), /* ID_PFR1_EL1 */
587 ARM64_SYS_REG(3, 0, 0, 1, 2), /* ID_DFR0_EL1 */
588 ARM64_SYS_REG(3, 0, 0, 1, 3), /* ID_AFR0_EL1 */
589 ARM64_SYS_REG(3, 0, 0, 1, 4), /* ID_MMFR0_EL1 */
590 ARM64_SYS_REG(3, 0, 0, 1, 5), /* ID_MMFR1_EL1 */
591 ARM64_SYS_REG(3, 0, 0, 1, 6), /* ID_MMFR2_EL1 */
592 ARM64_SYS_REG(3, 0, 0, 1, 7), /* ID_MMFR3_EL1 */
593 ARM64_SYS_REG(3, 0, 0, 2, 0), /* ID_ISAR0_EL1 */
594 ARM64_SYS_REG(3, 0, 0, 2, 1), /* ID_ISAR1_EL1 */
595 ARM64_SYS_REG(3, 0, 0, 2, 2), /* ID_ISAR2_EL1 */
596 ARM64_SYS_REG(3, 0, 0, 2, 3), /* ID_ISAR3_EL1 */
597 ARM64_SYS_REG(3, 0, 0, 2, 4), /* ID_ISAR4_EL1 */
598 ARM64_SYS_REG(3, 0, 0, 2, 5), /* ID_ISAR5_EL1 */
599 ARM64_SYS_REG(3, 0, 0, 2, 6), /* ID_MMFR4_EL1 */
600 ARM64_SYS_REG(3, 0, 0, 2, 7), /* ID_ISAR6_EL1 */
601 ARM64_SYS_REG(3, 0, 0, 3, 0), /* MVFR0_EL1 */
602 ARM64_SYS_REG(3, 0, 0, 3, 1), /* MVFR1_EL1 */
603 ARM64_SYS_REG(3, 0, 0, 3, 2), /* MVFR2_EL1 */
604 ARM64_SYS_REG(3, 0, 0, 3, 3),
605 ARM64_SYS_REG(3, 0, 0, 3, 4), /* ID_PFR2_EL1 */
606 ARM64_SYS_REG(3, 0, 0, 3, 5), /* ID_DFR1_EL1 */
607 ARM64_SYS_REG(3, 0, 0, 3, 6), /* ID_MMFR5_EL1 */
608 ARM64_SYS_REG(3, 0, 0, 3, 7),
609 ARM64_SYS_REG(3, 0, 0, 4, 0), /* ID_AA64PFR0_EL1 */
610 ARM64_SYS_REG(3, 0, 0, 4, 1), /* ID_AA64PFR1_EL1 */
611 ARM64_SYS_REG(3, 0, 0, 4, 2),
612 ARM64_SYS_REG(3, 0, 0, 4, 3),
613 ARM64_SYS_REG(3, 0, 0, 4, 4), /* ID_AA64ZFR0_EL1 */
614 ARM64_SYS_REG(3, 0, 0, 4, 5),
615 ARM64_SYS_REG(3, 0, 0, 4, 6),
616 ARM64_SYS_REG(3, 0, 0, 4, 7),
617 ARM64_SYS_REG(3, 0, 0, 5, 0), /* ID_AA64DFR0_EL1 */
618 ARM64_SYS_REG(3, 0, 0, 5, 1), /* ID_AA64DFR1_EL1 */
619 ARM64_SYS_REG(3, 0, 0, 5, 2),
620 ARM64_SYS_REG(3, 0, 0, 5, 3),
621 ARM64_SYS_REG(3, 0, 0, 5, 4), /* ID_AA64AFR0_EL1 */
622 ARM64_SYS_REG(3, 0, 0, 5, 5), /* ID_AA64AFR1_EL1 */
623 ARM64_SYS_REG(3, 0, 0, 5, 6),
624 ARM64_SYS_REG(3, 0, 0, 5, 7),
625 ARM64_SYS_REG(3, 0, 0, 6, 0), /* ID_AA64ISAR0_EL1 */
626 ARM64_SYS_REG(3, 0, 0, 6, 1), /* ID_AA64ISAR1_EL1 */
627 ARM64_SYS_REG(3, 0, 0, 6, 2),
628 ARM64_SYS_REG(3, 0, 0, 6, 3),
629 ARM64_SYS_REG(3, 0, 0, 6, 4),
630 ARM64_SYS_REG(3, 0, 0, 6, 5),
631 ARM64_SYS_REG(3, 0, 0, 6, 6),
632 ARM64_SYS_REG(3, 0, 0, 6, 7),
633 ARM64_SYS_REG(3, 0, 0, 7, 0), /* ID_AA64MMFR0_EL1 */
634 ARM64_SYS_REG(3, 0, 0, 7, 1), /* ID_AA64MMFR1_EL1 */
635 ARM64_SYS_REG(3, 0, 0, 7, 2), /* ID_AA64MMFR2_EL1 */
636 ARM64_SYS_REG(3, 0, 0, 7, 3),
637 ARM64_SYS_REG(3, 0, 0, 7, 4),
638 ARM64_SYS_REG(3, 0, 0, 7, 5),
639 ARM64_SYS_REG(3, 0, 0, 7, 6),
640 ARM64_SYS_REG(3, 0, 0, 7, 7),
641 ARM64_SYS_REG(3, 0, 1, 0, 0), /* SCTLR_EL1 */
642 ARM64_SYS_REG(3, 0, 1, 0, 1), /* ACTLR_EL1 */
643 ARM64_SYS_REG(3, 0, 1, 0, 2), /* CPACR_EL1 */
644 ARM64_SYS_REG(3, 0, 2, 0, 0), /* TTBR0_EL1 */
645 ARM64_SYS_REG(3, 0, 2, 0, 1), /* TTBR1_EL1 */
646 ARM64_SYS_REG(3, 0, 2, 0, 2), /* TCR_EL1 */
647 ARM64_SYS_REG(3, 0, 5, 1, 0), /* AFSR0_EL1 */
648 ARM64_SYS_REG(3, 0, 5, 1, 1), /* AFSR1_EL1 */
649 ARM64_SYS_REG(3, 0, 5, 2, 0), /* ESR_EL1 */
650 ARM64_SYS_REG(3, 0, 6, 0, 0), /* FAR_EL1 */
651 ARM64_SYS_REG(3, 0, 7, 4, 0), /* PAR_EL1 */
652 ARM64_SYS_REG(3, 0, 9, 14, 1), /* PMINTENSET_EL1 */
653 ARM64_SYS_REG(3, 0, 9, 14, 2), /* PMINTENCLR_EL1 */
654 ARM64_SYS_REG(3, 0, 10, 2, 0), /* MAIR_EL1 */
655 ARM64_SYS_REG(3, 0, 10, 3, 0), /* AMAIR_EL1 */
656 ARM64_SYS_REG(3, 0, 12, 0, 0), /* VBAR_EL1 */
657 ARM64_SYS_REG(3, 0, 12, 1, 1), /* DISR_EL1 */
658 ARM64_SYS_REG(3, 0, 13, 0, 1), /* CONTEXTIDR_EL1 */
659 ARM64_SYS_REG(3, 0, 13, 0, 4), /* TPIDR_EL1 */
660 ARM64_SYS_REG(3, 0, 14, 1, 0), /* CNTKCTL_EL1 */
661 ARM64_SYS_REG(3, 2, 0, 0, 0), /* CSSELR_EL1 */
662 ARM64_SYS_REG(3, 3, 9, 12, 0), /* PMCR_EL0 */
663 ARM64_SYS_REG(3, 3, 9, 12, 1), /* PMCNTENSET_EL0 */
664 ARM64_SYS_REG(3, 3, 9, 12, 2), /* PMCNTENCLR_EL0 */
665 ARM64_SYS_REG(3, 3, 9, 12, 3), /* PMOVSCLR_EL0 */
666 ARM64_SYS_REG(3, 3, 9, 12, 4), /* PMSWINC_EL0 */
667 ARM64_SYS_REG(3, 3, 9, 12, 5), /* PMSELR_EL0 */
668 ARM64_SYS_REG(3, 3, 9, 13, 0), /* PMCCNTR_EL0 */
669 ARM64_SYS_REG(3, 3, 9, 14, 0), /* PMUSERENR_EL0 */
670 ARM64_SYS_REG(3, 3, 9, 14, 3), /* PMOVSSET_EL0 */
671 ARM64_SYS_REG(3, 3, 13, 0, 2), /* TPIDR_EL0 */
672 ARM64_SYS_REG(3, 3, 13, 0, 3), /* TPIDRRO_EL0 */
673 ARM64_SYS_REG(3, 3, 14, 8, 0),
674 ARM64_SYS_REG(3, 3, 14, 8, 1),
675 ARM64_SYS_REG(3, 3, 14, 8, 2),
676 ARM64_SYS_REG(3, 3, 14, 8, 3),
677 ARM64_SYS_REG(3, 3, 14, 8, 4),
678 ARM64_SYS_REG(3, 3, 14, 8, 5),
679 ARM64_SYS_REG(3, 3, 14, 8, 6),
680 ARM64_SYS_REG(3, 3, 14, 8, 7),
681 ARM64_SYS_REG(3, 3, 14, 9, 0),
682 ARM64_SYS_REG(3, 3, 14, 9, 1),
683 ARM64_SYS_REG(3, 3, 14, 9, 2),
684 ARM64_SYS_REG(3, 3, 14, 9, 3),
685 ARM64_SYS_REG(3, 3, 14, 9, 4),
686 ARM64_SYS_REG(3, 3, 14, 9, 5),
687 ARM64_SYS_REG(3, 3, 14, 9, 6),
688 ARM64_SYS_REG(3, 3, 14, 9, 7),
689 ARM64_SYS_REG(3, 3, 14, 10, 0),
690 ARM64_SYS_REG(3, 3, 14, 10, 1),
691 ARM64_SYS_REG(3, 3, 14, 10, 2),
692 ARM64_SYS_REG(3, 3, 14, 10, 3),
693 ARM64_SYS_REG(3, 3, 14, 10, 4),
694 ARM64_SYS_REG(3, 3, 14, 10, 5),
695 ARM64_SYS_REG(3, 3, 14, 10, 6),
696 ARM64_SYS_REG(3, 3, 14, 10, 7),
697 ARM64_SYS_REG(3, 3, 14, 11, 0),
698 ARM64_SYS_REG(3, 3, 14, 11, 1),
699 ARM64_SYS_REG(3, 3, 14, 11, 2),
700 ARM64_SYS_REG(3, 3, 14, 11, 3),
701 ARM64_SYS_REG(3, 3, 14, 11, 4),
702 ARM64_SYS_REG(3, 3, 14, 11, 5),
703 ARM64_SYS_REG(3, 3, 14, 11, 6),
704 ARM64_SYS_REG(3, 3, 14, 12, 0),
705 ARM64_SYS_REG(3, 3, 14, 12, 1),
706 ARM64_SYS_REG(3, 3, 14, 12, 2),
707 ARM64_SYS_REG(3, 3, 14, 12, 3),
708 ARM64_SYS_REG(3, 3, 14, 12, 4),
709 ARM64_SYS_REG(3, 3, 14, 12, 5),
710 ARM64_SYS_REG(3, 3, 14, 12, 6),
711 ARM64_SYS_REG(3, 3, 14, 12, 7),
712 ARM64_SYS_REG(3, 3, 14, 13, 0),
713 ARM64_SYS_REG(3, 3, 14, 13, 1),
714 ARM64_SYS_REG(3, 3, 14, 13, 2),
715 ARM64_SYS_REG(3, 3, 14, 13, 3),
716 ARM64_SYS_REG(3, 3, 14, 13, 4),
717 ARM64_SYS_REG(3, 3, 14, 13, 5),
718 ARM64_SYS_REG(3, 3, 14, 13, 6),
719 ARM64_SYS_REG(3, 3, 14, 13, 7),
720 ARM64_SYS_REG(3, 3, 14, 14, 0),
721 ARM64_SYS_REG(3, 3, 14, 14, 1),
722 ARM64_SYS_REG(3, 3, 14, 14, 2),
723 ARM64_SYS_REG(3, 3, 14, 14, 3),
724 ARM64_SYS_REG(3, 3, 14, 14, 4),
725 ARM64_SYS_REG(3, 3, 14, 14, 5),
726 ARM64_SYS_REG(3, 3, 14, 14, 6),
727 ARM64_SYS_REG(3, 3, 14, 14, 7),
728 ARM64_SYS_REG(3, 3, 14, 15, 0),
729 ARM64_SYS_REG(3, 3, 14, 15, 1),
730 ARM64_SYS_REG(3, 3, 14, 15, 2),
731 ARM64_SYS_REG(3, 3, 14, 15, 3),
732 ARM64_SYS_REG(3, 3, 14, 15, 4),
733 ARM64_SYS_REG(3, 3, 14, 15, 5),
734 ARM64_SYS_REG(3, 3, 14, 15, 6),
735 ARM64_SYS_REG(3, 3, 14, 15, 7), /* PMCCFILTR_EL0 */
736 ARM64_SYS_REG(3, 4, 3, 0, 0), /* DACR32_EL2 */
737 ARM64_SYS_REG(3, 4, 5, 0, 1), /* IFSR32_EL2 */
738 ARM64_SYS_REG(3, 4, 5, 3, 0), /* FPEXC32_EL2 */
739 KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | 0,
740 KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | 1,
741 KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | 2,
742 };
743 static __u64 base_regs_n = ARRAY_SIZE(base_regs);
744
745 static __u64 vregs[] = {
746 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]),
747 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[1]),
748 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[2]),
749 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[3]),
750 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[4]),
751 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[5]),
752 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[6]),
753 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[7]),
754 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[8]),
755 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[9]),
756 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[10]),
757 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[11]),
758 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[12]),
759 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[13]),
760 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[14]),
761 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[15]),
762 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[16]),
763 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[17]),
764 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[18]),
765 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[19]),
766 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[20]),
767 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[21]),
768 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[22]),
769 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[23]),
770 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[24]),
771 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[25]),
772 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[26]),
773 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[27]),
774 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[28]),
775 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[29]),
776 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[30]),
777 KVM_REG_ARM64 | KVM_REG_SIZE_U128 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]),
778 };
779 static __u64 vregs_n = ARRAY_SIZE(vregs);
780
781 static __u64 sve_regs[] = {
782 KVM_REG_ARM64_SVE_VLS,
783 KVM_REG_ARM64_SVE_ZREG(0, 0),
784 KVM_REG_ARM64_SVE_ZREG(1, 0),
785 KVM_REG_ARM64_SVE_ZREG(2, 0),
786 KVM_REG_ARM64_SVE_ZREG(3, 0),
787 KVM_REG_ARM64_SVE_ZREG(4, 0),
788 KVM_REG_ARM64_SVE_ZREG(5, 0),
789 KVM_REG_ARM64_SVE_ZREG(6, 0),
790 KVM_REG_ARM64_SVE_ZREG(7, 0),
791 KVM_REG_ARM64_SVE_ZREG(8, 0),
792 KVM_REG_ARM64_SVE_ZREG(9, 0),
793 KVM_REG_ARM64_SVE_ZREG(10, 0),
794 KVM_REG_ARM64_SVE_ZREG(11, 0),
795 KVM_REG_ARM64_SVE_ZREG(12, 0),
796 KVM_REG_ARM64_SVE_ZREG(13, 0),
797 KVM_REG_ARM64_SVE_ZREG(14, 0),
798 KVM_REG_ARM64_SVE_ZREG(15, 0),
799 KVM_REG_ARM64_SVE_ZREG(16, 0),
800 KVM_REG_ARM64_SVE_ZREG(17, 0),
801 KVM_REG_ARM64_SVE_ZREG(18, 0),
802 KVM_REG_ARM64_SVE_ZREG(19, 0),
803 KVM_REG_ARM64_SVE_ZREG(20, 0),
804 KVM_REG_ARM64_SVE_ZREG(21, 0),
805 KVM_REG_ARM64_SVE_ZREG(22, 0),
806 KVM_REG_ARM64_SVE_ZREG(23, 0),
807 KVM_REG_ARM64_SVE_ZREG(24, 0),
808 KVM_REG_ARM64_SVE_ZREG(25, 0),
809 KVM_REG_ARM64_SVE_ZREG(26, 0),
810 KVM_REG_ARM64_SVE_ZREG(27, 0),
811 KVM_REG_ARM64_SVE_ZREG(28, 0),
812 KVM_REG_ARM64_SVE_ZREG(29, 0),
813 KVM_REG_ARM64_SVE_ZREG(30, 0),
814 KVM_REG_ARM64_SVE_ZREG(31, 0),
815 KVM_REG_ARM64_SVE_PREG(0, 0),
816 KVM_REG_ARM64_SVE_PREG(1, 0),
817 KVM_REG_ARM64_SVE_PREG(2, 0),
818 KVM_REG_ARM64_SVE_PREG(3, 0),
819 KVM_REG_ARM64_SVE_PREG(4, 0),
820 KVM_REG_ARM64_SVE_PREG(5, 0),
821 KVM_REG_ARM64_SVE_PREG(6, 0),
822 KVM_REG_ARM64_SVE_PREG(7, 0),
823 KVM_REG_ARM64_SVE_PREG(8, 0),
824 KVM_REG_ARM64_SVE_PREG(9, 0),
825 KVM_REG_ARM64_SVE_PREG(10, 0),
826 KVM_REG_ARM64_SVE_PREG(11, 0),
827 KVM_REG_ARM64_SVE_PREG(12, 0),
828 KVM_REG_ARM64_SVE_PREG(13, 0),
829 KVM_REG_ARM64_SVE_PREG(14, 0),
830 KVM_REG_ARM64_SVE_PREG(15, 0),
831 KVM_REG_ARM64_SVE_FFR(0),
832 ARM64_SYS_REG(3, 0, 1, 2, 0), /* ZCR_EL1 */
833 };
834 static __u64 sve_regs_n = ARRAY_SIZE(sve_regs);
835
/*
 * Registers that KVM_SET_ONE_REG is expected to reject with EPERM once
 * the vcpu has been finalized; see the rejects_set handling in main().
 */
static __u64 rejects_set[] = {
#ifdef REG_LIST_SVE
	KVM_REG_ARM64_SVE_VLS,
#endif
};
static __u64 rejects_set_n = ARRAY_SIZE(rejects_set);
842