// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2023 SUSE LLC
 * Author: Nicolai Stange <nstange@suse.de>
 * LTP port: Martin Doucha <mdoucha@suse.cz>
 */

/*\
 * CVE 2021-3656
 *
 * Check that KVM correctly intercepts VMSAVE and VMLOAD instructions
 * in a nested virtual machine even when the parent guest disables
 * intercepting either instruction. If KVM does not override the disabled
 * intercepts, it'll give the nested VM read/write access to a few bytes
 * of an arbitrary physical memory page. Unauthorized memory access fixed in:
 *
 *  commit c7dfa4009965a9b2d7b329ee970eb8da0d32f0bc
 *  Author: Maxim Levitsky <mlevitsk@redhat.com>
 *  Date:   Mon Jul 19 16:05:00 2021 +0300
 *
 *  KVM: nSVM: always intercept VMLOAD/VMSAVE when nested (CVE-2021-3656)
 */
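
/*
 * Test procedure: the nested guest is run twice, first executing VMLOAD
 * with the VMLOAD intercept disabled in the VMCB, then executing VMSAVE
 * with the VMSAVE intercept disabled. Both runs target a buffer owned by
 * the parent guest and the buffer is then compared with the VMCB state.
 * Any mismatch means the instruction bypassed KVM and touched some other
 * physical page.
 */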

#include "kvm_test.h"

#ifdef COMPILE_PAYLOAD
#if defined(__i386__) || defined(__x86_64__)

#include "kvm_x86_svm.h"

static void *vmsave_buf;

/* Load FS, GS, TR, LDTR and the syscall/sysenter MSR state from vmsave_buf */
static int guest_vmload(void)
{
	asm (
		"vmload %0\n"
		:
		: "a" (vmsave_buf)
	);
	return 0;
}

/* Save current FS, GS, TR, LDTR and syscall/sysenter MSR state to vmsave_buf */
static int guest_vmsave(void)
{
	asm (
		"vmsave %0\n"
		:
		: "a" (vmsave_buf)
	);
	return 0;
}

static int cmp_descriptor(const struct kvm_vmcb_descriptor *a,
	const struct kvm_vmcb_descriptor *b)
{
	int ret;

	ret = a->selector != b->selector;
	ret = ret || a->attrib != b->attrib;
	ret = ret || a->limit != b->limit;
	ret = ret || a->base != b->base;
	return ret;
}

/* Return non-zero if the VMCB fields touched by vmsave/vmload differ */
static int cmp_vmcb(const struct kvm_vmcb *a, const struct kvm_vmcb *b)
{
	int ret;

	ret = cmp_descriptor(&a->fs, &b->fs);
	ret = ret || cmp_descriptor(&a->gs, &b->gs);
	ret = ret || cmp_descriptor(&a->tr, &b->tr);
	ret = ret || cmp_descriptor(&a->ldtr, &b->ldtr);
	ret = ret || a->kernel_gs_base != b->kernel_gs_base;
	ret = ret || a->star != b->star;
	ret = ret || a->lstar != b->lstar;
	ret = ret || a->cstar != b->cstar;
	ret = ret || a->sfmask != b->sfmask;
	ret = ret || a->sysenter_cs != b->sysenter_cs;
	ret = ret || a->sysenter_esp != b->sysenter_esp;
	ret = ret || a->sysenter_eip != b->sysenter_eip;
	return ret;
}

void main(void)
{
	uint16_t ss;
	uint64_t rsp;
	struct kvm_svm_vcpu *vcpu;

	kvm_init_svm();
	vcpu = kvm_create_svm_vcpu(guest_vmload, 1);
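	/*
	 * Disable the VMLOAD intercept in the VMCB used to run the nested
	 * guest; a fixed KVM must intercept and emulate it anyway.
	 */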
	kvm_vmcb_set_intercept(vcpu->vmcb, SVM_INTERCEPT_VMLOAD, 0);
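	/*
	 * Buffer in parent guest memory; the nested guest passes its address
	 * to VMLOAD/VMSAVE in RAX.
	 */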
	vmsave_buf = kvm_alloc_vmcb();

	/* Save allocated stack for later VM reinit */
	ss = vcpu->vmcb->ss.selector;
	rsp = vcpu->vmcb->rsp;

	/* Load partial state from vmsave_buf and save it to vcpu->vmcb */
	kvm_svm_vmrun(vcpu);

	if (vcpu->vmcb->exitcode != SVM_EXIT_HLT)
		tst_brk(TBROK, "Nested VM exited unexpectedly");

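	/*
	 * If KVM emulated the nested VMLOAD, the segment and MSR state now in
	 * vcpu->vmcb was read from vmsave_buf and the two must be identical.
	 */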
	if (cmp_vmcb(vcpu->vmcb, vmsave_buf)) {
		tst_res(TFAIL, "Nested VM can read host memory");
		return;
	}

	/* Load state from vcpu->vmcb and save it to vmsave_buf */
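	/*
	 * The 0xaa fill ensures the comparison below can only pass if the
	 * nested guest's VMSAVE actually overwrote the buffer.
	 */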
	memset(vmsave_buf, 0xaa, sizeof(struct kvm_vmcb));
	kvm_init_guest_vmcb(vcpu->vmcb, 1, ss, (void *)rsp, guest_vmsave);
	kvm_vmcb_set_intercept(vcpu->vmcb, SVM_INTERCEPT_VMSAVE, 0);
	kvm_svm_vmrun(vcpu);

	if (vcpu->vmcb->exitcode != SVM_EXIT_HLT)
		tst_brk(TBROK, "Nested VM exited unexpectedly");

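	/*
	 * If KVM emulated the nested VMSAVE, vmsave_buf now holds the same
	 * state as vcpu->vmcb; a mismatch means the write went elsewhere.
	 */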
	if (cmp_vmcb(vcpu->vmcb, vmsave_buf)) {
		tst_res(TFAIL, "Nested VM can overwrite host memory");
		return;
	}

	tst_res(TPASS, "VMLOAD and VMSAVE were intercepted by the kernel");
}

#else /* defined(__i386__) || defined(__x86_64__) */
TST_TEST_TCONF("Test supported only on x86");
#endif /* defined(__i386__) || defined(__x86_64__) */

#else /* COMPILE_PAYLOAD */

static struct tst_test test = {
	.test_all = tst_kvm_run,
	.setup = tst_kvm_setup,
	.cleanup = tst_kvm_cleanup,
	.supported_archs = (const char *const []) {
		"x86_64",
		"x86",
		NULL
	},
	.tags = (struct tst_tag[]){
		{"linux-git", "c7dfa4009965"},
		{"CVE", "2021-3656"},
		{}
	}
};

#endif /* COMPILE_PAYLOAD */