/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "x86.h"
#include "lapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/nospec.h>
#include <trace/events/kvm.h>

#include "trace.h"

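/*
 * Hyper-V synthetic MSRs come in two flavors: partition-wide MSRs,
 * backed by per-VM state in kvm->arch.hyperv, and per-vCPU MSRs,
 * backed by vcpu->arch.hyperv.  The predicate below routes accesses;
 * partition-wide accesses are serialized with kvm->lock in
 * kvm_hv_{get,set}_msr_common().
 */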
static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
		r = true;
		break;
	}

	return r;
}

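/*
 * Guest crash reporting: the guest deposits up to five diagnostic
 * values in HV_X64_MSR_CRASH_P0..P4 and then sets
 * HV_X64_MSR_CRASH_CTL_NOTIFY in HV_X64_MSR_CRASH_CTL.  KVM logs the
 * parameters and raises KVM_REQ_HV_CRASH so userspace can react to
 * the crash.
 */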
static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
	size_t size = ARRAY_SIZE(hv->hv_crash_param);

	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

	*pdata = hv->hv_crash_param[array_index_nospec(index, size)];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	*pdata = hv->hv_crash_ctl;
	return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (host)
		hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

	if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

		vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
			  hv->hv_crash_param[0],
			  hv->hv_crash_param[1],
			  hv->hv_crash_param[2],
			  hv->hv_crash_param[3],
			  hv->hv_crash_param[4]);

		/* Send notification about crash to user space */
		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
	}

	return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 data)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
	size_t size = ARRAY_SIZE(hv->hv_crash_param);

	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

	hv->hv_crash_param[array_index_nospec(index, size)] = data;
	return 0;
}

static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
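	/*
	 * Hypercall page MSR layout, as decoded below: bit 0 enables
	 * the page, bits 63:12 hold its guest page frame number.  For
	 * example, data == 0x100001 enables the hypercall page at
	 * GFN 0x100, i.e. GPA 0x100000.
	 */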
123 	case HV_X64_MSR_HYPERCALL: {
124 		u64 gfn;
125 		unsigned long addr;
126 		u8 instructions[4];
127 
128 		/* if guest os id is not set hypercall should remain disabled */
129 		if (!hv->hv_guest_os_id)
130 			break;
131 		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
132 			hv->hv_hypercall = data;
133 			break;
134 		}
135 		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
136 		addr = gfn_to_hva(kvm, gfn);
137 		if (kvm_is_error_hva(addr))
138 			return 1;
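		/*
		 * patch_hypercall() emits the vendor's 3-byte
		 * VMCALL/VMMCALL sequence; a near RET is appended so
		 * the guest can simply CALL into the page.
		 */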
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (__copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		hv->hv_hypercall = data;
		mark_page_dirty(kvm, gfn);
		break;
	}
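	/*
	 * Only a zeroed HV_REFERENCE_TSC_PAGE is written below: the
	 * sequence/scale/offset fields are never populated, so the
	 * guest cannot yet use the page for TSC-based timekeeping and
	 * presumably falls back to HV_X64_MSR_TIME_REF_COUNT.
	 */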
	case HV_X64_MSR_REFERENCE_TSC: {
		u64 gfn;
		HV_REFERENCE_TSC_PAGE tsc_ref;

		memset(&tsc_ref, 0, sizeof(tsc_ref));
		hv->hv_tsc_page = data;
		if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
			break;
		gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
		if (kvm_write_guest(
				kvm,
				gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
				&tsc_ref, sizeof(tsc_ref)))
			return 1;
		mark_page_dirty(kvm, gfn);
		break;
	}
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}
	return 0;
}

/* Calculate the CPU time spent by the current task, in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	cputime_t utime, stime;

	task_cputime_adjusted(current, &utime, &stime);
	return div_u64(cputime_to_nsecs(utime + stime), 100);
}

static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
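	/*
	 * The APIC assist page MSR uses the same layout as the
	 * hypercall page MSR: bit 0 enables the page, bits 63:12 hold
	 * its GFN.  Enabling clears the page and registers the same
	 * GPA for PV EOI delivery via kvm_lapic_enable_pv_eoi().
	 */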
	case HV_X64_MSR_APIC_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
			hv->hv_vapic = data;
			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		if (__clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		hv->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_enable_pv_eoi(vcpu,
					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
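	/*
	 * VP_RUNTIME is writable only by the host.  Reads return
	 * current_task_runtime_100ns() + runtime_offset, so setting
	 * the offset to data - current_task_runtime_100ns() makes a
	 * read issued right after the write observe exactly 'data'.
	 */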
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv->runtime_offset = data - current_task_runtime_100ns();
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}

	return 0;
}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
	case HV_X64_MSR_TIME_REF_COUNT: {
		data =
		     div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_get_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 pdata);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
	case HV_X64_MSR_RESET:
		data = 0;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
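	/*
	 * The VP index is simply this vcpu's slot in the kvm->vcpus
	 * array; the kvm_for_each_vcpu() loop counter doubles as the
	 * index once the matching vcpu is found.
	 */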
	case HV_X64_MSR_VP_INDEX: {
		int r;
		struct kvm_vcpu *v;

		kvm_for_each_vcpu(r, v, vcpu->kvm) {
			if (v == vcpu) {
				data = r;
				break;
			}
		}
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_APIC_ASSIST_PAGE:
		data = hv->hv_vapic;
		break;
	case HV_X64_MSR_VP_RUNTIME:
		data = current_task_runtime_100ns() + hv->runtime_offset;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}

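/*
 * Partition-wide MSRs mutate state shared by every vCPU, so those
 * accesses are serialized with kvm->lock; per-vCPU MSRs only touch
 * this vcpu's own state and need no lock.
 */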
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	} else
		return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	} else
		return kvm_hv_get_msr(vcpu, msr, pdata);
}

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

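/*
 * Hyper-V hypercall calling convention, as read out below: in 64-bit
 * mode the input value is in RCX, the input GPA in RDX and the output
 * GPA in R8, with the result returned in RAX.  In 32-bit mode each
 * 64-bit quantity is split across a register pair: EDX:EAX (input
 * value), EBX:ECX (input GPA), EDI:ESI (output GPA), and the result
 * comes back in EDX:EAX.
 */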
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret;
	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
	bool fast, longmode;

	/*
	 * Per the Hyper-V spec, a hypercall issued from real mode or
	 * at non-zero CPL generates a #UD.
	 */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 0;
	}

	longmode = is_64_bit_mode(vcpu);

	if (!longmode) {
		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
	}
#ifdef CONFIG_X86_64
	else {
		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
	}
#endif

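	/*
	 * Hypercall input value layout, per the masks below:
	 *   bits 15:0   call code
	 *   bit  16     fast calling convention
	 *   bits 43:32  rep count
	 *   bits 59:48  rep start index
	 */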
	code = param & 0xffff;
	fast = (param >> 16) & 0x1;
	rep_cnt = (param >> 32) & 0xfff;
	rep_idx = (param >> 48) & 0xfff;

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	switch (code) {
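	/*
	 * HvNotifyLongSpinWait: the guest hints that a vCPU has been
	 * spinning on a lock for a long time, so yield to another
	 * vCPU instead of burning cycles.
	 */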
	case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
		kvm_vcpu_on_spin(vcpu);
		break;
	default:
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

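	/*
	 * Result layout mirrors the input value: bits 15:0 carry the
	 * status code and bits 43:32 the reps-completed count (always
	 * zero here, since no rep hypercalls are implemented).
	 */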
	ret = res | (((u64)rep_done & 0xfff) << 32);
	if (longmode) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	} else {
		kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
		kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
	}

	return 1;
}