// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#define pr_fmt(fmt) "kvm-guest: " fmt

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <linux/syscore_ops.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
#include <asm/ptrace.h>
#include <asm/reboot.h>
#include <asm/svm.h>

DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

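/*
 * Per-CPU data shared with the host: the host writes async #PF reasons and
 * steal time accounting into these, so under SEV they must be mapped
 * decrypted (see sev_map_percpu_data()).
 */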
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

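/*
 * Tasks sleeping on an async #PF are queued on these hash buckets, keyed by
 * the token the host handed out with the "page not present" notification.
 * The later "page ready" notification carries the same token and is used to
 * find and wake the sleeper.
 */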
struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *e;

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		raw_spin_unlock(&b->lock);
		kfree(e);
		return false;
	}

	n->token = token;
	n->cpu = smp_processor_id();
	init_swait_queue_head(&n->wq);
	hlist_add_head(&n->link, &b->list);
	raw_spin_unlock(&b->lock);
	return true;
}

/*
 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
 * @token:	Token to identify the sleep node entry
 *
 * Invoked from the async pagefault handling code or from the VM exit page
 * fault handler. In both cases RCU is watching.
 */
void kvm_async_pf_task_wait_schedule(u32 token)
{
	struct kvm_task_sleep_node n;
	DECLARE_SWAITQUEUE(wait);

	lockdep_assert_irqs_disabled();

	if (!kvm_async_pf_queue_task(token, &n))
		return;

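	/*
	 * Sleep until apf_task_wake_one() unhashes the node, i.e. until the
	 * host signals that the page is ready. Interrupts are only enabled
	 * across schedule(); the wakeup arrives via the async #PF interrupt
	 * handler (sysvec_kvm_asyncpf_interrupt).
	 */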
	for (;;) {
		prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		local_irq_enable();
		schedule();
		local_irq_disable();
	}
	finish_swait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		struct kvm_task_sleep_node *n;
		struct hlist_node *p, *next;

		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			n = hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else {
		apf_task_wake_one(n);
	}
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

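/*
 * Read and clear the per-CPU async #PF reason flags. Returns 0 when async
 * #PF delivery is not enabled on this CPU. Marked noinstr as it may be
 * called from the noinstr #PF entry path.
 */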
noinstr u32 kvm_read_and_reset_apf_flags(void)
{
	u32 flags = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		flags = __this_cpu_read(apf_reason.flags);
		__this_cpu_write(apf_reason.flags, 0);
	}

	return flags;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);

noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	u32 flags = kvm_read_and_reset_apf_flags();
	irqentry_state_t state;

	if (!flags)
		return false;

	state = irqentry_enter(regs);
	instrumentation_begin();

	/*
	 * If the host managed to inject an async #PF into an interrupt
	 * disabled region, then die hard as this is not going to end well
	 * and the host side is seriously broken.
	 */
	if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
		panic("Host injected async #PF in interrupt disabled region\n");

	if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
		if (unlikely(!(user_mode(regs))))
			panic("Host injected async #PF in kernel mode\n");
		/* Page is swapped out by the host. */
		kvm_async_pf_task_wait_schedule(token);
	} else {
		WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
	}

	instrumentation_end();
	irqentry_exit(regs, state);
	return true;
}

DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	u32 token;

	ack_APIC_irq();

	inc_irq_stat(irq_hv_callback_count);

	if (__this_cpu_read(apf_reason.enabled)) {
		token = __this_cpu_read(apf_reason.token);
		kvm_async_pf_task_wake(token);
		__this_cpu_write(apf_reason.token, 0);
		wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
	}

	set_irq_regs(old_regs);
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_ops.cpu.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("stealtime: cpu %d, msr %llx\n", cpu,
		(unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

static void kvm_guest_cpu_init(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

		WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));

		pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
		pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;

		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
}

static void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
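	/*
	 * The host bumps ->version before and after updating the record
	 * (an odd value means an update is in progress), so re-read until
	 * a stable, even version is observed; this mirrors the seqcount
	 * protocol.
	 */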
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory regions pointed to
 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!sev_active())
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

static bool pv_tlb_flush_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);

static void kvm_guest_cpu_offline(bool shutdown)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	if (!shutdown)
		apf_task_wake_all();
	kvmclock_disable();
}

static int kvm_cpu_online(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	kvm_guest_cpu_init();
	local_irq_restore(flags);
	return 0;
}

#ifdef CONFIG_SMP

static bool pv_ipi_supported(void)
{
	return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
}

static bool pv_sched_yield_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}

#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)

static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	int cpu, apic_id, icr;
	int min = 0, max = 0;
#ifdef CONFIG_X86_64
	__uint128_t ipi_bitmap = 0;
#else
	u64 ipi_bitmap = 0;
#endif
	long ret;

	if (cpumask_empty(mask))
		return;

	local_irq_save(flags);

	switch (vector) {
	default:
		icr = APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr = APIC_DM_NMI;
		break;
	}

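	/*
	 * Build a bitmap of target APIC IDs relative to 'min'. A single
	 * KVM_HC_SEND_IPI hypercall covers a window of at most
	 * KVM_IPI_CLUSTER_SIZE APIC IDs, so whenever the next ID falls
	 * outside the current window, flush the accumulated bitmap with a
	 * hypercall and start a new window.
	 */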
	for_each_cpu(cpu, mask) {
		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
		if (!ipi_bitmap) {
			min = max = apic_id;
		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
			ipi_bitmap <<= min - apic_id;
			min = apic_id;
		} else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
		} else {
			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
				  ret);
			min = max = apic_id;
			ipi_bitmap = 0;
		}
		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
	}

	if (ipi_bitmap) {
		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
		WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
			  ret);
	}

	local_irq_restore(flags);
}

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
	__send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
	const struct cpumask *local_mask;

	cpumask_copy(new_mask, mask);
	cpumask_clear_cpu(this_cpu, new_mask);
	local_mask = new_mask;
	__send_ipi_mask(local_mask, vector);
}

/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
	apic->send_IPI_mask = kvm_send_ipi_mask;
	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
	pr_info("setup PV IPIs\n");
}

static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{
	int cpu;

	native_send_call_func_ipi(mask);

	/* Make sure other vCPUs get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (vcpu_is_preempted(cpu)) {
			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
			break;
		}
	}
}

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	kvm_guest_cpu_offline(false);
	local_irq_restore(flags);
	return 0;
}

#endif

static int kvm_suspend(void)
{
	kvm_guest_cpu_offline(false);

	return 0;
}

static void kvm_resume(void)
{
	kvm_cpu_online(raw_smp_processor_id());
}

static struct syscore_ops kvm_syscore_ops = {
	.suspend	= kvm_suspend,
	.resume		= kvm_resume,
};

/*
 * After a PV feature is registered, the host keeps writing to the registered
 * memory location. If the guest shuts down, that memory is no longer valid.
 * In cases like kexec, where a new kernel is installed, the host would keep
 * writing to what is now a random memory location.
 */
#ifdef CONFIG_KEXEC_CORE
static void kvm_crash_shutdown(struct pt_regs *regs)
{
	kvm_guest_cpu_offline(true);
	native_machine_crash_shutdown(regs);
}
#endif

static void kvm_flush_tlb_others(const struct cpumask *cpumask,
			const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to flush only online vCPUs, and queue flush_on_enter
	 * for preempted vCPUs.
	 */
	for_each_cpu(cpu, flushmask) {
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_others(flushmask, info);
}

static void __init kvm_guest_init(void)
{
	int i;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_ops.time.steal_clock = kvm_steal_clock;
	}

	if (pv_tlb_flush_supported()) {
		pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
		pr_info("KVM setup pv remote TLB flush\n");
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		static_branch_enable(&kvm_async_pf_enabled);
		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
	}

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (pv_sched_yield_supported()) {
		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
		pr_info("setup PV sched yield\n");
	}
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

#ifdef CONFIG_KEXEC_CORE
	machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif

	register_syscore_ops(&kvm_syscore_ops);

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

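/* Cache the result; the CPUID base cannot change after boot. */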
static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
EXPORT_SYMBOL_GPL(kvm_arch_para_hints);

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#if defined(CONFIG_SMP)
	if (pv_ipi_supported())
		kvm_setup_pv_ipi();
#endif
}

static void __init kvm_init_platform(void)
{
	kvmclock_init();
	x86_platform.apic_post_init = kvm_apic_init;
}

#if defined(CONFIG_AMD_MEM_ENCRYPT)
static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* RAX and CPL are already in the GHCB */
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
	ghcb_set_rsi(ghcb, regs->si);
}

static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* No checking of the return state needed */
	return true;
}
#endif

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name				= "KVM",
	.detect				= kvm_detect,
	.type				= X86_HYPER_KVM,
	.init.guest_late_init		= kvm_guest_init,
	.init.x2apic_available		= kvm_para_available,
	.init.init_platform		= kvm_init_platform,
#if defined(CONFIG_AMD_MEM_ENCRYPT)
	.runtime.sev_es_hcall_prepare	= kvm_sev_es_hcall_prepare,
	.runtime.sev_es_hcall_finish	= kvm_sev_es_hcall_finish,
#endif
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

static __init int kvm_alloc_cpumask(void)
{
	int cpu;
	bool alloc = false;

	if (!kvm_para_available() || nopv)
		return 0;

	if (pv_tlb_flush_supported())
		alloc = true;

#if defined(CONFIG_SMP)
	if (pv_ipi_supported())
		alloc = true;
#endif

	if (alloc)
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}

	return 0;
}
arch_initcall(kvm_alloc_cpumask);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for the irq enabled case to avoid hanging when lock info is
	 * overwritten in the irq spinlock slowpath and no spurious
	 * interrupt occurs to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}

#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64 to avoid saving and restoring
 * 8 64-bit registers to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq	__per_cpu_offset(,%rdi,8), %rax;"
"cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne	%al;"
ASM_RET
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	/*
	 * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an
	 * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is
	 * preferred over native qspinlock when vCPU is preempted.
	 */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
		pr_info("PV spinlocks disabled, no host support\n");
		return;
	}

	/*
	 * Disable PV spinlocks and use native qspinlock when dedicated pCPUs
	 * are available.
	 */
	if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
		pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n");
		goto out;
	}

	if (num_possible_cpus() == 1) {
		pr_info("PV spinlocks disabled, single CPU\n");
		goto out;
	}

	if (nopvspin) {
		pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n");
		goto out;
	}

	pr_info("PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = kvm_wait;
	pv_ops.lock.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_ops.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
	/*
	 * When PV spinlock is enabled which is preferred over
	 * virt_spin_lock(), virt_spin_lock_key's value is meaningless.
	 * Just disable it anyway.
	 */
out:
	static_branch_disable(&virt_spin_lock_key);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL

static void kvm_disable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 1);
}

void arch_haltpoll_enable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
		pr_err_once("host does not support poll control\n");
		pr_err_once("host upgrade recommended\n");
		return;
	}

	/* Enabling guest halt polling disables host halt polling */
	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);

void arch_haltpoll_disable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		return;

	/* Disabling guest halt polling re-enables host halt polling */
	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif