/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

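/*
 * Command-line knobs: each early_param() below lets the guest opt out of
 * one paravirtual feature (async page faults, steal time accounting, and
 * the kvmclock vsyscall mapping, respectively).
 */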
static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

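/*
 * Tasks sleeping on an async page fault are parked in a small hash table,
 * keyed by the token the host associates with the fault.
 */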
struct kvm_task_sleep_node {
	struct hlist_node link;
	wait_queue_head_t wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

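/* Find the sleeper registered for @token; the caller must hold b->lock. */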
static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

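/*
 * Invoked when the host signals "page not present": sleep (or, if we
 * cannot schedule, halt) until kvm_async_pf_task_wake() delivers the
 * matching token.
 */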
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DEFINE_WAIT(wait);

	rcu_irq_enter();

	spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1;
	init_waitqueue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			rcu_irq_exit();
			native_safe_halt();
			rcu_irq_enter();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_wait(&n.wq, &wait);

	rcu_irq_exit();
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

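/*
 * Unhash one sleeper and kick it: a reschedule IPI if the waiter halted,
 * a regular wakeup if it slept.
 */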
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (waitqueue_active(&n->wq))
		wake_up(&n->wq);
}

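/* Wake every waiter parked by the current CPU, e.g. on CPU offline. */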
static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		spin_unlock(&b->lock);
	}
}

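/*
 * "Page ready" notification from the host. If no task is waiting on
 * @token yet, leave a dummy entry behind so the late waiter can return
 * immediately.
 */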
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add a dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while another cpu
			 * handles the async PF.
			 */
			spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_waitqueue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	spin_unlock(&b->lock);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

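/* Read and clear the per-cpu fault reason shared with the host. */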
u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__get_cpu_var(apf_reason).enabled) {
		reason = __get_cpu_var(apf_reason).reason;
		__get_cpu_var(apf_reason).reason = 0;
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

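/*
 * Page fault entry point: dispatch host-injected async PF events and
 * fall back to the regular page fault handler for everything else.
 */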
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";
	pv_info.paravirt_enabled = 1;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

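/* Tell the host where this CPU's steal time area lives and enable it. */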
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU, so
	 * there is no need for a lock or memory barriers.
	 * An optimization barrier is implied in the apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}

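/*
 * Per-CPU feature setup: enables async page faults, PV EOI and steal
 * time reporting for the calling CPU.
 */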
void __cpuinit kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__get_cpu_var(apf_reason).enabled = 1;
		printk(KERN_INFO "KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__get_cpu_var(kvm_apic_eoi) = 0;
		pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__get_cpu_var(apf_reason).enabled)
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__get_cpu_var(apf_reason).enabled = 0;

	printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's
	 * memory. The new kernel can re-enable it when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

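/*
 * Read the steal time of @cpu. The version field acts as a seqcount:
 * an odd value means the host is updating the record, so retry.
 */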
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	WARN_ON(kvm_register_clock("primary cpu clock"));
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
}

static void __cpuinit kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
	.notifier_call	= kvm_cpu_notify,
};
#endif

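/* Point the page fault vector (14) at the async-PF-aware handler. */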
static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, &async_page_fault);
}

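/* Boot-time setup: hook paravirt ops and register the advertised features. */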
void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif
}

static bool __init kvm_detect(void)
{
	if (!kvm_para_available())
		return false;
	return true;
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.x2apic_available	= kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

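/*
 * Runs as an arch_initcall: enable the static keys that gate the
 * scheduler's steal time accounting paths.
 */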
static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);