/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	wait_queue_head_t wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

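/*
 * kvm_async_pf_task_wait() runs when the host has reported
 * KVM_PV_REASON_PAGE_NOT_PRESENT for this vcpu: the faulting page is
 * still being paged in on the host, and CR2 carries a token identifying
 * the outstanding fault rather than a guest address.  The current task
 * sleeps (or, when it must not schedule, halts) until the matching
 * kvm_async_pf_task_wake() arrives for the same token.
 */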
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DEFINE_WAIT(wait);

	rcu_irq_enter();

	spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1;
	init_waitqueue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		rcu_irq_exit();

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}

		rcu_irq_enter();
	}
	if (!n.halted)
		finish_wait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (waitqueue_active(&n->wq))
		wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		spin_unlock(&b->lock);
	}
}

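/*
 * kvm_async_pf_task_wake() runs when the host reports
 * KVM_PV_REASON_PAGE_READY for a token.  If the wake-up arrives before
 * the corresponding wait, a dummy hash entry is left behind so the
 * later waiter can return immediately.  A token of ~0 broadcasts a
 * wake-up to every waiter registered on this cpu.
 */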
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_waitqueue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

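/*
 * Async PF notifications arrive through the ordinary page fault vector.
 * A reason of zero means the event is a genuine #PF and is handed to
 * the regular page fault path; the two paravirtual reasons are handled
 * here instead.
 */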
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		trace_do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	/*
	 * KVM isn't paravirt in the sense of paravirt_enabled.  A KVM
	 * guest kernel works like a bare metal kernel with additional
	 * features, and paravirt_enabled is about features that are
	 * missing.
	 */
	pv_info.paravirt_enabled = 0;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}

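/*
 * Per-cpu paravirt feature registration: each feature is enabled by
 * writing the physical address of a per-cpu shared area, together with
 * an enable bit, into the corresponding KVM MSR.  This has to run on
 * the cpu being brought online, since the addresses are per-cpu.
 */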
void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

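/*
 * The host publishes steal time with a seqcount-style protocol: it
 * increments version before and after every update, so an odd version
 * means an update is in flight.  Retry until we read a consistent
 * snapshot (even version, unchanged across the reads).
 */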
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_notify,
};
#endif

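/*
 * Route the #PF vector to the async PF aware entry stub;
 * async_page_fault eventually lands in do_async_page_fault() above.
 */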
static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, async_page_fault);
}

void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	watchdog_enable_hardlockup_detector(false);
}

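/*
 * Hypervisors advertise themselves through CPUID leaves in the
 * 0x40000000 range; hypervisor_cpuid_base() scans that range for the
 * "KVMKVMKVM\0\0\0" signature and returns the matching base leaf, or 0
 * if KVM is not present.
 */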
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (cpu_has_hypervisor)
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.x2apic_available	= kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

enum kvm_contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS
};

#ifdef CONFIG_KVM_DEBUG_FS
#define HISTO_BUCKETS	30

static struct kvm_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	u8 ret;
	u8 old;

	old = ACCESS_ONCE(zero_stats);
	if (unlikely(old)) {
		ret = cmpxchg(&zero_stats, old, 0);
		/* This ensures only one fellow resets the stat */
		if (ret == old)
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}

static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}


static inline u64 spin_time_start(void)
{
	return sched_clock();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index;

	index = ilog2(delta);
	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta;

	delta = sched_clock() - start;
	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}

static struct dentry *d_spin_debug;
static struct dentry *d_kvm_debug;

struct dentry *kvm_init_debugfs(void)
{
	d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
	if (!d_kvm_debug)
		printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");

	return d_kvm_debug;
}

static int __init kvm_spinlock_debugfs(void)
{
	struct dentry *d_kvm;

	d_kvm = kvm_init_debugfs();
	if (d_kvm == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(kvm_spinlock_debugfs);
#else /* !CONFIG_KVM_DEBUG_FS */
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif /* CONFIG_KVM_DEBUG_FS */

struct kvm_lock_waiting {
	struct arch_spinlock *lock;
	__ticket_t want;
};

/* cpus 'waiting' on a spinlock to become available */
static cpumask_t waiting_cpus;

/* Track spinlock on which a cpu is waiting */
static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);

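/*
 * kvm_lock_spinning() is the pv-ticketlock slow path, entered from the
 * lock fast path once a vcpu has spun on a ticket for too long.  The
 * vcpu publishes which lock and ticket it is waiting for, then halts;
 * the eventual unlocker finds it via waiting_cpus and wakes it with
 * kvm_unlock_kick() -> KVM_HC_KICK_CPU.
 */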
__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	struct kvm_lock_waiting *w;
	int cpu;
	u64 start;
	unsigned long flags;

	if (in_nmi())
		return;

	w = this_cpu_ptr(&klock_waiting);
	cpu = smp_processor_id();
	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially setup state.
	 */
	local_irq_save(flags);

	/*
	 * The ordering protocol on this is that the "lock" pointer
	 * may only be set non-NULL if the "want" ticket is correct.
	 * If we're updating "want", we must first clear "lock".
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	add_stats(TAKEN_SLOW, 1);

	/*
	 * This uses set_bit, which is atomic but we should not rely on its
	 * reordering guarantees. So barrier is needed after this call.
	 */
	cpumask_set_cpu(cpu, &waiting_cpus);

	barrier();

	/*
	 * Mark entry to slowpath before doing the pickup test to make
	 * sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/*
	 * Check again to make sure it didn't become free while
	 * we weren't looking.
	 */
	if (ACCESS_ONCE(lock->tickets.head) == want) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/*
	 * halt until it's our turn and kicked. Note that we do safe halt
	 * for irq enabled case to avoid hang when lock info is overwritten
	 * in irq spinlock slowpath and no spurious interrupt occur to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;
	local_irq_restore(flags);
	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);

/* Kick vcpu waiting on @lock->head to reach value @ticket */
static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);
	for_each_cpu(cpu, &waiting_cpus) {
		const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
		if (ACCESS_ONCE(w->lock) == lock &&
		    ACCESS_ONCE(w->want) == ticket) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			kvm_kick_cpu(cpu);
			break;
		}
	}
}

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
	pv_lock_ops.unlock_kick = kvm_unlock_kick;
}

static __init int kvm_spinlock_init_jump(void)
{
	if (!kvm_para_available())
		return 0;
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	printk(KERN_INFO "KVM setup paravirtual spinlock\n");

	return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
