
Lines matching +full:os +full:-initiated in arch/x86/kvm/hyperv.c, KVM's Microsoft Hyper-V emulation. Each line below is prefixed with its line number in the source file and, where it falls inside a function, suffixed with that function's name; gaps in the numbering are non-matching lines omitted from the listing.

1 // SPDX-License-Identifier: GPL-2.0-only
3 * KVM Microsoft Hyper-V emulation
17 * Ben-Ami Yassour <benami@il.ibm.com>
46 return atomic64_read(&synic->sint[sint]); in synic_read_sint()
52 return -1; in synic_get_sint_vector()
61 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { in synic_has_vector_connected()
74 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { in synic_has_vector_auto_eoi()
90 __set_bit(vector, synic->vec_bitmap); in synic_update_vector()
92 __clear_bit(vector, synic->vec_bitmap); in synic_update_vector()
95 __set_bit(vector, synic->auto_eoi_bitmap); in synic_update_vector()
97 __clear_bit(vector, synic->auto_eoi_bitmap); in synic_update_vector()
110 * Valid vectors are 16-255, however, nested Hyper-V attempts to write in synic_set_sint()
111 * default '0x10000' value on SINT setup. Here we in synic_set_sint()
112 * allow zero-initing the register from host as well. in synic_set_sint()
119 * bitmap of vectors with auto-eoi behavior. The bitmaps are updated here, and atomically queried on fast paths. in synic_set_sint()
124 atomic64_set(&synic->sint[sint], data); in synic_set_sint()
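The validation elided just above this write is worth spelling out. A minimal sketch consistent with the comment, assuming the hyperv-tlfs.h semantics that HV_SYNIC_SINT_MASKED is bit 16 (hence the '0x10000' default) and HV_SYNIC_FIRST_VALID_VECTOR is 16; the kernel's exact check may differ:

    int vector  = data & HV_SYNIC_SINT_VECTOR_MASK;   /* low 8 bits */
    bool masked = data & HV_SYNIC_SINT_MASKED;        /* bit 16 */

    /* Reject a guest write of a reserved vector (0-15) unless it is the
     * masked/zero-init pattern; host writes are always allowed. */
    if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
            return 1;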
144 if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx) in get_vcpu_by_vpidx()
147 if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx) in get_vcpu_by_vpidx()
161 return (synic->active) ? synic : NULL; in synic_get()
166 struct kvm *kvm = vcpu->kvm; in kvm_hv_notify_acked_sint()
172 trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint); in kvm_hv_notify_acked_sint()
174 /* Try to deliver pending Hyper-V SynIC timers messages */ in kvm_hv_notify_acked_sint()
175 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) { in kvm_hv_notify_acked_sint()
176 stimer = &hv_vcpu->stimer[idx]; in kvm_hv_notify_acked_sint()
177 if (stimer->msg_pending && stimer->config.enable && in kvm_hv_notify_acked_sint()
178 !stimer->config.direct_mode && in kvm_hv_notify_acked_sint()
179 stimer->config.sintx == sint) in kvm_hv_notify_acked_sint()
183 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_hv_notify_acked_sint()
184 gsi = atomic_read(&synic->sint_to_gsi[sint]); in kvm_hv_notify_acked_sint()
185 if (gsi != -1) in kvm_hv_notify_acked_sint()
187 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_hv_notify_acked_sint()
193 struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; in synic_exit()
195 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC; in synic_exit()
196 hv_vcpu->exit.u.synic.msr = msr; in synic_exit()
197 hv_vcpu->exit.u.synic.control = synic->control; in synic_exit()
198 hv_vcpu->exit.u.synic.evt_page = synic->evt_page; in synic_exit()
199 hv_vcpu->exit.u.synic.msg_page = synic->msg_page; in synic_exit()
210 if (!synic->active && (!host || data)) in synic_set_msr()
213 trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host); in synic_set_msr()
218 synic->control = data; in synic_set_msr()
227 synic->version = data; in synic_set_msr()
231 !synic->dont_zero_synic_pages) in synic_set_msr()
232 if (kvm_clear_guest(vcpu->kvm, in synic_set_msr()
237 synic->evt_page = data; in synic_set_msr()
243 !synic->dont_zero_synic_pages) in synic_set_msr()
244 if (kvm_clear_guest(vcpu->kvm, in synic_set_msr()
249 synic->msg_page = data; in synic_set_msr()
256 if (!synic->active) in synic_set_msr()
259 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) in synic_set_msr()
264 ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host); in synic_set_msr()
283 return entry->eax & HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; in kvm_hv_is_syndbg_enabled()
288 struct kvm *kvm = vcpu->kvm; in kvm_hv_syndbg_complete_userspace()
289 struct kvm_hv *hv = &kvm->arch.hyperv; in kvm_hv_syndbg_complete_userspace()
291 if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL) in kvm_hv_syndbg_complete_userspace()
292 hv->hv_syndbg.control.status = in kvm_hv_syndbg_complete_userspace()
293 vcpu->run->hyperv.u.syndbg.status; in kvm_hv_syndbg_complete_userspace()
300 struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; in syndbg_exit()
302 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG; in syndbg_exit()
303 hv_vcpu->exit.u.syndbg.msr = msr; in syndbg_exit()
304 hv_vcpu->exit.u.syndbg.control = syndbg->control.control; in syndbg_exit()
305 hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page; in syndbg_exit()
306 hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page; in syndbg_exit()
307 hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page; in syndbg_exit()
308 vcpu->arch.complete_userspace_io = in syndbg_exit()
321 trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id, in syndbg_set_msr()
322 vcpu_to_hv_vcpu(vcpu)->vp_index, msr, data); in syndbg_set_msr()
325 syndbg->control.control = data; in syndbg_set_msr()
330 syndbg->control.status = data; in syndbg_set_msr()
333 syndbg->control.send_page = data; in syndbg_set_msr()
336 syndbg->control.recv_page = data; in syndbg_set_msr()
339 syndbg->control.pending_page = data; in syndbg_set_msr()
344 syndbg->options = data; in syndbg_set_msr()
362 *pdata = syndbg->control.control; in syndbg_get_msr()
365 *pdata = syndbg->control.status; in syndbg_get_msr()
368 *pdata = syndbg->control.send_page; in syndbg_get_msr()
371 *pdata = syndbg->control.recv_page; in syndbg_get_msr()
374 *pdata = syndbg->control.pending_page; in syndbg_get_msr()
377 *pdata = syndbg->options; in syndbg_get_msr()
383 trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, in syndbg_get_msr()
384 vcpu_to_hv_vcpu(vcpu)->vp_index, msr, in syndbg_get_msr()
395 if (!synic->active && !host) in synic_get_msr()
401 *pdata = synic->control; in synic_get_msr()
404 *pdata = synic->version; in synic_get_msr()
407 *pdata = synic->evt_page; in synic_get_msr()
410 *pdata = synic->msg_page; in synic_get_msr()
416 *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]); in synic_get_msr()
431 if (sint >= ARRAY_SIZE(synic->sint)) in synic_set_irq()
432 return -EINVAL; in synic_set_irq()
436 return -ENOENT; in synic_set_irq()
445 ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL); in synic_set_irq()
446 trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret); in synic_set_irq()
456 return -EINVAL; in kvm_hv_synic_set_irq()
466 trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector); in kvm_hv_synic_send_eoi()
468 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) in kvm_hv_synic_send_eoi()
479 return -EINVAL; in kvm_hv_set_sint_gsi()
481 if (sint >= ARRAY_SIZE(synic->sint_to_gsi)) in kvm_hv_set_sint_gsi()
482 return -EINVAL; in kvm_hv_set_sint_gsi()
484 atomic_set(&synic->sint_to_gsi[sint], gsi); in kvm_hv_set_sint_gsi()
494 irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, in kvm_hv_irq_routing_update()
495 lockdep_is_held(&kvm->irq_lock)); in kvm_hv_irq_routing_update()
497 for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) { in kvm_hv_irq_routing_update()
498 hlist_for_each_entry(e, &irq_rt->map[gsi], link) { in kvm_hv_irq_routing_update()
499 if (e->type == KVM_IRQ_ROUTING_HV_SINT) in kvm_hv_irq_routing_update()
500 kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu, in kvm_hv_irq_routing_update()
501 e->hv_sint.sint, gsi); in kvm_hv_irq_routing_update()
511 synic->version = HV_SYNIC_VERSION_1; in synic_init()
512 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { in synic_init()
513 atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED); in synic_init()
514 atomic_set(&synic->sint_to_gsi[i], -1); in synic_init()
520 struct kvm_hv *hv = &kvm->arch.hyperv; in get_time_ref_counter()
528 if (!hv->tsc_ref.tsc_sequence) in get_time_ref_counter()
533 return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64) in get_time_ref_counter()
534 + hv->tsc_ref.tsc_offset; in get_time_ref_counter()
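As a standalone illustration of the read above (not the kernel's code): the reference counter ticks in 100ns units, and the product is a 64x64-bit multiply keeping the high 64 bits, which is what mul_u64_u64_shr(tsc, scale, 64) computes. A hypothetical helper using a 128-bit intermediate:

    #include <stdint.h>

    static uint64_t ref_counter_from_tsc(uint64_t tsc, uint64_t scale,
                                         uint64_t offset)
    {
            /* (tsc * scale) >> 64, then add the precomputed offset */
            return (uint64_t)(((unsigned __int128)tsc * scale) >> 64) + offset;
    }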
542 set_bit(stimer->index, in stimer_mark_pending()
543 vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap); in stimer_mark_pending()
553 trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id, in stimer_cleanup()
554 stimer->index); in stimer_cleanup()
556 hrtimer_cancel(&stimer->timer); in stimer_cleanup()
557 clear_bit(stimer->index, in stimer_cleanup()
558 vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap); in stimer_cleanup()
559 stimer->msg_pending = false; in stimer_cleanup()
560 stimer->exp_time = 0; in stimer_cleanup()
568 trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id, in stimer_timer_callback()
569 stimer->index); in stimer_timer_callback()
576 * stimer_start() assumptions:
577 * a) stimer->count is not equal to 0
578 * b) stimer->config has HV_STIMER_ENABLE flag
585 time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm); in stimer_start()
588 if (stimer->config.periodic) { in stimer_start()
589 if (stimer->exp_time) { in stimer_start()
590 if (time_now >= stimer->exp_time) { in stimer_start()
593 div64_u64_rem(time_now - stimer->exp_time, in stimer_start()
594 stimer->count, &remainder); in stimer_start()
595 stimer->exp_time = in stimer_start()
596 time_now + (stimer->count - remainder); in stimer_start()
599 stimer->exp_time = time_now + stimer->count; in stimer_start()
602 stimer_to_vcpu(stimer)->vcpu_id, in stimer_start()
603 stimer->index, in stimer_start()
604 time_now, stimer->exp_time); in stimer_start()
606 hrtimer_start(&stimer->timer, in stimer_start()
608 100 * (stimer->exp_time - time_now)), in stimer_start()
612 stimer->exp_time = stimer->count; in stimer_start()
613 if (time_now >= stimer->count) { in stimer_start()
615 * Expire timer according to Hypervisor Top-Level Functional Specification v4(15.3.1): "If a one shot is enabled and the specified time is in the past, it will expire immediately." in stimer_start()
624 trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id, in stimer_start()
625 stimer->index, in stimer_start()
626 time_now, stimer->count); in stimer_start()
628 hrtimer_start(&stimer->timer, in stimer_start()
629 ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)), in stimer_start()
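The periodic branch realigns a late timer to the next multiple of its period past the original deadline. A small standalone sketch of that rule (hypothetical helper, all times in 100ns ticks):

    #include <stdint.h>

    static uint64_t periodic_next_deadline(uint64_t time_now,
                                           uint64_t exp_time, uint64_t count)
    {
            if (time_now < exp_time)
                    return exp_time;        /* not late: keep the deadline */
            /* mirrors the div64_u64_rem() arithmetic in stimer_start() */
            return time_now + (count - (time_now - exp_time) % count);
    }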
638 old_config = {.as_uint64 = stimer->config.as_uint64}; in stimer_set_config()
642 if (!synic->active && (!host || config)) in stimer_set_config()
645 trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id, in stimer_set_config()
646 stimer->index, config, host); in stimer_set_config()
652 stimer->config.as_uint64 = new_config.as_uint64; in stimer_set_config()
654 if (stimer->config.enable) in stimer_set_config()
666 if (!synic->active && (!host || count)) in stimer_set_count()
669 trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id, in stimer_set_count()
670 stimer->index, count, host); in stimer_set_count()
673 stimer->count = count; in stimer_set_count()
674 if (stimer->count == 0) in stimer_set_count()
675 stimer->config.enable = 0; in stimer_set_count()
676 else if (stimer->config.auto_enable) in stimer_set_count()
677 stimer->config.enable = 1; in stimer_set_count()
679 if (stimer->config.enable) in stimer_set_count()
687 *pconfig = stimer->config.as_uint64; in stimer_get_config()
693 *pcount = stimer->count; in stimer_get_count()
706 if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE)) in synic_deliver_msg()
707 return -ENOENT; in synic_deliver_msg()
709 msg_page_gfn = synic->msg_page >> PAGE_SHIFT; in synic_deliver_msg()
712 * Strictly following the spec-mandated ordering would assume setting .msg_pending before checking .message_type; however, this function is only called in vcpu context, so the entire update is atomic from the guest's point of view and the exact order here does not matter. in synic_deliver_msg()
737 return -EAGAIN; in synic_deliver_msg()
741 sizeof(src_msg->header) + in synic_deliver_msg()
742 src_msg->header.payload_size); in synic_deliver_msg()
750 return -EFAULT; in synic_deliver_msg()
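The elided body implements the per-SINT message-slot protocol. An illustrative flow, assuming hdr is the struct hv_message_header read back from the SINT's slot in the guest's message page (a sketch based on the TLFS, not verbatim kernel code):

    if (hdr.message_type != HVMSG_NONE) {
            if (no_retry)
                    return 0;               /* e.g. periodic timer messages */
            hdr.message_flags.msg_pending = 1;  /* guest will retry via EOM */
            /* ...write the flags byte back to the guest page... */
            return -EAGAIN;
    }
    /* slot is free: copy the header and payload into the guest page */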
757 struct hv_message *msg = &stimer->msg; in stimer_send_msg()
759 (struct hv_timer_message_payload *)&msg->u.payload; in stimer_send_msg()
765 bool no_retry = stimer->config.periodic; in stimer_send_msg()
767 payload->expiration_time = stimer->exp_time; in stimer_send_msg()
768 payload->delivery_time = get_time_ref_counter(vcpu->kvm); in stimer_send_msg()
770 stimer->config.sintx, msg, in stimer_send_msg()
779 .vector = stimer->config.apic_vector in stimer_notify_direct()
789 int r, direct = stimer->config.direct_mode; in stimer_expiration()
791 stimer->msg_pending = true; in stimer_expiration()
796 trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id, in stimer_expiration()
797 stimer->index, direct, r); in stimer_expiration()
799 stimer->msg_pending = false; in stimer_expiration()
800 if (!(stimer->config.periodic)) in stimer_expiration()
801 stimer->config.enable = 0; in stimer_expiration()
812 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_process_stimers()
813 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) { in kvm_hv_process_stimers()
814 stimer = &hv_vcpu->stimer[i]; in kvm_hv_process_stimers()
815 if (stimer->config.enable) { in kvm_hv_process_stimers()
816 exp_time = stimer->exp_time; in kvm_hv_process_stimers()
820 get_time_ref_counter(vcpu->kvm); in kvm_hv_process_stimers()
825 if ((stimer->config.enable) && in kvm_hv_process_stimers()
826 stimer->count) { in kvm_hv_process_stimers()
827 if (!stimer->msg_pending) in kvm_hv_process_stimers()
840 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_vcpu_uninit()
841 stimer_cleanup(&hv_vcpu->stimer[i]); in kvm_hv_vcpu_uninit()
846 if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) in kvm_hv_assist_page_enabled()
848 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; in kvm_hv_assist_page_enabled()
857 return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, in kvm_hv_get_assist_page()
864 struct hv_message *msg = &stimer->msg; in stimer_prepare_msg()
866 (struct hv_timer_message_payload *)&msg->u.payload; in stimer_prepare_msg()
868 memset(&msg->header, 0, sizeof(msg->header)); in stimer_prepare_msg()
869 msg->header.message_type = HVMSG_TIMER_EXPIRED; in stimer_prepare_msg()
870 msg->header.payload_size = sizeof(*payload); in stimer_prepare_msg()
872 payload->timer_index = stimer->index; in stimer_prepare_msg()
873 payload->expiration_time = 0; in stimer_prepare_msg()
874 payload->delivery_time = 0; in stimer_prepare_msg()
880 stimer->index = timer_index; in stimer_init()
881 hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); in stimer_init()
882 stimer->timer.function = stimer_timer_callback; in stimer_init()
891 synic_init(&hv_vcpu->synic); in kvm_hv_vcpu_init()
893 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); in kvm_hv_vcpu_init()
894 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_vcpu_init()
895 stimer_init(&hv_vcpu->stimer[i], i); in kvm_hv_vcpu_init()
902 hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu); in kvm_hv_vcpu_postcreate()
910 * Hyper-V SynIC auto EOI SINT's are not compatible with APICV, so request APICV to be deactivated. in kvm_hv_activate_synic()
914 kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_HYPERV); in kvm_hv_activate_synic()
915 synic->active = true; in kvm_hv_activate_synic()
916 synic->dont_zero_synic_pages = dont_zero_synic_pages; in kvm_hv_activate_synic()
917 synic->control = HV_SYNIC_CONTROL_ENABLE; in kvm_hv_activate_synic()
948 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; in kvm_hv_msr_get_crash_data()
949 size_t size = ARRAY_SIZE(hv->hv_crash_param); in kvm_hv_msr_get_crash_data()
952 return -EINVAL; in kvm_hv_msr_get_crash_data()
954 *pdata = hv->hv_crash_param[array_index_nospec(index, size)]; in kvm_hv_msr_get_crash_data()
960 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; in kvm_hv_msr_get_crash_ctl()
962 *pdata = hv->hv_crash_ctl; in kvm_hv_msr_get_crash_ctl()
968 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; in kvm_hv_msr_set_crash_ctl()
971 hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY; in kvm_hv_msr_set_crash_ctl()
976 hv->hv_crash_param[0], in kvm_hv_msr_set_crash_ctl()
977 hv->hv_crash_param[1], in kvm_hv_msr_set_crash_ctl()
978 hv->hv_crash_param[2], in kvm_hv_msr_set_crash_ctl()
979 hv->hv_crash_param[3], in kvm_hv_msr_set_crash_ctl()
980 hv->hv_crash_param[4]); in kvm_hv_msr_set_crash_ctl()
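For context, the guest-side protocol these MSRs serve (a hypothetical sketch of what a crashing guest does, using an assumed wrmsr(msr, value) helper; KVM only consumes the values): fill P0-P4 with the crash details, then set the notify bit:

    wrmsr(HV_X64_MSR_CRASH_P0, bugcheck_code);
    wrmsr(HV_X64_MSR_CRASH_P1, param1);
    wrmsr(HV_X64_MSR_CRASH_P2, param2);
    wrmsr(HV_X64_MSR_CRASH_P3, param3);
    wrmsr(HV_X64_MSR_CRASH_P4, param4);
    /* the notify write is what triggers the trace/report on the KVM side */
    wrmsr(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);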
992 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; in kvm_hv_msr_set_crash_data()
993 size_t size = ARRAY_SIZE(hv->hv_crash_param); in kvm_hv_msr_set_crash_data()
996 return -EINVAL; in kvm_hv_msr_set_crash_data()
998 hv->hv_crash_param[array_index_nospec(index, size)] = data; in kvm_hv_msr_set_crash_data()
1003 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
1004 * between them is straightforward.
1005 *
1006 * kvmclock formula:
1007 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
1008 *           + system_time
1009 *
1010 * Hyper-V formula:
1011 *    nsec/100 = ticks * scale / 2^64 + offset
1012 *
1013 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
1014 * By dividing the kvmclock formula by 100 and equating what's left we get:
1015 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1016 *    scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
1017 *    scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
1018 *
1019 * Now expand the kvmclock formula and divide by 100:
1020 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
1021 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
1022 *           + system_time
1023 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1024 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1025 *               + system_time / 100
1026 *
1027 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
1028 *    nsec/100 = ticks * scale / 2^64
1029 *               - tsc_timestamp * scale / 2^64
1030 *               + system_time / 100
1031 *
1032 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
1033 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
1042 if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT)) in compute_tsc_page_parameters()
1048 * tsc_to_system_mul / 100 >= 2^(32-tsc_shift) in compute_tsc_page_parameters()
1049 * tsc_to_system_mul >= 100 * 2^(32-tsc_shift) in compute_tsc_page_parameters()
1051 max_mul = 100ull << (32 - hv_clock->tsc_shift); in compute_tsc_page_parameters()
1052 if (hv_clock->tsc_to_system_mul >= max_mul) in compute_tsc_page_parameters()
1059 tsc_ref->tsc_scale = in compute_tsc_page_parameters()
1060 mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift), in compute_tsc_page_parameters()
1061 hv_clock->tsc_to_system_mul, in compute_tsc_page_parameters()
1064 tsc_ref->tsc_offset = hv_clock->system_time; in compute_tsc_page_parameters()
1065 do_div(tsc_ref->tsc_offset, 100); in compute_tsc_page_parameters()
1066 tsc_ref->tsc_offset -= in compute_tsc_page_parameters()
1067 mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64); in compute_tsc_page_parameters()
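A standalone rendering of the two results derived in the comment above (a sketch, assuming the stable-clock and overflow checks have already passed; the kernel's mul_u64_u32_div()/mul_u64_u64_shr() helpers are replaced by a 128-bit intermediate):

    #include <stdint.h>

    static void kvmclock_to_tsc_page(uint32_t tsc_to_system_mul,
                                     int8_t tsc_shift, uint64_t system_time,
                                     uint64_t tsc_timestamp,
                                     uint64_t *scale, uint64_t *offset)
    {
            /* scale = tsc_to_system_mul * 2^(32 + tsc_shift) / 100 */
            *scale = (uint64_t)((((unsigned __int128)1 << (32 + tsc_shift)) *
                                 tsc_to_system_mul) / 100);
            /* offset = system_time / 100 - tsc_timestamp * scale / 2^64 */
            *offset = system_time / 100 -
                      (uint64_t)(((unsigned __int128)tsc_timestamp * *scale) >> 64);
    }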
1074 struct kvm_hv *hv = &kvm->arch.hyperv; in kvm_hv_setup_tsc_page()
1078 BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence)); in kvm_hv_setup_tsc_page()
1081 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) in kvm_hv_setup_tsc_page()
1084 mutex_lock(&kvm->arch.hyperv.hv_lock); in kvm_hv_setup_tsc_page()
1085 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) in kvm_hv_setup_tsc_page()
1088 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; in kvm_hv_setup_tsc_page()
1101 hv->tsc_ref.tsc_sequence = 0; in kvm_hv_setup_tsc_page()
1103 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) in kvm_hv_setup_tsc_page()
1106 if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref)) in kvm_hv_setup_tsc_page()
1111 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) in kvm_hv_setup_tsc_page()
1121 /* Write the struct entirely before the non-zero sequence. */ in kvm_hv_setup_tsc_page()
1124 hv->tsc_ref.tsc_sequence = tsc_seq; in kvm_hv_setup_tsc_page()
1126 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)); in kvm_hv_setup_tsc_page()
1128 mutex_unlock(&kvm->arch.hyperv.hv_lock); in kvm_hv_setup_tsc_page()
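The guest-side counterpart of this publication protocol, as a sketch (not Linux's actual Hyper-V clocksource): a zero sequence means the page is disabled or mid-update, so the reader falls back to the HV_X64_MSR_TIME_REF_COUNT MSR; a sequence that changes across the reads means retry:

    #include <stdint.h>

    struct tsc_ref_sketch {                 /* illustrative layout */
            volatile uint32_t tsc_sequence;
            uint64_t tsc_scale;
            uint64_t tsc_offset;
    };

    static int read_tsc_page(const struct tsc_ref_sketch *p, uint64_t tsc,
                             uint64_t *ref_time)
    {
            uint32_t seq;

            do {
                    seq = p->tsc_sequence;
                    if (seq == 0)
                            return -1;      /* fall back to the MSR */
                    __atomic_thread_fence(__ATOMIC_ACQUIRE);
                    *ref_time = (uint64_t)(((unsigned __int128)tsc *
                                            p->tsc_scale) >> 64) + p->tsc_offset;
                    __atomic_thread_fence(__ATOMIC_ACQUIRE);
            } while (p->tsc_sequence != seq);  /* retry if updated mid-read */

            return 0;
    }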
1134 struct kvm *kvm = vcpu->kvm; in kvm_hv_set_msr_pw()
1135 struct kvm_hv *hv = &kvm->arch.hyperv; in kvm_hv_set_msr_pw()
1139 hv->hv_guest_os_id = data; in kvm_hv_set_msr_pw()
1140 /* setting guest os id to zero disables hypercall page */ in kvm_hv_set_msr_pw()
1141 if (!hv->hv_guest_os_id) in kvm_hv_set_msr_pw()
1142 hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; in kvm_hv_set_msr_pw()
1149 /* if guest os id is not set hypercall should remain disabled */ in kvm_hv_set_msr_pw()
1150 if (!hv->hv_guest_os_id) in kvm_hv_set_msr_pw()
1153 hv->hv_hypercall = data; in kvm_hv_set_msr_pw()
1164 hv->hv_hypercall = data; in kvm_hv_set_msr_pw()
1169 hv->hv_tsc_page = data; in kvm_hv_set_msr_pw()
1170 if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) in kvm_hv_set_msr_pw()
1175 msr - HV_X64_MSR_CRASH_P0, in kvm_hv_set_msr_pw()
1181 vcpu_debug(vcpu, "hyper-v reset requested\n"); in kvm_hv_set_msr_pw()
1186 hv->hv_reenlightenment_control = data; in kvm_hv_set_msr_pw()
1189 hv->hv_tsc_emulation_control = data; in kvm_hv_set_msr_pw()
1192 hv->hv_tsc_emulation_status = data; in kvm_hv_set_msr_pw()
1195 /* read-only, but still ignore it if host-initiated */ in kvm_hv_set_msr_pw()
1203 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n", in kvm_hv_set_msr_pw()
1222 struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; in kvm_hv_set_msr()
1226 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; in kvm_hv_set_msr()
1233 if (new_vp_index == hv_vcpu->vp_index) in kvm_hv_set_msr()
1242 if (hv_vcpu->vp_index == vcpu_idx) in kvm_hv_set_msr()
1243 atomic_inc(&hv->num_mismatched_vp_indexes); in kvm_hv_set_msr()
1245 atomic_dec(&hv->num_mismatched_vp_indexes); in kvm_hv_set_msr()
1247 hv_vcpu->vp_index = new_vp_index; in kvm_hv_set_msr()
1255 hv_vcpu->hv_vapic = data; in kvm_hv_set_msr()
1272 hv_vcpu->hv_vapic = data; in kvm_hv_set_msr()
1289 hv_vcpu->runtime_offset = data - current_task_runtime_100ns(); in kvm_hv_set_msr()
1302 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; in kvm_hv_set_msr()
1311 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; in kvm_hv_set_msr()
1318 /* read-only, but still ignore it if host-initiated */ in kvm_hv_set_msr()
1323 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n", in kvm_hv_set_msr()
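The /2 in the synthetic-timer decode above works because each timer's CONFIG and COUNT MSRs interleave in the MSR space (0x400000B0, 0x400000B1, 0x400000B2, ... per the Hyper-V TLFS), so one subtraction recovers both the timer index and the register type:

    int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG) / 2;
    bool is_count   = (msr - HV_X64_MSR_STIMER0_CONFIG) & 1;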
1335 struct kvm *kvm = vcpu->kvm; in kvm_hv_get_msr_pw()
1336 struct kvm_hv *hv = &kvm->arch.hyperv; in kvm_hv_get_msr_pw()
1340 data = hv->hv_guest_os_id; in kvm_hv_get_msr_pw()
1343 data = hv->hv_hypercall; in kvm_hv_get_msr_pw()
1349 data = hv->hv_tsc_page; in kvm_hv_get_msr_pw()
1353 msr - HV_X64_MSR_CRASH_P0, in kvm_hv_get_msr_pw()
1361 data = hv->hv_reenlightenment_control; in kvm_hv_get_msr_pw()
1364 data = hv->hv_tsc_emulation_control; in kvm_hv_get_msr_pw()
1367 data = hv->hv_tsc_emulation_status; in kvm_hv_get_msr_pw()
1373 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); in kvm_hv_get_msr_pw()
1385 struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; in kvm_hv_get_msr()
1389 data = hv_vcpu->vp_index; in kvm_hv_get_msr()
1398 data = hv_vcpu->hv_vapic; in kvm_hv_get_msr()
1401 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset; in kvm_hv_get_msr()
1414 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; in kvm_hv_get_msr()
1423 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; in kvm_hv_get_msr()
1429 data = (u64)vcpu->arch.virtual_tsc_khz * 1000; in kvm_hv_get_msr()
1435 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); in kvm_hv_get_msr()
1447 mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock); in kvm_hv_set_msr_common()
1449 mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock); in kvm_hv_set_msr_common()
1460 mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock); in kvm_hv_get_msr_common()
1462 mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock); in kvm_hv_get_msr_common()
1472 struct kvm_hv *hv = &kvm->arch.hyperv; in sparse_set_to_vcpu_mask()
1482 if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) { in sparse_set_to_vcpu_mask()
1489 if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index, in sparse_set_to_vcpu_mask()
1499 struct kvm *kvm = current_vcpu->kvm; in kvm_hv_flush_tlb()
1500 struct kvm_vcpu_hv *hv_vcpu = &current_vcpu->arch.hyperv; in kvm_hv_flush_tlb()
1560 cpumask_clear(&hv_vcpu->tlb_flush); in kvm_hv_flush_tlb()
1567 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't analyze it here; flush TLB regardless of the specified address space. in kvm_hv_flush_tlb()
1571 NULL, vcpu_mask, &hv_vcpu->tlb_flush); in kvm_hv_flush_tlb()
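Both the flush and (below) the IPI hypercalls take the TLFS sparse VP-set encoding that sparse_set_to_vcpu_mask() expands: valid_bank_mask selects 64-VP banks, and each supplied bank word carries one bit per VP. A hypothetical standalone decoder:

    #include <stdint.h>

    static int sparse_set_expand(uint64_t valid_bank_mask,
                                 const uint64_t *banks, uint32_t *vp, int max)
    {
            int n = 0, i = 0;

            for (int bank = 0; bank < 64; bank++) {
                    if (!(valid_bank_mask & (1ULL << bank)))
                            continue;
                    uint64_t word = banks[i++];     /* banks[] is packed */
                    for (int b = 0; b < 64; b++)
                            if ((word & (1ULL << b)) && n < max)
                                    vp[n++] = bank * 64 + b;  /* VP index */
            }
            return n;
    }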
1601 struct kvm *kvm = current_vcpu->kvm; in kvm_hv_send_ipi()
1677 return READ_ONCE(kvm->arch.hyperv.hv_guest_os_id) != 0; in kvm_hv_hypercall_enabled()
1696 ++vcpu->stat.hypercalls; in kvm_hv_hypercall_complete()
1702 return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result); in kvm_hv_hypercall_complete_userspace()
1713 if ((gpa & (__alignof__(param) - 1)) || in kvm_hvcall_signal_event()
1723 * Per spec, bits 32-47 contain the extra "flag number". However, we have no use for it, and in all known usecases it is zero, so just report lookup failure if it isn't. in kvm_hvcall_signal_event()
1729 /* remaining bits are reserved-zero */ in kvm_hvcall_signal_event()
1733 /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */ in kvm_hvcall_signal_event()
1735 eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param); in kvm_hvcall_signal_event()
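For reference, the decode this function performs on the HvSignalEvent parameter (per the TLFS; KVM_HYPERV_CONN_ID_MASK is the uapi constant used above):

    u32 conn_id = param & KVM_HYPERV_CONN_ID_MASK;  /* low 24 bits */
    u16 flag_no = (param >> 32) & 0xffff;           /* must be zero here */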
1751 * hypercall generates UD from non zero cpl and real mode in kvm_hv_hypercall()
1752 * per HYPER-V spec in kvm_hv_hypercall()
1802 if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) { in kvm_hv_hypercall()
1806 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in kvm_hv_hypercall()
1807 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; in kvm_hv_hypercall()
1808 vcpu->run->hyperv.u.hcall.input = param; in kvm_hv_hypercall()
1809 vcpu->run->hyperv.u.hcall.params[0] = ingpa; in kvm_hv_hypercall()
1810 vcpu->run->hyperv.u.hcall.params[1] = outgpa; in kvm_hv_hypercall()
1811 vcpu->arch.complete_userspace_io = in kvm_hv_hypercall()
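Earlier in kvm_hv_hypercall() (elided from this listing) the 64-bit input value is unpacked per the TLFS; a sketch of that layout:

    u16  code    = param & 0xffff;          /* call code */
    bool fast    = (param >> 16) & 1;       /* register-based arguments */
    u16  rep_cnt = (param >> 32) & 0xfff;   /* rep count */
    u16  rep_idx = (param >> 48) & 0xfff;   /* rep start index */
    bool rep     = rep_cnt || rep_idx;      /* what the 'rep' checks test */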
1871 if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) { in kvm_hv_hypercall()
1875 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in kvm_hv_hypercall()
1876 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; in kvm_hv_hypercall()
1877 vcpu->run->hyperv.u.hcall.input = param; in kvm_hv_hypercall()
1878 vcpu->run->hyperv.u.hcall.params[0] = ingpa; in kvm_hv_hypercall()
1879 vcpu->run->hyperv.u.hcall.params[1] = outgpa; in kvm_hv_hypercall()
1880 vcpu->arch.complete_userspace_io = in kvm_hv_hypercall()
1894 mutex_init(&kvm->arch.hyperv.hv_lock); in kvm_hv_init_vm()
1895 idr_init(&kvm->arch.hyperv.conn_to_evt); in kvm_hv_init_vm()
1903 idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i) in kvm_hv_destroy_vm()
1905 idr_destroy(&kvm->arch.hyperv.conn_to_evt); in kvm_hv_destroy_vm()
1910 struct kvm_hv *hv = &kvm->arch.hyperv; in kvm_hv_eventfd_assign()
1918 mutex_lock(&hv->hv_lock); in kvm_hv_eventfd_assign()
1919 ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1, in kvm_hv_eventfd_assign()
1921 mutex_unlock(&hv->hv_lock); in kvm_hv_eventfd_assign()
1926 if (ret == -ENOSPC) in kvm_hv_eventfd_assign()
1927 ret = -EEXIST; in kvm_hv_eventfd_assign()
1934 struct kvm_hv *hv = &kvm->arch.hyperv; in kvm_hv_eventfd_deassign()
1937 mutex_lock(&hv->hv_lock); in kvm_hv_eventfd_deassign()
1938 eventfd = idr_remove(&hv->conn_to_evt, conn_id); in kvm_hv_eventfd_deassign()
1939 mutex_unlock(&hv->hv_lock); in kvm_hv_eventfd_deassign()
1942 return -ENOENT; in kvm_hv_eventfd_deassign()
1944 synchronize_srcu(&kvm->srcu); in kvm_hv_eventfd_deassign()
1951 if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) || in kvm_vm_ioctl_hv_eventfd()
1952 (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK)) in kvm_vm_ioctl_hv_eventfd()
1953 return -EINVAL; in kvm_vm_ioctl_hv_eventfd()
1955 if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN) in kvm_vm_ioctl_hv_eventfd()
1956 return kvm_hv_eventfd_deassign(kvm, args->conn_id); in kvm_vm_ioctl_hv_eventfd()
1957 return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd); in kvm_vm_ioctl_hv_eventfd()
1978 if (kvm_x86_ops.nested_ops->get_evmcs_version) in kvm_vcpu_ioctl_get_hv_cpuid()
1979 evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu); in kvm_vcpu_ioctl_get_hv_cpuid()
1983 --nent; in kvm_vcpu_ioctl_get_hv_cpuid()
1985 if (cpuid->nent < nent) in kvm_vcpu_ioctl_get_hv_cpuid()
1986 return -E2BIG; in kvm_vcpu_ioctl_get_hv_cpuid()
1988 if (cpuid->nent > nent) in kvm_vcpu_ioctl_get_hv_cpuid()
1989 cpuid->nent = nent; in kvm_vcpu_ioctl_get_hv_cpuid()
1995 switch (ent->function) { in kvm_vcpu_ioctl_get_hv_cpuid()
1999 ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES; in kvm_vcpu_ioctl_get_hv_cpuid()
2000 ent->ebx = signature[0]; in kvm_vcpu_ioctl_get_hv_cpuid()
2001 ent->ecx = signature[1]; in kvm_vcpu_ioctl_get_hv_cpuid()
2002 ent->edx = signature[2]; in kvm_vcpu_ioctl_get_hv_cpuid()
2007 ent->eax = signature[0]; in kvm_vcpu_ioctl_get_hv_cpuid()
2012 * We implement some Hyper-V 2016 functions, so let's report a matching Hyper-V 2016 identity: build 14393 (0x3839) and version 10.0 (0x000A0000), as set just below. in kvm_vcpu_ioctl_get_hv_cpuid()
2015 ent->eax = 0x00003839; in kvm_vcpu_ioctl_get_hv_cpuid()
2016 ent->ebx = 0x000A0000; in kvm_vcpu_ioctl_get_hv_cpuid()
2020 ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2021 ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2022 ent->eax |= HV_MSR_SYNIC_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2023 ent->eax |= HV_MSR_SYNTIMER_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2024 ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2025 ent->eax |= HV_MSR_HYPERCALL_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2026 ent->eax |= HV_MSR_VP_INDEX_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2027 ent->eax |= HV_MSR_RESET_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2028 ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2029 ent->eax |= HV_ACCESS_FREQUENCY_MSRS; in kvm_vcpu_ioctl_get_hv_cpuid()
2030 ent->eax |= HV_ACCESS_REENLIGHTENMENT; in kvm_vcpu_ioctl_get_hv_cpuid()
2032 ent->ebx |= HV_POST_MESSAGES; in kvm_vcpu_ioctl_get_hv_cpuid()
2033 ent->ebx |= HV_SIGNAL_EVENTS; in kvm_vcpu_ioctl_get_hv_cpuid()
2035 ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2036 ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2038 ent->ebx |= HV_DEBUGGING; in kvm_vcpu_ioctl_get_hv_cpuid()
2039 ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2040 ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2043 * Direct Synthetic timers only make sense with in-kernel LAPIC. in kvm_vcpu_ioctl_get_hv_cpuid()
2047 ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2052 ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; in kvm_vcpu_ioctl_get_hv_cpuid()
2053 ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; in kvm_vcpu_ioctl_get_hv_cpuid()
2054 ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; in kvm_vcpu_ioctl_get_hv_cpuid()
2055 ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; in kvm_vcpu_ioctl_get_hv_cpuid()
2056 ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; in kvm_vcpu_ioctl_get_hv_cpuid()
2058 ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; in kvm_vcpu_ioctl_get_hv_cpuid()
2060 ent->eax |= HV_X64_NO_NONARCH_CORESHARING; in kvm_vcpu_ioctl_get_hv_cpuid()
2065 ent->ebx = 0x00000FFF; in kvm_vcpu_ioctl_get_hv_cpuid()
2071 ent->eax = KVM_MAX_VCPUS; in kvm_vcpu_ioctl_get_hv_cpuid()
2076 ent->ebx = 64; in kvm_vcpu_ioctl_get_hv_cpuid()
2081 ent->eax = evmcs_ver; in kvm_vcpu_ioctl_get_hv_cpuid()
2088 ent->eax = 0; in kvm_vcpu_ioctl_get_hv_cpuid()
2089 ent->ebx = signature[0]; in kvm_vcpu_ioctl_get_hv_cpuid()
2090 ent->ecx = signature[1]; in kvm_vcpu_ioctl_get_hv_cpuid()
2091 ent->edx = signature[2]; in kvm_vcpu_ioctl_get_hv_cpuid()
2096 ent->eax = signature[0]; in kvm_vcpu_ioctl_get_hv_cpuid()
2100 ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; in kvm_vcpu_ioctl_get_hv_cpuid()
2110 return -EFAULT; in kvm_vcpu_ioctl_get_hv_cpuid()
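Userspace reaches this function through the KVM_GET_SUPPORTED_HV_CPUID ioctl. A minimal usage sketch (vcpu_fd is an assumed open vCPU descriptor, error handling elided; nesting a fixed entries[] array after struct kvm_cpuid2 is the usual KVM userspace idiom for its flexible array member):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int get_hv_cpuid(int vcpu_fd)
    {
            struct {
                    struct kvm_cpuid2 cpuid;
                    struct kvm_cpuid_entry2 entries[16];
            } buf = { .cpuid = { .nent = 16 } };

            /* fails with E2BIG if 16 entries were not enough */
            return ioctl(vcpu_fd, KVM_GET_SUPPORTED_HV_CPUID, &buf.cpuid);
    }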