Lines matching the full-text query "reserved-ipi-vectors" in Linux arch/x86/kvm/lapic.c
(each entry: source line number, the matched line, and, where applicable, the enclosing function)
1 // SPDX-License-Identifier: GPL-2.0-only
46 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
57 #define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
68 /* step-by-step approximation to mitigate fluctuation */
78 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_pending_eoi()
80 return apic_test_vector(vector, apic->regs + APIC_ISR) || in kvm_apic_pending_eoi()
81 apic_test_vector(vector, apic->regs + APIC_IRR); in kvm_apic_pending_eoi()
111 return apic->vcpu->vcpu_id; in kvm_x2apic_id()
117 (kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm)); in kvm_can_post_timer_interrupt()
123 && !(kvm_mwait_in_guest(vcpu->kvm) || in kvm_can_use_hv_timer()
130 return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE; in kvm_use_posted_timer_interrupt()
135 switch (map->mode) { in kvm_apic_map_get_logical_dest()
138 u32 max_apic_id = map->max_apic_id; in kvm_apic_map_get_logical_dest()
141 u8 cluster_size = min(max_apic_id - offset + 1, 16U); in kvm_apic_map_get_logical_dest()
143 offset = array_index_nospec(offset, map->max_apic_id + 1); in kvm_apic_map_get_logical_dest()
144 *cluster = &map->phys_map[offset]; in kvm_apic_map_get_logical_dest()
145 *mask = dest_id & (0xffff >> (16 - cluster_size)); in kvm_apic_map_get_logical_dest()
153 *cluster = map->xapic_flat_map; in kvm_apic_map_get_logical_dest()
157 *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf]; in kvm_apic_map_get_logical_dest()
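
As an aside, a minimal sketch of the logical-destination decode these fragments implement: in x2APIC mode the 32-bit logical ID splits into a 16-bit cluster number (high half) and a 16-bit in-cluster bitmask (low half); flat xAPIC uses the 8 LDR bits as one mask, clustered xAPIC uses the high nibble to pick a cluster. The standalone function below is illustrative only; its name and signature are not from lapic.c.

	#include <stdint.h>

	/* x2APIC logical ID: [31:16] cluster number, [15:0] in-cluster mask. */
	static void x2apic_logical_decode(uint32_t dest_id,
					  uint32_t *cluster, uint16_t *mask)
	{
		*cluster = dest_id >> 16;
		*mask = dest_id & 0xffff;	/* one bit per APIC in the cluster */
	}
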
174 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
176 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
192 /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */ in kvm_recalculate_apic_map()
193 if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN) in kvm_recalculate_apic_map()
196 mutex_lock(&kvm->arch.apic_map_lock); in kvm_recalculate_apic_map()
198 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map in kvm_recalculate_apic_map()
201 if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty, in kvm_recalculate_apic_map()
204 mutex_unlock(&kvm->arch.apic_map_lock); in kvm_recalculate_apic_map()
210 max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic)); in kvm_recalculate_apic_map()
219 new->max_apic_id = max_id; in kvm_recalculate_apic_map()
222 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_recalculate_apic_map()
237 x2apic_id <= new->max_apic_id) in kvm_recalculate_apic_map()
238 new->phys_map[x2apic_id] = apic; in kvm_recalculate_apic_map()
240 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around, in kvm_recalculate_apic_map()
243 if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id]) in kvm_recalculate_apic_map()
244 new->phys_map[xapic_id] = apic; in kvm_recalculate_apic_map()
252 new->mode |= KVM_APIC_MODE_X2APIC; in kvm_recalculate_apic_map()
256 new->mode |= KVM_APIC_MODE_XAPIC_FLAT; in kvm_recalculate_apic_map()
258 new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER; in kvm_recalculate_apic_map()
265 cluster[ffs(mask) - 1] = apic; in kvm_recalculate_apic_map()
268 old = rcu_dereference_protected(kvm->arch.apic_map, in kvm_recalculate_apic_map()
269 lockdep_is_held(&kvm->arch.apic_map_lock)); in kvm_recalculate_apic_map()
270 rcu_assign_pointer(kvm->arch.apic_map, new); in kvm_recalculate_apic_map()
272 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty. in kvm_recalculate_apic_map()
275 atomic_cmpxchg_release(&kvm->arch.apic_map_dirty, in kvm_recalculate_apic_map()
277 mutex_unlock(&kvm->arch.apic_map_lock); in kvm_recalculate_apic_map()
280 call_rcu(&old->rcu, kvm_apic_map_free); in kvm_recalculate_apic_map()
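
A rough userspace re-creation of the CLEAN/DIRTY/UPDATE_IN_PROGRESS protocol described in the comment above, using C11 atomics in place of the kernel's atomic_t helpers (names and memory-order details are therefore approximations, not the kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>

	enum map_state { CLEAN, UPDATE_IN_PROGRESS, DIRTY };

	static _Atomic int map_state = CLEAN;

	/* Writers mark the map dirty without holding the lock. */
	static void mark_dirty(void)
	{
		atomic_store_explicit(&map_state, DIRTY, memory_order_release);
	}

	/* The (locked) updater claims a DIRTY map, rebuilds, then clears it. */
	static bool try_recalculate(void)
	{
		int expected = DIRTY;

		if (!atomic_compare_exchange_strong_explicit(&map_state,
				&expected, UPDATE_IN_PROGRESS,
				memory_order_acquire, memory_order_relaxed))
			return false;	/* CLEAN already, or update in flight */

		/* ... rebuild and publish the new map here ... */

		expected = UPDATE_IN_PROGRESS;
		/* Stays DIRTY if a writer raced in; the next pass retries. */
		atomic_compare_exchange_strong_explicit(&map_state, &expected,
				CLEAN, memory_order_release,
				memory_order_relaxed);
		return true;
	}
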
291 if (enabled != apic->sw_enabled) { in apic_set_spiv()
292 apic->sw_enabled = enabled; in apic_set_spiv()
298 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); in apic_set_spiv()
303 kvm_make_request(KVM_REQ_APF_READY, apic->vcpu); in apic_set_spiv()
309 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); in kvm_apic_set_xapic_id()
315 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); in kvm_apic_set_ldr()
321 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); in kvm_apic_set_dfr()
333 WARN_ON_ONCE(id != apic->vcpu->vcpu_id); in kvm_apic_set_x2apic_id()
337 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); in kvm_apic_set_x2apic_id()
347 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT; in apic_lvtt_oneshot()
352 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC; in apic_lvtt_period()
357 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE; in apic_lvtt_tscdeadline()
367 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_set_version()
374 * KVM emulates 82093AA datasheet (with in-kernel IOAPIC implementation) in kvm_apic_set_version()
376 * Hyper-V role) disable EOI broadcast in the LAPIC without checking the in kvm_apic_set_version()
377 * IOAPIC version first, and level-triggered interrupts then never get EOIed in kvm_apic_set_version()
381 !ioapic_in_kernel(vcpu->kvm)) in kvm_apic_set_version()
390 LINT_MASK, LINT_MASK, /* LVT0-1 */
399 for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG; in find_highest_vector()
400 vec >= 0; vec -= APIC_VECTORS_PER_REG) { in find_highest_vector()
406 return -1; in find_highest_vector()
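
A self-contained sketch of this top-down scan: the 256 vectors live in eight 32-bit registers spaced 16 bytes apart in APIC register space, so scanning register by register from the top finds the highest pending vector quickly. Constants mirror the kernel's; highest_bit() is a portable stand-in for __fls():

	#include <stdint.h>

	#define MAX_APIC_VECTOR		256
	#define APIC_VECTORS_PER_REG	32

	static int highest_bit(uint32_t reg)
	{
		int bit;

		for (bit = 31; bit >= 0; bit--)
			if (reg & (1u << bit))
				return bit;
		return -1;
	}

	static int find_highest_vector(const uint8_t *bitmap)
	{
		int vec;

		for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
		     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
			/* Each 32-bit register sits in a 16-byte slot. */
			uint32_t reg = *(const uint32_t *)(bitmap +
							   (vec / 32) * 16);
			if (reg)
				return highest_bit(reg) + vec;
		}
		return -1;	/* nothing pending */
	}
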
429 max_updated_irr = -1; in __kvm_apic_update_irr()
430 *max_irr = -1; in __kvm_apic_update_irr()
448 return ((max_updated_irr != -1) && in __kvm_apic_update_irr()
455 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_update_irr()
457 return __kvm_apic_update_irr(pir, apic->regs, max_irr); in kvm_apic_update_irr()
463 return find_highest_vector(apic->regs + APIC_IRR); in apic_search_irr()
474 if (!apic->irr_pending) in apic_find_highest_irr()
475 return -1; in apic_find_highest_irr()
478 ASSERT(result == -1 || result >= 16); in apic_find_highest_irr()
487 vcpu = apic->vcpu; in apic_clear_irr()
489 if (unlikely(vcpu->arch.apicv_active)) { in apic_clear_irr()
491 kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR); in apic_clear_irr()
495 apic->irr_pending = false; in apic_clear_irr()
496 kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR); in apic_clear_irr()
497 if (apic_search_irr(apic) != -1) in apic_clear_irr()
498 apic->irr_pending = true; in apic_clear_irr()
504 apic_clear_irr(vec, vcpu->arch.apic); in kvm_apic_clear_irr()
512 if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR)) in apic_set_isr()
515 vcpu = apic->vcpu; in apic_set_isr()
522 if (unlikely(vcpu->arch.apicv_active)) in apic_set_isr()
525 ++apic->isr_count; in apic_set_isr()
526 BUG_ON(apic->isr_count > MAX_APIC_VECTOR); in apic_set_isr()
532 apic->highest_isr_cache = vec; in apic_set_isr()
542 * is always -1, with APIC virtualization enabled. in apic_find_highest_isr()
544 if (!apic->isr_count) in apic_find_highest_isr()
545 return -1; in apic_find_highest_isr()
546 if (likely(apic->highest_isr_cache != -1)) in apic_find_highest_isr()
547 return apic->highest_isr_cache; in apic_find_highest_isr()
549 result = find_highest_vector(apic->regs + APIC_ISR); in apic_find_highest_isr()
550 ASSERT(result == -1 || result >= 16); in apic_find_highest_isr()
558 if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR)) in apic_clear_isr()
561 vcpu = apic->vcpu; in apic_clear_isr()
565 * uses the Hyper-V APIC enlightenment. In this case we may need in apic_clear_isr()
570 if (unlikely(vcpu->arch.apicv_active)) in apic_clear_isr()
574 --apic->isr_count; in apic_clear_isr()
575 BUG_ON(apic->isr_count < 0); in apic_clear_isr()
576 apic->highest_isr_cache = -1; in apic_clear_isr()
587 return apic_find_highest_irr(vcpu->arch.apic); in kvm_lapic_find_highest_irr()
598 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_set_irq()
600 return __apic_accept_irq(apic, irq->delivery_mode, irq->vector, in kvm_apic_set_irq()
601 irq->level, irq->trig_mode, dest_map); in kvm_apic_set_irq()
610 if (min > map->max_apic_id) in __pv_send_ipi()
614 min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) { in __pv_send_ipi()
615 if (map->phys_map[min + i]) { in __pv_send_ipi()
616 vcpu = map->phys_map[min + i]->vcpu; in __pv_send_ipi()
634 return -KVM_EINVAL; in kvm_pv_send_ipi()
642 map = rcu_dereference(kvm->arch.apic_map); in kvm_pv_send_ipi()
644 count = -EOPNOTSUPP; in kvm_pv_send_ipi()
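
The destination encoding handled by __pv_send_ipi() is simple enough to show in a few lines: the hypercall carries a bitmap plus a starting APIC ID ("min"), and set bit i targets physical APIC ID min + i. A hypothetical decoder, for illustration only:

	#include <stdint.h>
	#include <stdio.h>

	static void decode_pv_ipi(uint64_t ipi_bitmap, uint32_t min)
	{
		unsigned int i;

		for (i = 0; i < 64; i++)
			if (ipi_bitmap & (1ull << i))
				printf("IPI to APIC ID %u\n", min + i);
	}

	int main(void)
	{
		decode_pv_ipi(0x5, 32);	/* targets APIC IDs 32 and 34 */
		return 0;
	}
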
658 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val, in pv_eoi_put_user()
665 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val, in pv_eoi_get_user()
671 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; in pv_eoi_enabled()
679 (unsigned long long)vcpu->arch.pv_eoi.msr_val); in pv_eoi_get_pending()
689 (unsigned long long)vcpu->arch.pv_eoi.msr_val); in pv_eoi_set_pending()
692 __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); in pv_eoi_set_pending()
699 (unsigned long long)vcpu->arch.pv_eoi.msr_val); in pv_eoi_clr_pending()
702 __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention); in pv_eoi_clr_pending()
708 if (apic->vcpu->arch.apicv_active) in apic_has_interrupt_for_ppr()
709 highest_irr = kvm_x86_ops.sync_pir_to_irr(apic->vcpu); in apic_has_interrupt_for_ppr()
712 if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr) in apic_has_interrupt_for_ppr()
713 return -1; in apic_has_interrupt_for_ppr()
725 isrv = (isr != -1) ? isr : 0; in __apic_update_ppr()
744 apic_has_interrupt_for_ppr(apic, ppr) != -1) in apic_update_ppr()
745 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); in apic_update_ppr()
750 apic_update_ppr(vcpu->arch.apic); in kvm_apic_update_ppr()
814 * - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
818 * - in-kernel IOAPIC messages have to be delivered directly to
821 * rewrites the destination of non-IPI messages from APIC_BROADCAST
825 * important when userspace wants to use x2APIC-format MSIs, because
826 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
831 bool ipi = source != NULL; in kvm_apic_mda()
833 if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled && in kvm_apic_mda()
834 !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target)) in kvm_apic_mda()
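
A minimal sketch of the MDA rewrite the comment above describes: for non-IPI (IOAPIC/MSI) messages aimed at an x2APIC-mode LAPIC, the xAPIC broadcast 0xff is promoted to the x2APIC broadcast 0xffffffff unless userspace disabled the quirk; otherwise an 8-bit xAPIC destination is shifted into bits 31:24, as SET_APIC_DEST_FIELD() does. The standalone signature is hypothetical:

	#include <stdbool.h>
	#include <stdint.h>

	#define APIC_BROADCAST		0xffu
	#define X2APIC_BROADCAST	0xffffffffu

	static uint32_t apic_mda(uint32_t dest_id, bool is_ipi,
				 bool target_x2apic, bool quirk_disabled)
	{
		if (!quirk_disabled && !is_ipi &&
		    dest_id == APIC_BROADCAST && target_x2apic)
			return X2APIC_BROADCAST;

		/* xAPIC MDAs live in bits 31:24; x2APIC uses all 32 bits. */
		return target_x2apic ? dest_id
				     : (uint32_t)(uint8_t)dest_id << 24;
	}
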
843 struct kvm_lapic *target = vcpu->arch.apic; in kvm_apic_match_dest()
869 int i, idx = -1; in kvm_vector_to_index()
883 if (!kvm->arch.disabled_lapic_found) { in kvm_apic_disabled_lapic_found()
884 kvm->arch.disabled_lapic_found = true; in kvm_apic_disabled_lapic_found()
893 if (kvm->arch.x2apic_broadcast_quirk_disabled) { in kvm_apic_is_broadcast_dest()
894 if ((irq->dest_id == APIC_BROADCAST && in kvm_apic_is_broadcast_dest()
895 map->mode != KVM_APIC_MODE_X2APIC)) in kvm_apic_is_broadcast_dest()
897 if (irq->dest_id == X2APIC_BROADCAST) in kvm_apic_is_broadcast_dest()
901 if (irq->dest_id == (x2apic_ipi ? in kvm_apic_is_broadcast_dest()
923 if (irq->shorthand == APIC_DEST_SELF && src) { in kvm_apic_map_get_dest_lapic()
927 } else if (irq->shorthand) in kvm_apic_map_get_dest_lapic()
933 if (irq->dest_mode == APIC_DEST_PHYSICAL) { in kvm_apic_map_get_dest_lapic()
934 if (irq->dest_id > map->max_apic_id) { in kvm_apic_map_get_dest_lapic()
937 u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1); in kvm_apic_map_get_dest_lapic()
938 *dst = &map->phys_map[dest_id]; in kvm_apic_map_get_dest_lapic()
945 if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst, in kvm_apic_map_get_dest_lapic()
953 lowest = -1; in kvm_apic_map_get_dest_lapic()
959 else if (kvm_apic_compare_prio((*dst)[i]->vcpu, in kvm_apic_map_get_dest_lapic()
960 (*dst)[lowest]->vcpu) < 0) in kvm_apic_map_get_dest_lapic()
967 lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap), in kvm_apic_map_get_dest_lapic()
991 *r = -1; in kvm_irq_delivery_to_apic_fast()
993 if (irq->shorthand == APIC_DEST_SELF) { in kvm_irq_delivery_to_apic_fast()
998 *r = kvm_apic_set_irq(src->vcpu, irq, dest_map); in kvm_irq_delivery_to_apic_fast()
1003 map = rcu_dereference(kvm->arch.apic_map); in kvm_irq_delivery_to_apic_fast()
1011 *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map); in kvm_irq_delivery_to_apic_fast()
1022 * - For single-destination interrupts, handle it in posted mode
1023 * - Else if vector hashing is enabled and it is a lowest-priority
1026 * 1. For lowest-priority interrupts, store all the possible
1029 * the right destination vCPU in the array for the lowest-priority
1031 * - Otherwise, use remapped mode to inject the interrupt.
1041 if (irq->shorthand) in kvm_intr_is_single_vcpu_fast()
1045 map = rcu_dereference(kvm->arch.apic_map); in kvm_intr_is_single_vcpu_fast()
1052 *dest_vcpu = dst[i]->vcpu; in kvm_intr_is_single_vcpu_fast()
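
The vector-hashing case from the list above reduces to picking the (vector % N + 1)-th set bit among the N candidate destinations, in the spirit of kvm_vector_to_index(). A sketch, assuming the bitmap has at least dest_vcpus bits set (the kernel enforces that with a BUG_ON):

	#include <stdint.h>

	static int vector_to_index(uint32_t vector, uint32_t dest_vcpus,
				   uint16_t bitmap)
	{
		uint32_t mod = vector % dest_vcpus, i;
		int idx = -1;

		for (i = 0; i <= mod; i++) {
			do {
				idx++;	/* advance to the next set bit */
			} while (!(bitmap & (1u << idx)));
		}
		return idx;	/* index of the chosen destination */
	}
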
1070 struct kvm_vcpu *vcpu = apic->vcpu; in __apic_accept_irq()
1072 trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, in __apic_accept_irq()
1076 vcpu->arch.apic_arb_prio++; in __apic_accept_irq()
1089 __set_bit(vcpu->vcpu_id, dest_map->map); in __apic_accept_irq()
1090 dest_map->vectors[vcpu->vcpu_id] = vector; in __apic_accept_irq()
1093 if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) { in __apic_accept_irq()
1096 apic->regs + APIC_TMR); in __apic_accept_irq()
1099 apic->regs + APIC_TMR); in __apic_accept_irq()
1111 vcpu->arch.pv.pv_unhalted = 1; in __apic_accept_irq()
1132 apic->pending_events = (1UL << KVM_APIC_INIT); in __apic_accept_irq()
1140 apic->sipi_vector = vector; in __apic_accept_irq()
1143 set_bit(KVM_APIC_SIPI, &apic->pending_events); in __apic_accept_irq()
1182 map = rcu_dereference(kvm->arch.apic_map); in kvm_bitmap_or_dest_vcpus()
1190 vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx; in kvm_bitmap_or_dest_vcpus()
1198 irq->shorthand, in kvm_bitmap_or_dest_vcpus()
1199 irq->dest_id, in kvm_bitmap_or_dest_vcpus()
1200 irq->dest_mode)) in kvm_bitmap_or_dest_vcpus()
1210 return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio; in kvm_apic_compare_prio()
1215 return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors); in kvm_ioapic_handles_vector()
1227 if (irqchip_split(apic->vcpu->kvm)) { in kvm_ioapic_send_eoi()
1228 apic->vcpu->arch.pending_ioapic_eoi = vector; in kvm_ioapic_send_eoi()
1229 kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu); in kvm_ioapic_send_eoi()
1233 if (apic_test_vector(vector, apic->regs + APIC_TMR)) in kvm_ioapic_send_eoi()
1238 kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode); in kvm_ioapic_send_eoi()
1251 if (vector == -1) in apic_set_eoi()
1257 if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap)) in apic_set_eoi()
1258 kvm_hv_synic_send_eoi(apic->vcpu, vector); in apic_set_eoi()
1261 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); in apic_set_eoi()
1266 * this interface assumes a trap-like exit, which has already finished
1271 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_set_eoi_accelerated()
1276 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); in kvm_apic_set_eoi_accelerated()
1298 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL); in kvm_apic_send_ipi()
1311 apic->lapic_timer.period == 0) in apic_get_tmcct()
1315 remaining = ktime_sub(apic->lapic_timer.target_expiration, now); in apic_get_tmcct()
1319 ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period); in apic_get_tmcct()
1321 (APIC_BUS_CYCLE_NS * apic->divide_count)); in apic_get_tmcct()
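
In other words, the TMCCT read-back is the remaining time, folded back into one period for periodic mode, divided by the cost of a single count (APIC_BUS_CYCLE_NS is 1 in this file, so one count costs divide_count nanoseconds). A worked sketch with an added zero guard:

	#include <stdint.h>

	#define APIC_BUS_CYCLE_NS 1

	static uint32_t get_tmcct(int64_t remaining_ns, uint64_t period_ns,
				  uint32_t divide_count)
	{
		uint64_t ns;

		if (!period_ns || !divide_count)
			return 0;
		if (remaining_ns < 0)	/* already expired */
			remaining_ns = 0;

		/* Fold elapsed full periods back into the current one. */
		ns = (uint64_t)remaining_ns % period_ns;
		return (uint32_t)(ns / (APIC_BUS_CYCLE_NS * divide_count));
	}
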
1328 struct kvm_vcpu *vcpu = apic->vcpu; in __report_tpr_access()
1329 struct kvm_run *run = vcpu->run; in __report_tpr_access()
1332 run->tpr_access.rip = kvm_rip_read(vcpu); in __report_tpr_access()
1333 run->tpr_access.is_write = write; in __report_tpr_access()
1338 if (apic->vcpu->arch.tpr_access_reporting) in report_tpr_access()
1381 (APIC_REG_MASK(first) * ((1ull << (count)) - 1))
1388 /* this bitmask has a bit cleared for each reserved register */ in kvm_lapic_reg_read()
1444 return addr >= apic->base_address && in apic_mmio_in_range()
1445 addr < apic->base_address + LAPIC_MMIO_LENGTH; in apic_mmio_in_range()
1452 u32 offset = address - apic->base_address; in apic_mmio_read()
1455 return -EOPNOTSUPP; in apic_mmio_read()
1458 if (!kvm_check_has_quirk(vcpu->kvm, in apic_mmio_read()
1460 return -EOPNOTSUPP; in apic_mmio_read()
1478 apic->divide_count = 0x1 << (tmp2 & 0x7); in update_divide_count()
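
The tmp1/tmp2 dance decodes the divide-configuration register: TDCR bits 0, 1 and 3 form a 3-bit value v, and the divisor is 2^((v + 1) & 7), so the encoding 0b1011 wraps around to divide-by-1 as the SDM specifies. A standalone sketch (hypothetical function name):

	#include <stdint.h>

	static uint32_t tdcr_to_divide_count(uint32_t tdcr)
	{
		uint32_t tmp1 = tdcr & 0xf;
		uint32_t tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;

		return 1u << (tmp2 & 0x7);	/* e.g. TDCR=0xb -> 1 */
	}
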
1488 if (apic_lvtt_period(apic) && apic->lapic_timer.period) { in limit_periodic_timer_frequency()
1491 if (apic->lapic_timer.period < min_period) { in limit_periodic_timer_frequency()
1495 apic->vcpu->vcpu_id, in limit_periodic_timer_frequency()
1496 apic->lapic_timer.period, min_period); in limit_periodic_timer_frequency()
1497 apic->lapic_timer.period = min_period; in limit_periodic_timer_frequency()
1507 apic->lapic_timer.timer_mode_mask; in apic_update_lvtt()
1509 if (apic->lapic_timer.timer_mode != timer_mode) { in apic_update_lvtt()
1512 hrtimer_cancel(&apic->lapic_timer.timer); in apic_update_lvtt()
1514 if (apic->lapic_timer.hv_timer_in_use) in apic_update_lvtt()
1518 apic->lapic_timer.period = 0; in apic_update_lvtt()
1519 apic->lapic_timer.tscdeadline = 0; in apic_update_lvtt()
1521 apic->lapic_timer.timer_mode = timer_mode; in apic_update_lvtt()
1528 * during a higher-priority task.
1533 struct kvm_lapic *apic = vcpu->arch.apic; in lapic_timer_int_injected()
1538 void *bitmap = apic->regs + APIC_ISR; in lapic_timer_int_injected()
1540 if (vcpu->arch.apicv_active) in lapic_timer_int_injected()
1541 bitmap = apic->regs + APIC_IRR; in lapic_timer_int_injected()
1551 u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns; in __wait_lapic_expire()
1559 if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) { in __wait_lapic_expire()
1564 do_div(delay_ns, vcpu->arch.virtual_tsc_khz); in __wait_lapic_expire()
1572 struct kvm_lapic *apic = vcpu->arch.apic; in adjust_lapic_timer_advance()
1573 u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns; in adjust_lapic_timer_advance()
1583 ns = -advance_expire_delta * 1000000ULL; in adjust_lapic_timer_advance()
1584 do_div(ns, vcpu->arch.virtual_tsc_khz); in adjust_lapic_timer_advance()
1585 timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP; in adjust_lapic_timer_advance()
1589 do_div(ns, vcpu->arch.virtual_tsc_khz); in adjust_lapic_timer_advance()
1595 apic->lapic_timer.timer_advance_ns = timer_advance_ns; in adjust_lapic_timer_advance()
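
This is the "step-by-step approximation" promised at the top of the file: the observed early/late delta (in guest TSC cycles) is converted to nanoseconds and only a fraction of it is applied per iteration, which damps jitter. A simplified sketch; the step constant is a placeholder, and the kernel's min/max clamps on both the delta and the result are omitted:

	#include <stdint.h>

	#define ADJUST_STEP 8	/* stand-in for LAPIC_TIMER_ADVANCE_ADJUST_STEP */

	static uint32_t adjust_timer_advance(uint32_t advance_ns,
					     int64_t delta_cycles,
					     uint64_t tsc_khz)
	{
		/* cycles -> ns: ns = cycles * 10^6 / tsc_khz */
		uint64_t mag = delta_cycles < 0 ? -delta_cycles : delta_cycles;
		uint64_t ns = mag * 1000000ull / tsc_khz;

		if (delta_cycles < 0)
			advance_ns -= ns / ADJUST_STEP;	/* fired early: back off */
		else
			advance_ns += ns / ADJUST_STEP;	/* fired late: lead more */

		return advance_ns;
	}
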
1600 struct kvm_lapic *apic = vcpu->arch.apic; in __kvm_wait_lapic_expire()
1603 tsc_deadline = apic->lapic_timer.expired_tscdeadline; in __kvm_wait_lapic_expire()
1604 apic->lapic_timer.expired_tscdeadline = 0; in __kvm_wait_lapic_expire()
1606 apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline; in __kvm_wait_lapic_expire()
1609 __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc); in __kvm_wait_lapic_expire()
1612 adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta); in __kvm_wait_lapic_expire()
1618 vcpu->arch.apic->lapic_timer.expired_tscdeadline && in kvm_wait_lapic_expire()
1619 vcpu->arch.apic->lapic_timer.timer_advance_ns && in kvm_wait_lapic_expire()
1627 struct kvm_timer *ktimer = &apic->lapic_timer; in kvm_apic_inject_pending_timer_irqs()
1631 ktimer->tscdeadline = 0; in kvm_apic_inject_pending_timer_irqs()
1633 ktimer->tscdeadline = 0; in kvm_apic_inject_pending_timer_irqs()
1634 ktimer->target_expiration = 0; in kvm_apic_inject_pending_timer_irqs()
1640 struct kvm_vcpu *vcpu = apic->vcpu; in apic_timer_expired()
1641 struct kvm_timer *ktimer = &apic->lapic_timer; in apic_timer_expired()
1643 if (atomic_read(&apic->lapic_timer.pending)) in apic_timer_expired()
1646 if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use) in apic_timer_expired()
1647 ktimer->expired_tscdeadline = ktimer->tscdeadline; in apic_timer_expired()
1649 if (!from_timer_fn && vcpu->arch.apicv_active) { in apic_timer_expired()
1655 if (kvm_use_posted_timer_interrupt(apic->vcpu)) { in apic_timer_expired()
1663 if (vcpu->arch.apic->lapic_timer.expired_tscdeadline && in apic_timer_expired()
1664 vcpu->arch.apic->lapic_timer.timer_advance_ns) in apic_timer_expired()
1670 atomic_inc(&apic->lapic_timer.pending); in apic_timer_expired()
1678 struct kvm_timer *ktimer = &apic->lapic_timer; in start_sw_tscdeadline()
1679 u64 guest_tsc, tscdeadline = ktimer->tscdeadline; in start_sw_tscdeadline()
1682 struct kvm_vcpu *vcpu = apic->vcpu; in start_sw_tscdeadline()
1683 unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz; in start_sw_tscdeadline()
1695 ns = (tscdeadline - guest_tsc) * 1000000ULL; in start_sw_tscdeadline()
1699 likely(ns > apic->lapic_timer.timer_advance_ns)) { in start_sw_tscdeadline()
1701 expire = ktime_sub_ns(expire, ktimer->timer_advance_ns); in start_sw_tscdeadline()
1702 hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD); in start_sw_tscdeadline()
1711 return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count; in tmict_to_ns()
1719 apic->lapic_timer.period = in update_target_expiration()
1724 remaining = ktime_sub(apic->lapic_timer.target_expiration, now); in update_target_expiration()
1730 apic->divide_count, old_divisor); in update_target_expiration()
1732 apic->lapic_timer.tscdeadline += in update_target_expiration()
1733 nsec_to_cycles(apic->vcpu, ns_remaining_new) - in update_target_expiration()
1734 nsec_to_cycles(apic->vcpu, ns_remaining_old); in update_target_expiration()
1735 apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new); in update_target_expiration()
1745 apic->lapic_timer.period = in set_target_expiration()
1748 if (!apic->lapic_timer.period) { in set_target_expiration()
1749 apic->lapic_timer.tscdeadline = 0; in set_target_expiration()
1754 deadline = apic->lapic_timer.period; in set_target_expiration()
1761 deadline = apic->lapic_timer.period; in set_target_expiration()
1762 else if (unlikely(deadline > apic->lapic_timer.period)) { in set_target_expiration()
1767 apic->vcpu->vcpu_id, in set_target_expiration()
1770 deadline, apic->lapic_timer.period); in set_target_expiration()
1772 deadline = apic->lapic_timer.period; in set_target_expiration()
1777 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) + in set_target_expiration()
1778 nsec_to_cycles(apic->vcpu, deadline); in set_target_expiration()
1779 apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline); in set_target_expiration()
1797 apic->lapic_timer.target_expiration = in advance_periodic_target_expiration()
1798 ktime_add_ns(apic->lapic_timer.target_expiration, in advance_periodic_target_expiration()
1799 apic->lapic_timer.period); in advance_periodic_target_expiration()
1800 delta = ktime_sub(apic->lapic_timer.target_expiration, now); in advance_periodic_target_expiration()
1801 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) + in advance_periodic_target_expiration()
1802 nsec_to_cycles(apic->vcpu, delta); in advance_periodic_target_expiration()
1807 if (!apic->lapic_timer.period) in start_sw_period()
1811 apic->lapic_timer.target_expiration)) { in start_sw_period()
1820 hrtimer_start(&apic->lapic_timer.timer, in start_sw_period()
1821 apic->lapic_timer.target_expiration, in start_sw_period()
1830 return vcpu->arch.apic->lapic_timer.hv_timer_in_use; in kvm_lapic_hv_timer_in_use()
1837 WARN_ON(!apic->lapic_timer.hv_timer_in_use); in cancel_hv_timer()
1838 kvm_x86_ops.cancel_hv_timer(apic->vcpu); in cancel_hv_timer()
1839 apic->lapic_timer.hv_timer_in_use = false; in cancel_hv_timer()
1844 struct kvm_timer *ktimer = &apic->lapic_timer; in start_hv_timer()
1845 struct kvm_vcpu *vcpu = apic->vcpu; in start_hv_timer()
1852 if (!ktimer->tscdeadline) in start_hv_timer()
1855 if (kvm_x86_ops.set_hv_timer(vcpu, ktimer->tscdeadline, &expired)) in start_hv_timer()
1858 ktimer->hv_timer_in_use = true; in start_hv_timer()
1859 hrtimer_cancel(&ktimer->timer); in start_hv_timer()
1864 * VM-Exit to recompute the periodic timer's target expiration. in start_hv_timer()
1871 if (atomic_read(&ktimer->pending)) { in start_hv_timer()
1879 trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use); in start_hv_timer()
1886 struct kvm_timer *ktimer = &apic->lapic_timer; in start_sw_timer()
1889 if (apic->lapic_timer.hv_timer_in_use) in start_sw_timer()
1891 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending)) in start_sw_timer()
1898 trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false); in start_sw_timer()
1905 if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending)) in restart_apic_timer()
1916 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_lapic_expired_hv_timer()
1920 if (!apic->lapic_timer.hv_timer_in_use) in kvm_lapic_expired_hv_timer()
1922 WARN_ON(rcuwait_active(&vcpu->wait)); in kvm_lapic_expired_hv_timer()
1926 if (apic_lvtt_period(apic) && apic->lapic_timer.period) { in kvm_lapic_expired_hv_timer()
1937 restart_apic_timer(vcpu->arch.apic); in kvm_lapic_switch_to_hv_timer()
1943 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_lapic_switch_to_sw_timer()
1947 if (apic->lapic_timer.hv_timer_in_use) in kvm_lapic_switch_to_sw_timer()
1955 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_lapic_restart_hv_timer()
1957 WARN_ON(!apic->lapic_timer.hv_timer_in_use); in kvm_lapic_restart_hv_timer()
1963 atomic_set(&apic->lapic_timer.pending, 0); in __start_apic_timer()
1981 if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) { in apic_manage_nmi_watchdog()
1982 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode; in apic_manage_nmi_watchdog()
1984 atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); in apic_manage_nmi_watchdog()
1986 atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode); in apic_manage_nmi_watchdog()
2043 atomic_set(&apic->lapic_timer.pending, 0); in kvm_lapic_reg_write()
2076 (reg - APIC_LVTT) >> 4, size); in kvm_lapic_reg_write()
2085 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask); in kvm_lapic_reg_write()
2094 hrtimer_cancel(&apic->lapic_timer.timer); in kvm_lapic_reg_write()
2100 uint32_t old_divisor = apic->divide_count; in kvm_lapic_reg_write()
2104 if (apic->divide_count != old_divisor && in kvm_lapic_reg_write()
2105 apic->lapic_timer.period) { in kvm_lapic_reg_write()
2106 hrtimer_cancel(&apic->lapic_timer.timer); in kvm_lapic_reg_write()
2119 * Self-IPI exists only when x2APIC is enabled. Bits 7:0 hold in kvm_lapic_reg_write()
2120 * the vector, everything else is reserved. in kvm_lapic_reg_write()
2132 kvm_recalculate_apic_map(apic->vcpu->kvm); in kvm_lapic_reg_write()
2142 unsigned int offset = address - apic->base_address; in apic_mmio_write()
2146 return -EOPNOTSUPP; in apic_mmio_write()
2149 if (!kvm_check_has_quirk(vcpu->kvm, in apic_mmio_write()
2151 return -EOPNOTSUPP; in apic_mmio_write()
2157 * APIC register must be aligned on a 128-bit boundary. in apic_mmio_write()
2173 kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0); in kvm_lapic_set_eoi()
2185 kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val); in kvm_apic_write_nodecode()
2188 kvm_lapic_reg_write(vcpu->arch.apic, offset, val); in kvm_apic_write_nodecode()
2194 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_free_lapic()
2196 if (!vcpu->arch.apic) in kvm_free_lapic()
2199 hrtimer_cancel(&apic->lapic_timer.timer); in kvm_free_lapic()
2201 if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)) in kvm_free_lapic()
2204 if (!apic->sw_enabled) in kvm_free_lapic()
2207 if (apic->regs) in kvm_free_lapic()
2208 free_page((unsigned long)apic->regs); in kvm_free_lapic()
2214 *----------------------------------------------------------------------
2216 *----------------------------------------------------------------------
2220 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_get_lapic_tscdeadline_msr()
2225 return apic->lapic_timer.tscdeadline; in kvm_get_lapic_tscdeadline_msr()
2230 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_set_lapic_tscdeadline_msr()
2235 hrtimer_cancel(&apic->lapic_timer.timer); in kvm_set_lapic_tscdeadline_msr()
2236 apic->lapic_timer.tscdeadline = data; in kvm_set_lapic_tscdeadline_msr()
2242 apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4); in kvm_lapic_set_tpr()
2249 tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI); in kvm_lapic_get_cr8()
2256 u64 old_value = vcpu->arch.apic_base; in kvm_lapic_set_base()
2257 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_lapic_set_base()
2262 vcpu->arch.apic_base = value; in kvm_lapic_set_base()
2273 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id); in kvm_lapic_set_base()
2279 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); in kvm_lapic_set_base()
2284 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id); in kvm_lapic_set_base()
2289 apic->base_address = apic->vcpu->arch.apic_base & in kvm_lapic_set_base()
2293 apic->base_address != APIC_DEFAULT_PHYS_BASE) in kvm_lapic_set_base()
2299 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_update_apicv()
2301 if (vcpu->arch.apicv_active) { in kvm_apic_update_apicv()
2303 apic->irr_pending = true; in kvm_apic_update_apicv()
2304 apic->isr_count = 1; in kvm_apic_update_apicv()
2306 apic->irr_pending = (apic_search_irr(apic) != -1); in kvm_apic_update_apicv()
2307 apic->isr_count = count_vectors(apic->regs + APIC_ISR); in kvm_apic_update_apicv()
2314 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_lapic_reset()
2321 hrtimer_cancel(&apic->lapic_timer.timer); in kvm_lapic_reset()
2326 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id); in kvm_lapic_reset()
2328 kvm_apic_set_version(apic->vcpu); in kvm_lapic_reset()
2334 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED)) in kvm_lapic_reset()
2355 apic->highest_isr_cache = -1; in kvm_lapic_reset()
2357 atomic_set(&apic->lapic_timer.pending, 0); in kvm_lapic_reset()
2360 vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP); in kvm_lapic_reset()
2361 vcpu->arch.pv_eoi.msr_val = 0; in kvm_lapic_reset()
2363 if (vcpu->arch.apicv_active) { in kvm_lapic_reset()
2365 kvm_x86_ops.hwapic_irr_update(vcpu, -1); in kvm_lapic_reset()
2366 kvm_x86_ops.hwapic_isr_update(vcpu, -1); in kvm_lapic_reset()
2369 vcpu->arch.apic_arb_prio = 0; in kvm_lapic_reset()
2370 vcpu->arch.apic_attention = 0; in kvm_lapic_reset()
2372 kvm_recalculate_apic_map(vcpu->kvm); in kvm_lapic_reset()
2376 *----------------------------------------------------------------------
2378 *----------------------------------------------------------------------
2388 struct kvm_lapic *apic = vcpu->arch.apic; in apic_has_pending_timer()
2391 return atomic_read(&apic->lapic_timer.pending); in apic_has_pending_timer()
2417 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_nmi_wd_deliver()
2437 hrtimer_add_expires_ns(&ktimer->timer, ktimer->period); in apic_timer_fn()
2453 vcpu->arch.apic = apic; in kvm_create_lapic()
2455 apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT); in kvm_create_lapic()
2456 if (!apic->regs) { in kvm_create_lapic()
2458 vcpu->vcpu_id); in kvm_create_lapic()
2461 apic->vcpu = vcpu; in kvm_create_lapic()
2463 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, in kvm_create_lapic()
2465 apic->lapic_timer.timer.function = apic_timer_fn; in kvm_create_lapic()
2466 if (timer_advance_ns == -1) { in kvm_create_lapic()
2467 apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT; in kvm_create_lapic()
2470 apic->lapic_timer.timer_advance_ns = timer_advance_ns; in kvm_create_lapic()
2478 vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; in kvm_create_lapic()
2480 kvm_iodevice_init(&apic->dev, &apic_mmio_ops); in kvm_create_lapic()
2485 vcpu->arch.apic = NULL; in kvm_create_lapic()
2487 return -ENOMEM; in kvm_create_lapic()
2492 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_has_interrupt()
2496 return -1; in kvm_apic_has_interrupt()
2505 u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0); in kvm_apic_accept_pic_intr()
2507 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in kvm_apic_accept_pic_intr()
2517 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_inject_apic_timer_irqs()
2519 if (atomic_read(&apic->lapic_timer.pending) > 0) { in kvm_inject_apic_timer_irqs()
2521 atomic_set(&apic->lapic_timer.pending, 0); in kvm_inject_apic_timer_irqs()
2528 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_get_apic_interrupt()
2531 if (vector == -1) in kvm_get_apic_interrupt()
2532 return -1; in kvm_get_apic_interrupt()
2542 if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) { in kvm_get_apic_interrupt()
2544 * For auto-EOI interrupts, there might be another pending in kvm_get_apic_interrupt()
2552 * be a higher-priority pending interrupt---except if there was in kvm_get_apic_interrupt()
2566 if (apic_x2apic_mode(vcpu->arch.apic)) { in kvm_apic_state_fixup()
2567 u32 *id = (u32 *)(s->regs + APIC_ID); in kvm_apic_state_fixup()
2568 u32 *ldr = (u32 *)(s->regs + APIC_LDR); in kvm_apic_state_fixup()
2570 if (vcpu->kvm->arch.x2apic_format) { in kvm_apic_state_fixup()
2571 if (*id != vcpu->vcpu_id) in kvm_apic_state_fixup()
2572 return -EINVAL; in kvm_apic_state_fixup()
2590 memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s)); in kvm_apic_get_state()
2596 __kvm_lapic_set_reg(s->regs, APIC_TMCCT, in kvm_apic_get_state()
2597 __apic_read(vcpu->arch.apic, APIC_TMCCT)); in kvm_apic_get_state()
2604 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_set_state()
2607 kvm_lapic_set_base(vcpu, vcpu->arch.apic_base); in kvm_apic_set_state()
2609 apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV))); in kvm_apic_set_state()
2613 kvm_recalculate_apic_map(vcpu->kvm); in kvm_apic_set_state()
2616 memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s)); in kvm_apic_set_state()
2618 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY); in kvm_apic_set_state()
2619 kvm_recalculate_apic_map(vcpu->kvm); in kvm_apic_set_state()
2623 hrtimer_cancel(&apic->lapic_timer.timer); in kvm_apic_set_state()
2629 apic->highest_isr_cache = -1; in kvm_apic_set_state()
2630 if (vcpu->arch.apicv_active) { in kvm_apic_set_state()
2638 if (ioapic_in_kernel(vcpu->kvm)) in kvm_apic_set_state()
2641 vcpu->arch.apic_arb_prio = 0; in kvm_apic_set_state()
2654 timer = &vcpu->arch.apic->lapic_timer.timer; in __kvm_migrate_apic_timer()
2660 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
2676 * -> host disabled PV EOI. in apic_sync_pv_eoi_from_guest()
2678 * -> host enabled PV EOI, guest did not execute EOI yet. in apic_sync_pv_eoi_from_guest()
2680 * -> host enabled PV EOI, guest executed EOI. in apic_sync_pv_eoi_from_guest()
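
The three states above collapse into a small decision at exit time: an EOI is performed on the guest's behalf only when the host-side pending bit was armed and the guest has since cleared its KVM_PV_EOI_ENABLED flag. A sketch of that decision, with a hypothetical standalone signature:

	#include <stdbool.h>

	/* True when the hypervisor must perform the EOI for the guest. */
	static bool need_host_eoi(bool host_pending_bit, bool guest_flag_set)
	{
		if (!host_pending_bit)
			return false;	/* PV EOI not armed for this IRQ */
		if (guest_flag_set)
			return false;	/* guest has not EOI'd yet */
		return true;		/* guest used PV EOI: finish it */
	}
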
2700 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) in kvm_lapic_sync_from_vapic()
2701 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); in kvm_lapic_sync_from_vapic()
2703 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) in kvm_lapic_sync_from_vapic()
2706 if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, in kvm_lapic_sync_from_vapic()
2710 apic_set_tpr(vcpu->arch.apic, data & 0xff); in kvm_lapic_sync_from_vapic()
2714 * apic_sync_pv_eoi_to_guest - called before vmentry
2724 apic->irr_pending || in apic_sync_pv_eoi_to_guest()
2726 apic->highest_isr_cache == -1 || in apic_sync_pv_eoi_to_guest()
2728 kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) { in apic_sync_pv_eoi_to_guest()
2736 pv_eoi_set_pending(apic->vcpu); in apic_sync_pv_eoi_to_guest()
2743 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_lapic_sync_to_vapic()
2747 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) in kvm_lapic_sync_to_vapic()
2759 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, in kvm_lapic_sync_to_vapic()
2766 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, in kvm_lapic_set_vapic_addr()
2767 &vcpu->arch.apic->vapic_cache, in kvm_lapic_set_vapic_addr()
2769 return -EINVAL; in kvm_lapic_set_vapic_addr()
2770 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); in kvm_lapic_set_vapic_addr()
2772 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); in kvm_lapic_set_vapic_addr()
2775 vcpu->arch.apic->vapic_addr = vapic_addr; in kvm_lapic_set_vapic_addr()
2781 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_x2apic_msr_write()
2782 u32 reg = (msr - APIC_BASE_MSR) << 4; in kvm_x2apic_msr_write()
2798 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_x2apic_msr_read()
2799 u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0; in kvm_x2apic_msr_read()
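
Both MSR paths rely on the same mapping: x2APIC MSR 0x800 + n corresponds to xAPIC register offset n * 16, so for example MSR 0x80b (EOI) becomes offset 0xB0. A one-line sketch (APIC_BASE_MSR is 0x800, as in the kernel headers):

	#include <stdint.h>

	#define APIC_BASE_MSR 0x800

	static uint32_t x2apic_msr_to_reg(uint32_t msr)
	{
		return (msr - APIC_BASE_MSR) << 4;	/* 0x80b -> 0xb0 */
	}
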
2819 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_hv_vapic_msr_write()
2832 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_hv_vapic_msr_read()
2851 struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data; in kvm_lapic_enable_pv_eoi()
2857 vcpu->arch.pv_eoi.msr_val = data; in kvm_lapic_enable_pv_eoi()
2861 if (addr == ghc->gpa && len <= ghc->len) in kvm_lapic_enable_pv_eoi()
2862 new_len = ghc->len; in kvm_lapic_enable_pv_eoi()
2866 return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len); in kvm_lapic_enable_pv_eoi()
2871 struct kvm_lapic *apic = vcpu->arch.apic; in kvm_apic_accept_events()
2875 if (!lapic_in_kernel(vcpu) || !apic->pending_events) in kvm_apic_accept_events()
2880 * (SMM, VMX non-root mode, SVM with GIF=0). in kvm_apic_accept_events()
2887 WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED); in kvm_apic_accept_events()
2888 if (test_bit(KVM_APIC_SIPI, &apic->pending_events)) in kvm_apic_accept_events()
2889 clear_bit(KVM_APIC_SIPI, &apic->pending_events); in kvm_apic_accept_events()
2893 pe = xchg(&apic->pending_events, 0); in kvm_apic_accept_events()
2896 if (kvm_vcpu_is_bsp(apic->vcpu)) in kvm_apic_accept_events()
2897 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_apic_accept_events()
2899 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_apic_accept_events()
2902 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in kvm_apic_accept_events()
2905 sipi_vector = apic->sipi_vector; in kvm_apic_accept_events()
2907 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_apic_accept_events()