Lines Matching +full:halt +full:- +full:regs
1 // SPDX-License-Identifier: GPL-2.0-or-later
10 #define pr_fmt(fmt) "kvm-guest: " fmt
54 early_param("no-kvmapf", parse_no_kvmapf);
63 early_param("no-steal-acc", parse_no_stealacc);
97 hlist_for_each(p, &b->list) { in _find_apf_task()
100 if (n->token == token) in _find_apf_task()
113 raw_spin_lock(&b->lock); in kvm_async_pf_queue_task()
116 /* dummy entry exist -> wake up was delivered ahead of PF */ in kvm_async_pf_queue_task()
117 hlist_del(&e->link); in kvm_async_pf_queue_task()
118 raw_spin_unlock(&b->lock); in kvm_async_pf_queue_task()
123 n->token = token; in kvm_async_pf_queue_task()
124 n->cpu = smp_processor_id(); in kvm_async_pf_queue_task()
125 init_swait_queue_head(&n->wq); in kvm_async_pf_queue_task()
126 hlist_add_head(&n->link, &b->list); in kvm_async_pf_queue_task()
127 raw_spin_unlock(&b->lock); in kvm_async_pf_queue_task()
132 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
163 hlist_del_init(&n->link); in apf_task_wake_one()
164 if (swq_has_sleeper(&n->wq)) in apf_task_wake_one()
165 swake_up_one(&n->wq); in apf_task_wake_one()
177 raw_spin_lock(&b->lock); in apf_task_wake_all()
178 hlist_for_each_safe(p, next, &b->list) { in apf_task_wake_all()
180 if (n->cpu == smp_processor_id()) in apf_task_wake_all()
183 raw_spin_unlock(&b->lock); in apf_task_wake_all()
199 raw_spin_lock(&b->lock); in kvm_async_pf_task_wake()
208 raw_spin_unlock(&b->lock); in kvm_async_pf_task_wake()
225 dummy->token = token; in kvm_async_pf_task_wake()
226 dummy->cpu = smp_processor_id(); in kvm_async_pf_task_wake()
227 init_swait_queue_head(&dummy->wq); in kvm_async_pf_task_wake()
228 hlist_add_head(&dummy->link, &b->list); in kvm_async_pf_task_wake()
233 raw_spin_unlock(&b->lock); in kvm_async_pf_task_wake()
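
The fragments above come from the guest-side async page fault bookkeeping (kvm_async_pf_queue_task() / kvm_async_pf_task_wake()): waiters are hashed by token into per-bucket lists, and when a "page ready" wake arrives before the corresponding wait, a dummy entry is queued so the later wait can consume it and skip sleeping. Below is a minimal userspace model of that token/dummy handshake; the names (buckets, find_slot, queue_wait, deliver_wake) and the single-threaded list handling are simplified stand-ins for illustration, not the kernel's hlist/swait machinery.

	/* Minimal userspace model of the async-PF token handshake (illustrative only). */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define NBUCKETS 16

	struct apf_node {
		unsigned int token;
		bool dummy;              /* wake arrived before the wait */
		struct apf_node *next;
	};

	static struct apf_node *buckets[NBUCKETS];

	static struct apf_node **find_slot(unsigned int token)
	{
		struct apf_node **pp = &buckets[token % NBUCKETS];

		for (; *pp; pp = &(*pp)->next)
			if ((*pp)->token == token)
				break;
		return pp;                       /* points at the match or the list tail */
	}

	/* Guest faults on token: either consume a pending dummy or enqueue a waiter. */
	static bool queue_wait(unsigned int token)
	{
		struct apf_node **pp = find_slot(token);
		struct apf_node *n = *pp;

		if (n && n->dummy) {             /* wake was delivered ahead of the PF */
			*pp = n->next;
			free(n);
			return false;            /* no need to sleep */
		}
		n = calloc(1, sizeof(*n));
		n->token = token;
		n->next = buckets[token % NBUCKETS];
		buckets[token % NBUCKETS] = n;
		return true;                     /* caller would now sleep on a waitqueue */
	}

	/* Host signals "page ready" for token: wake the waiter or leave a dummy. */
	static void deliver_wake(unsigned int token)
	{
		struct apf_node **pp = find_slot(token);
		struct apf_node *n = *pp;

		if (n) {                         /* waiter already queued: remove ("wake") it */
			*pp = n->next;
			free(n);
			return;
		}
		n = calloc(1, sizeof(*n));       /* no waiter yet: record the wake */
		n->token = token;
		n->dummy = true;
		n->next = buckets[token % NBUCKETS];
		buckets[token % NBUCKETS] = n;
	}

	int main(void)
	{
		deliver_wake(42);                                /* wake before wait */
		printf("must sleep: %d\n", queue_wait(42));      /* 0: dummy consumed */
		printf("must sleep: %d\n", queue_wait(7));       /* 1: waiter queued  */
		deliver_wake(7);                                 /* waiter removed    */
		return 0;
	}
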
254 noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token) in __kvm_handle_async_pf() argument
262 state = irqentry_enter(regs); in __kvm_handle_async_pf()
270 if (unlikely(!(regs->flags & X86_EFLAGS_IF))) in __kvm_handle_async_pf()
274 if (unlikely(!(user_mode(regs)))) in __kvm_handle_async_pf()
283 irqentry_exit(regs, state); in __kvm_handle_async_pf()
289 struct pt_regs *old_regs = set_irq_regs(regs); in DEFINE_IDTENTRY_SYSVEC()
344 apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK); in kvm_guest_apic_eoi_write()
406 * New kernel can re-enable when it boots. in kvm_pv_guest_cpu_reboot()
434 version = src->version; in kvm_steal_clock()
436 steal = src->steal; in kvm_steal_clock()
438 } while ((version & 1) || (version != src->version)); in kvm_steal_clock()
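
The loop at lines 434-438 is kvm_steal_clock() reading the per-cpu steal-time area the host updates: the version field acts as a seqcount, so the read is retried while the version is odd (update in progress) or changes between the two reads. A minimal userspace sketch of that read protocol, assuming a simplified layout (the real one is struct kvm_steal_time, and the kernel uses rmb() rather than compiler builtins):

	/* Illustrative sketch of the version-based (seqcount-style) steal-time read. */
	#include <stdint.h>
	#include <stdio.h>

	struct steal_area {
		volatile uint32_t version;   /* odd while the host is mid-update */
		volatile uint64_t steal;     /* nanoseconds stolen from this vCPU */
	};

	static uint64_t read_steal(struct steal_area *src)
	{
		uint32_t version;
		uint64_t steal;

		do {
			version = src->version;
			__atomic_thread_fence(__ATOMIC_ACQUIRE);   /* rmb() in the kernel */
			steal = src->steal;
			__atomic_thread_fence(__ATOMIC_ACQUIRE);
			/* retry if an update was in flight or completed under us */
		} while ((version & 1) || (version != src->version));

		return steal;
	}

	int main(void)
	{
		struct steal_area a = { .version = 2, .steal = 123456 };

		printf("steal = %llu ns\n", (unsigned long long)read_steal(&a));
		return 0;
	}
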
453 * hotplugged will have their per-cpu variable already mapped as
546 } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) { in __send_ipi_mask()
547 ipi_bitmap <<= min - apic_id; in __send_ipi_mask()
554 WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld", in __send_ipi_mask()
559 __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap); in __send_ipi_mask()
565 WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld", in __send_ipi_mask()
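
The __send_ipi_mask() fragments show how destination APIC IDs are packed into a 128-bit bitmap anchored at the lowest ID in the current window (min): an ID below min shifts the existing bits up by min - apic_id, and an ID that does not fit within KVM_IPI_CLUSTER_SIZE forces a hypercall flush and a fresh window. The userspace sketch below models only that bitmap arithmetic; send_ipi_hypercall() is a hypothetical stand-in for the real kvm_hypercall4(KVM_HC_SEND_IPI, ...), and the use of unsigned __int128 replaces the kernel's two-unsigned-long bitmap.

	/* Userspace model of the PV-IPI bitmap packing done in __send_ipi_mask(). */
	#include <stdint.h>
	#include <stdio.h>

	#define KVM_IPI_CLUSTER_SIZE 128   /* bits in the two-u64 destination bitmap */

	/* Hypothetical stand-in for kvm_hypercall4(KVM_HC_SEND_IPI, ...). */
	static void send_ipi_hypercall(unsigned __int128 bitmap, unsigned int min)
	{
		printf("IPI window: min=%u bitmap=0x%016llx%016llx\n", min,
		       (unsigned long long)(bitmap >> 64), (unsigned long long)bitmap);
	}

	static void send_ipi_mask(const unsigned int *apic_ids, int n)
	{
		unsigned __int128 bitmap = 0;
		unsigned int min = 0, max = 0;

		for (int i = 0; i < n; i++) {
			unsigned int id = apic_ids[i];

			if (!bitmap) {
				min = max = id;
			} else if (id < min && max - id < KVM_IPI_CLUSTER_SIZE) {
				bitmap <<= min - id;      /* widen the window downwards */
				min = id;
			} else if (id > min && id < min + KVM_IPI_CLUSTER_SIZE) {
				max = id > max ? id : max;
			} else {
				/* id does not fit in the 128-bit window: flush and restart */
				send_ipi_hypercall(bitmap, min);
				min = max = id;
				bitmap = 0;
			}
			bitmap |= (unsigned __int128)1 << (id - min);
		}

		if (bitmap)
			send_ipi_hypercall(bitmap, min);
	}

	int main(void)
	{
		unsigned int ids[] = { 4, 1, 130, 200, 75 };

		send_ipi_mask(ids, 5);
		return 0;
	}
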
594 apic->send_IPI_mask = kvm_send_ipi_mask; in kvm_setup_pv_ipi()
595 apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself; in kvm_setup_pv_ipi()
617 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init() in kvm_smp_prepare_boot_cpu()
675 static void kvm_crash_shutdown(struct pt_regs *regs) in kvm_crash_shutdown() argument
678 native_machine_crash_shutdown(regs); in kvm_crash_shutdown()
693 * queue flush_on_enter for pre-empted vCPUs in kvm_flush_tlb_others()
697 state = READ_ONCE(src->preempted); in kvm_flush_tlb_others()
699 if (try_cmpxchg(&src->preempted, &state, in kvm_flush_tlb_others()
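
These kvm_flush_tlb_others() fragments show the "don't IPI a preempted vCPU" optimization: if the shared steal_time.preempted byte has KVM_VCPU_PREEMPTED set, the guest tries one cmpxchg to add KVM_VCPU_FLUSH_TLB so the host flushes that vCPU's TLB when it next runs, and only still-running vCPUs stay in the IPI cpumask. A minimal sketch of that single-attempt cmpxchg pattern using C11 atomics; the flag values mirror the uapi asm/kvm_para.h definitions, while struct and helper names here are illustrative only.

	/* Sketch of the "defer TLB flush to a preempted vCPU" cmpxchg pattern. */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define KVM_VCPU_PREEMPTED  (1 << 0)   /* values as in uapi asm/kvm_para.h */
	#define KVM_VCPU_FLUSH_TLB  (1 << 1)

	struct steal_time_stub {
		_Atomic uint8_t preempted;         /* shared with the hypervisor */
	};

	/*
	 * Returns true if the flush could be queued on a preempted vCPU (no IPI
	 * needed), false if the vCPU must be IPIed instead.
	 */
	static bool queue_flush_if_preempted(struct steal_time_stub *src)
	{
		uint8_t state = atomic_load_explicit(&src->preempted,
						     memory_order_acquire);

		/* Only a vCPU the host has preempted can take the deferred flush. */
		if (!(state & KVM_VCPU_PREEMPTED))
			return false;

		/*
		 * One cmpxchg attempt, mirroring the kernel's try_cmpxchg() use:
		 * if the byte changed under us (e.g. the vCPU started running),
		 * fall back to sending the IPI.
		 */
		return atomic_compare_exchange_strong(&src->preempted, &state,
						      state | KVM_VCPU_FLUSH_TLB);
	}

	int main(void)
	{
		struct steal_time_stub running = { 0 };
		struct steal_time_stub preempted = { KVM_VCPU_PREEMPTED };

		printf("running vCPU   -> queued=%d (send IPI)\n",
		       queue_flush_if_preempted(&running));
		printf("preempted vCPU -> queued=%d (skip IPI)\n",
		       queue_flush_if_preempted(&preempted));
		return 0;
	}
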
777 static int kvm_cpuid_base = -1; in kvm_cpuid_base()
779 if (kvm_cpuid_base == -1) in kvm_cpuid_base()
822 static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs) in kvm_sev_es_hcall_prepare() argument
825 ghcb_set_rbx(ghcb, regs->bx); in kvm_sev_es_hcall_prepare()
826 ghcb_set_rcx(ghcb, regs->cx); in kvm_sev_es_hcall_prepare()
827 ghcb_set_rdx(ghcb, regs->dx); in kvm_sev_es_hcall_prepare()
828 ghcb_set_rsi(ghcb, regs->si); in kvm_sev_es_hcall_prepare()
831 static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs) in kvm_sev_es_hcall_finish() argument
916 * halt until it's our turn and kicked. Note that we do safe halt in kvm_wait()
921 halt(); in kvm_wait()
934 return !!(src->preempted & KVM_VCPU_PREEMPTED); in __kvm_vcpu_is_preempted()
940 #include <asm/asm-offsets.h>
945 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
957 ".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
1040 /* Enable guest halt poll disables host halt poll */ in arch_haltpoll_enable()
1050 /* Disable guest halt poll enables host halt poll */ in arch_haltpoll_disable()