// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 */

/* In struct svm_nested_state: */

	/* cache for control fields of the guest */
	struct vmcb_control_area ctl;

/* In struct vcpu_svm: */

	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * stores interrupt remapping information used when updating the
	 * vcpu affinity, avoiding the need to scan for a matching ga_tag
	 * in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* Save desired MSR intercept (read: pass-through) state */

static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

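/*
 * Illustrative sketch, not part of the original file: any caller that
 * modifies a VMCB field must clear the matching clean bit so the CPU
 * reloads that state on the next VMRUN. The wrapper name is hypothetical;
 * V_TPR_MASK and the VMCB_INTR clean bit are existing SVM definitions.
 */
static inline void example_update_vtpr(struct vcpu_svm *svm, u8 tpr)
{
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= (tpr & V_TPR_MASK);
	vmcb_mark_dirty(svm->vmcb, VMCB_INTR);	/* force reload of int_ctl */
}
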
static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}

static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}

static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}

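/*
 * Sketch only: how the bit helpers above are used to flip a single
 * intercept vector. INTERCEPT_RDTSC and the VMCB_INTERCEPTS clean bit
 * are real SVM definitions; the wrapper itself is hypothetical.
 */
static inline void example_set_rdtsc_intercept(struct vmcb *vmcb, bool enable)
{
	if (enable)
		vmcb_set_intercept(&vmcb->control, INTERCEPT_RDTSC);
	else
		vmcb_clr_intercept(&vmcb->control, INTERCEPT_RDTSC);
	vmcb_mark_dirty(vmcb, VMCB_INTERCEPTS);	/* intercepts are clean-bit tracked */
}
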
void recalc_intercepts(struct vcpu_svm *svm);

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercepts[INTERCEPT_DR] = 0;

	recalc_intercepts(svm);
}

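/*
 * Hypothetical helper showing the intended pairing of the two functions
 * above: pass DR accesses through only while the guest "owns" the debug
 * registers; otherwise trap every access. The predicate is illustrative.
 */
static inline void example_sync_dr_intercepts(struct vcpu_svm *svm,
					      bool guest_owns_debug_regs)
{
	if (guest_owns_debug_regs)
		clr_dr_intercepts(svm);		/* direct hardware access */
	else
		set_dr_intercepts(svm);		/* every DR access #VMEXITs */
}
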
static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}

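/*
 * Sketch (not from svm.h): a debugger attach path would intercept #DB and
 * #BP via the helpers above. DB_VECTOR (1) and BP_VECTOR (3) are the
 * architectural exception vectors used throughout KVM.
 */
static inline void example_enable_guest_debug(struct vcpu_svm *svm)
{
	set_exception_intercept(svm, DB_VECTOR);
	set_exception_intercept(svm, BP_VECTOR);
}
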
static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}

static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}

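/*
 * Sketch: requesting an interrupt window. Intercepting virtual interrupts
 * (INTERCEPT_VINTR, a real vector) makes the guest exit as soon as it can
 * take an interrupt; the wrapper name is hypothetical.
 */
static inline void example_request_irq_window(struct vcpu_svm *svm)
{
	svm_set_intercept(svm, INTERCEPT_VINTR);
}
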
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

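/*
 * Illustrative only (wrapper name is hypothetical): injection paths
 * consult gif_set() first, because a clear global interrupt flag blocks
 * both interrupts and NMIs on SVM, whether tracked by hardware (vGIF in
 * int_ctl) or by software (HF_GIF_MASK in hflags).
 */
static inline bool example_event_injection_blocked(struct vcpu_svm *svm)
{
	return !gif_set(svm);
}
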
static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}

static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}

static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}

static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}

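/*
 * Sketch of the nested decision these predicates feed (wrapper name is
 * hypothetical): a physical interrupt arriving while L2 runs causes a
 * nested #VMEXIT only if L1 asked to intercept INTR in its cached controls.
 */
static inline bool example_intr_exits_to_l1(struct vcpu_svm *svm)
{
	return is_guest_mode(&svm->vcpu) && nested_exit_on_intr(svm);
}
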
static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	vmcb_mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

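/*
 * Sketch: AVIC interrupt posting consults the IsRunning bit checked above;
 * when the target vCPU is not running, the sender must kick it instead of
 * relying on the hardware doorbell. Wrapper name is hypothetical.
 */
static inline bool example_target_needs_wakeup(struct kvm_vcpu *vcpu)
{
	return !avic_vcpu_is_running(vcpu);
}
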
bool svm_check_apicv_inhibit_reasons(ulong bit);

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}
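
/*
 * Sketch (assumed caller, not from svm.h): SEV-specific paths are gated on
 * sev_guest(), e.g. to avoid touching guest state that the hardware keeps
 * encrypted; a real caller would refine this further for SEV-ES guests.
 */
static inline bool example_guest_memory_encrypted(struct kvm *kvm)
{
	return sev_guest(kvm);
}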