Lines matching references to ghc:
96 struct gfn_to_hva_cache *ghc = &vx->runstate_cache; in kvm_xen_update_runstate_guest() local
108 if (unlikely(slots->generation != ghc->generation || kvm_is_error_hva(ghc->hva)) && in kvm_xen_update_runstate_guest()
109 kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len)) in kvm_xen_update_runstate_guest()
113 BUG_ON(!ghc->memslot); in kvm_xen_update_runstate_guest()
130 user_state = (int __user *)ghc->hva; in kvm_xen_update_runstate_guest()
134 user_times = (uint64_t __user *)(ghc->hva + in kvm_xen_update_runstate_guest()
144 user_times = (uint64_t __user *)(ghc->hva + in kvm_xen_update_runstate_guest()
204 mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT); in kvm_xen_update_runstate_guest()
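The matches above in kvm_xen_update_runstate_guest() follow the standard write-side gfn_to_hva_cache pattern: revalidate the cache against the memslot generation, re-initialise it if stale, write through the cached host virtual address, then mark the backing page dirty. Below is a minimal sketch of that pattern; example_write_u8_cached() is a hypothetical helper written for illustration, not a function from the listing, and it assumes the usual KVM helpers (kvm_gfn_to_hva_cache_init(), kvm_is_error_hva(), mark_page_dirty_in_slot()) from <linux/kvm_host.h>.

/*
 * Hypothetical sketch of the write-side gfn_to_hva_cache pattern shown
 * in the matches above: revalidate the cache, re-initialise it if the
 * memslot generation changed, write through the cached HVA, then mark
 * the backing page dirty so the dirty log stays correct for migration.
 */
#include <linux/kvm_host.h>
#include <linux/uaccess.h>

static void example_write_u8_cached(struct kvm_vcpu *v,
				    struct gfn_to_hva_cache *ghc,
				    unsigned int offset, u8 val)
{
	struct kvm_memslots *slots = kvm_memslots(v->kvm);

	/*
	 * A memslot update bumps slots->generation; a stale cache (or an
	 * error HVA left by a failed earlier init) must be re-initialised
	 * before ghc->hva can be dereferenced.
	 */
	if (unlikely(slots->generation != ghc->generation ||
		     kvm_is_error_hva(ghc->hva)) &&
	    kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len))
		return;

	/* A successful init guarantees the mapping sits in one memslot. */
	if (WARN_ON_ONCE(!ghc->memslot))
		return;

	/* Direct user access through the cached host virtual address. */
	if (__put_user(val, (u8 __user *)ghc->hva + offset))
		return;

	/* Keep the dirty log consistent for live migration. */
	mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
}

In the real runstate update the user accesses are wrapped in pagefault_disable()/pagefault_enable() when the caller may be in atomic context; the sketch omits that for brevity.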
219 struct gfn_to_hva_cache *ghc = &v->arch.xen.vcpu_info_cache; in __kvm_xen_has_interrupt() local
236 if (likely(slots->generation == ghc->generation && in __kvm_xen_has_interrupt()
237 !kvm_is_error_hva(ghc->hva) && ghc->memslot)) { in __kvm_xen_has_interrupt()
240 err = __get_user(rc, (u8 __user *)ghc->hva + offset); in __kvm_xen_has_interrupt()
259 kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset, in __kvm_xen_has_interrupt()
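The __kvm_xen_has_interrupt() matches show the read-side counterpart: a lockless fast path through the cached HVA, with a fall-back to the generic cached-read helper when the cache cannot be trusted. The sketch below distils that pattern; example_read_u8_cached() is a hypothetical name, and the fast-path checks deliberately mirror the validity checks that kvm_read_guest_offset_cached() performs internally (matching memslot generation, non-error HVA, recorded memslot).

/*
 * Hypothetical sketch of the read-side pattern in the matches above:
 * try a non-sleeping fast path through the cached HVA, and fall back
 * to kvm_read_guest_offset_cached(), which revalidates the cache and
 * may fault pages in, when the fast path cannot be used.
 */
#include <linux/kvm_host.h>
#include <linux/uaccess.h>

static u8 example_read_u8_cached(struct kvm_vcpu *v,
				 struct gfn_to_hva_cache *ghc,
				 unsigned int offset)
{
	struct kvm_memslots *slots = kvm_memslots(v->kvm);
	u8 rc = 0;
	int err;

	/*
	 * Fast path: the cache is usable only while the memslot
	 * generation still matches, the cached HVA is not an error
	 * value and a memslot is recorded.
	 */
	if (likely(slots->generation == ghc->generation &&
		   !kvm_is_error_hva(ghc->hva) && ghc->memslot)) {
		pagefault_disable();
		err = __get_user(rc, (u8 __user *)ghc->hva + offset);
		pagefault_enable();
		if (!err)
			return rc;
	}

	/* Slow path: may sleep, so only valid from a sleepable context. */
	if (kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset, sizeof(rc)))
		rc = 0;

	return rc;
}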