Lines matching references to gpa
41 gpa_t gpa = gfn_to_gpa(gfn); in kvm_xen_shared_info_init() local
54 ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE); in kvm_xen_shared_info_init()
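
The shared-info page is located by guest frame number: KVM's gfn_to_gpa() is a plain left shift by PAGE_SHIFT, and line 54 then activates the pfn cache over exactly one page at that address. A minimal user-space sketch of that conversion follows; the frame number is purely illustrative, and the kernel-only kvm_gpc_activate() call is left as a comment.

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

typedef uint64_t gpa_t;
typedef uint64_t gfn_t;

/* Same arithmetic as KVM's gfn_to_gpa(): frame number -> byte address. */
static gpa_t gfn_to_gpa(gfn_t gfn)
{
	return gfn << PAGE_SHIFT;
}

int main(void)
{
	gfn_t gfn = 0x12345;          /* hypothetical shared_info frame */
	gpa_t gpa = gfn_to_gpa(gfn);

	/* In the kernel, kvm_gpc_activate(gpc, gpa, PAGE_SIZE) would now map
	 * this one guest page into the gfn_to_pfn cache. */
	printf("gfn 0x%" PRIx64 " -> gpa 0x%" PRIx64 "\n", gfn, gpa);
	return 0;
}
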
264 if ((gpc1->gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) { in kvm_xen_update_runstate_guest()
265 user_len1 = PAGE_SIZE - (gpc1->gpa & ~PAGE_MASK); in kvm_xen_update_runstate_guest()
346 if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1, in kvm_xen_update_runstate_guest()
438 mark_page_dirty_in_slot(v->kvm, gpc1->memslot, gpc1->gpa >> PAGE_SHIFT); in kvm_xen_update_runstate_guest()
440 mark_page_dirty_in_slot(v->kvm, gpc2->memslot, gpc2->gpa >> PAGE_SHIFT); in kvm_xen_update_runstate_guest()
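
kvm_xen_update_runstate_guest() has to cope with a runstate area that straddles a page boundary: line 264 tests whether the data spills past the first page, line 265 keeps only the bytes that fit, line 346 activates a second cache at gpc1->gpa + user_len1 for the remainder, and lines 438/440 mark each backing page dirty by its frame number (gpa >> PAGE_SHIFT). A hedged user-space sketch of that split; the address and length below are made up, not the real structure size.

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	/* Hypothetical runstate area starting 40 bytes before a page boundary. */
	uint64_t gpa = 0x7ffffd8;
	size_t user_len = 104;            /* illustrative length only */
	size_t user_len1 = user_len, user_len2 = 0;

	/* Does the area spill into the next page? (cf. line 264) */
	if ((gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) {
		user_len1 = PAGE_SIZE - (gpa & ~PAGE_MASK);   /* cf. line 265 */
		user_len2 = user_len - user_len1;
		/* A second cache would be activated at gpa + user_len1, which
		 * is always page-aligned (cf. line 346). */
		assert(((gpa + user_len1) & ~PAGE_MASK) == 0);
	}

	/* Dirty tracking works on frame numbers, hence gpa >> PAGE_SHIFT
	 * (cf. lines 438 and 440). */
	printf("first page:  gfn 0x%" PRIx64 ", %zu bytes\n",
	       gpa >> PAGE_SHIFT, user_len1);
	if (user_len2)
		printf("second page: gfn 0x%" PRIx64 ", %zu bytes\n",
		       (gpa + user_len1) >> PAGE_SHIFT, user_len2);
	return 0;
}
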
552 mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT); in kvm_xen_inject_pending_events()
681 data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa); in kvm_xen_hvm_get_attr()
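
Going the other way, a gpa is turned back into a guest frame number either with an explicit shift (gpa >> PAGE_SHIFT, as the dirty-marking calls on lines 438, 440 and 552 do, since mark_page_dirty_in_slot() takes a gfn) or with the gpa_to_gfn() helper that line 681 uses to report the shared-info frame back to user space. Both are the same operation; a trivial sketch with an invented address:

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12

typedef uint64_t gpa_t;
typedef uint64_t gfn_t;

/* Same arithmetic as KVM's gpa_to_gfn(): byte address -> frame number. */
static gfn_t gpa_to_gfn(gpa_t gpa)
{
	return gpa >> PAGE_SHIFT;
}

int main(void)
{
	gpa_t gpa = 0x12345678;       /* hypothetical cached shared_info gpa */

	printf("gpa 0x%" PRIx64 " lives in gfn 0x%" PRIx64 "\n",
	       gpa, gpa_to_gfn(gpa));
	return 0;
}
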
729 if (data->u.gpa == KVM_XEN_INVALID_GPA) { in kvm_xen_vcpu_set_attr()
736 data->u.gpa, sizeof(struct vcpu_info)); in kvm_xen_vcpu_set_attr()
743 if (data->u.gpa == KVM_XEN_INVALID_GPA) { in kvm_xen_vcpu_set_attr()
750 data->u.gpa, in kvm_xen_vcpu_set_attr()
763 if (data->u.gpa == KVM_XEN_INVALID_GPA) { in kvm_xen_vcpu_set_attr()
782 sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK); in kvm_xen_vcpu_set_attr()
784 data->u.gpa, sz1); in kvm_xen_vcpu_set_attr()
793 BUG_ON((data->u.gpa + sz1) & ~PAGE_MASK); in kvm_xen_vcpu_set_attr()
795 data->u.gpa + sz1, sz2); in kvm_xen_vcpu_set_attr()
959 data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa; in kvm_xen_vcpu_get_attr()
961 data->u.gpa = KVM_XEN_INVALID_GPA; in kvm_xen_vcpu_get_attr()
967 data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa; in kvm_xen_vcpu_get_attr()
969 data->u.gpa = KVM_XEN_INVALID_GPA; in kvm_xen_vcpu_get_attr()
979 data->u.gpa = vcpu->arch.xen.runstate_cache.gpa; in kvm_xen_vcpu_get_attr()
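
Both the set path (lines 729-795) and the get path (lines 959-979) treat KVM_XEN_INVALID_GPA as a sentinel: writing it deactivates the corresponding pfn cache, writing a real address activates it (for the runstate area, split across two caches as above, with the BUG_ON on line 793 asserting the second half starts page-aligned), and reading the attribute returns the cached gpa or the sentinel when nothing is mapped. The sketch below models only that bookkeeping in user space; the struct, function names, and sentinel value are stand-ins for illustration, not the kernel's definitions.

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the UAPI sentinel; real code should use <linux/kvm.h>. */
#define XEN_INVALID_GPA UINT64_MAX

/* Toy model of one gfn_to_pfn cache: just "is it mapped" plus the gpa. */
struct gpc_model {
	bool     active;
	uint64_t gpa;
};

/* Model of the set-attr path: the sentinel deactivates, anything else maps. */
static void set_attr(struct gpc_model *c, uint64_t gpa)
{
	if (gpa == XEN_INVALID_GPA) {
		c->active = false;            /* cf. kvm_gpc_deactivate() */
	} else {
		c->active = true;             /* cf. kvm_gpc_activate() */
		c->gpa = gpa;
	}
}

/* Model of the get-attr path: report the cached gpa or the sentinel. */
static uint64_t get_attr(const struct gpc_model *c)
{
	return c->active ? c->gpa : XEN_INVALID_GPA;
}

int main(void)
{
	struct gpc_model vcpu_info = { 0 };

	set_attr(&vcpu_info, 0x1234000);
	printf("mapped:   0x%" PRIx64 "\n", get_attr(&vcpu_info));

	set_attr(&vcpu_info, XEN_INVALID_GPA);
	printf("unmapped: 0x%" PRIx64 "\n", get_attr(&vcpu_info));
	return 0;
}
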