Lines Matching full:vpa
All of the hits below come from the Book3S HV KVM code (arch/powerpc/kvm/book3s_hv.c); the leading number on each hit is its line number in that file, and only lines containing "vpa" are shown, so function bodies appear with gaps.
446 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) in init_vpa() argument
448 vpa->__old_status |= LPPACA_OLD_SHARED_PROC; in init_vpa()
449 vpa->yield_count = cpu_to_be32(1); in init_vpa()
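The fields touched throughout this listing (__old_status, yield_count, dtl_idx) live in the guest-visible lppaca that the registered VPA address points at. Below is a minimal sketch of just those fields, with byte order inferred from the cpu_to_be32()/cpu_to_be64() conversions in the listing; the real structure in arch/powerpc/include/asm/lppaca.h has many more fields, so this is illustrative only, not a drop-in definition.

#include <linux/types.h>

/* Illustrative subset of struct lppaca, NOT the real definition: only the
 * fields that appear in this listing, with offsets and padding omitted. */
struct lppaca_subset {
	u8	__old_status;	/* init_vpa() sets LPPACA_OLD_SHARED_PROC here (line 448) */
	__be32	yield_count;	/* seeded to 1 at line 449, read back in kvmppc_get_yield_count() */
	__be64	dtl_idx;	/* next dispatch-trace-log index, published at line 727 */
};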
486 unsigned long vcpuid, unsigned long vpa) in do_h_register_vpa() argument
504 if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa) in do_h_register_vpa()
508 va = kvmppc_pin_guest_page(kvm, vpa, &nb); in do_h_register_vpa()
515 kvmppc_unpin_guest_page(kvm, va, vpa, false); in do_h_register_vpa()
521 vpa = 0; in do_h_register_vpa()
530 case H_VPA_REG_VPA: /* register VPA */ in do_h_register_vpa()
540 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
549 /* Check that they have previously registered a VPA */ in do_h_register_vpa()
551 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
559 /* Check that they have previously registered a VPA */ in do_h_register_vpa()
561 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
568 case H_VPA_DEREG_VPA: /* deregister VPA */ in do_h_register_vpa()
575 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
591 vpap->next_gpa = vpa; in do_h_register_vpa()
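The check at line 504 is the guard H_REGISTER_VPA applies before pinning anything: for the registration subfunctions the guest address must be non-zero and cache-line aligned, otherwise the hcall fails with H_PARAMETER. A standalone sketch of that check follows; vpa_addr_ok() is a hypothetical name, while L1_CACHE_BYTES and H_PARAMETER/H_SUCCESS are the kernel symbols the real code uses.

#include <linux/cache.h>	/* L1_CACHE_BYTES */
#include <asm/hvcall.h>		/* H_PARAMETER, H_SUCCESS */

/* Hypothetical helper mirroring line 504: a VPA/SLB-shadow/DTL area may only
 * be registered at a non-zero, cache-line-aligned guest real address. */
static inline long vpa_addr_ok(unsigned long vpa)
{
	if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
		return H_PARAMETER;
	return H_SUCCESS;
}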
653 if (!(vcpu->arch.vpa.update_pending || in kvmppc_update_vpas()
659 if (vcpu->arch.vpa.update_pending) { in kvmppc_update_vpas()
660 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); in kvmppc_update_vpas()
661 if (vcpu->arch.vpa.pinned_addr) in kvmppc_update_vpas()
662 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); in kvmppc_update_vpas()
696 struct lppaca *vpa; in kvmppc_create_dtl_entry() local
703 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_create_dtl_entry()
712 if (!dt || !vpa) in kvmppc_create_dtl_entry()
725 /* order writing *dt vs. writing vpa->dtl_idx */ in kvmppc_create_dtl_entry()
727 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); in kvmppc_create_dtl_entry()
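Lines 725-727 show the publication order for dispatch-trace-log entries: the entry at *dt must be fully written before the guest-visible dtl_idx is advanced, so the guest never sees an index pointing at a half-written record. A sketch of that producer pattern follows, assuming the barrier under the comment at line 725 is smp_wmb() (the barrier line itself does not contain "vpa" and is elided from the listing); struct dtl_entry and struct lppaca are the kernel types, the helper name is hypothetical.

#include <linux/types.h>
#include <asm/lppaca.h>		/* struct lppaca, struct dtl_entry */
#include <asm/barrier.h>	/* smp_wmb() */
#include <asm/byteorder.h>	/* cpu_to_be64() */

/* Hypothetical producer-side sketch of lines 725-727: fill the entry, make it
 * globally visible, then publish the incremented index through the VPA. */
static void publish_dtl_entry_sketch(struct dtl_entry *dt,
				     const struct dtl_entry *entry,
				     struct lppaca *vpa, u64 *dtl_index)
{
	*dt = *entry;	/* write the whole trace-log entry first */
	smp_wmb();	/* order writing *dt vs. writing vpa->dtl_idx */
	vpa->dtl_idx = cpu_to_be64(++(*dtl_index));
}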
901 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; in kvmppc_get_yield_count()
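On the read side the host reaches the same structure through the pinned mapping. A minimal sketch of how the yield count stored big-endian at line 449 would be read back in host byte order; the helper name is hypothetical, and the real kvmppc_get_yield_count() additionally serializes against concurrent VPA updates, which this sketch omits.

#include <linux/kvm_host.h>	/* struct kvm_vcpu */

/* Hypothetical read-side helper: returns the guest-visible yield count, or 0
 * when no VPA is currently pinned for this vcpu. */
static u32 read_yield_count_sketch(struct kvm_vcpu *vcpu)
{
	struct lppaca *lp = vcpu->arch.vpa.pinned_addr;

	return lp ? be32_to_cpu(lp->yield_count) : 0;
}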
1785 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); in kvmppc_get_one_reg_hv()
2024 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); in kvmppc_set_one_reg_hv()
2030 if (addr && !vcpu->arch.vpa.next_gpa) in kvmppc_set_one_reg_hv()
2039 !vcpu->arch.vpa.next_gpa)) in kvmppc_set_one_reg_hv()
2475 static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa) in unpin_vpa() argument
2477 if (vpa->pinned_addr) in unpin_vpa()
2478 kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa, in unpin_vpa()
2479 vpa->dirty); in unpin_vpa()
2487 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); in kvmppc_core_vcpu_free_hv()
2900 else if (vcpu->arch.vpa.update_pending || in prepare_threads()
3109 * or need a VPA update done in kvmppc_run_core()
3626 if (vcpu->arch.vpa.pinned_addr) { in kvmhv_p9_guest_entry()
3627 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in kvmhv_p9_guest_entry()
3630 vcpu->arch.vpa.dirty = 1; in kvmhv_p9_guest_entry()
3640 if (vcpu->arch.vpa.pinned_addr) { in kvmhv_p9_guest_entry()
3641 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in kvmhv_p9_guest_entry()
3775 if (vcpu->arch.vpa.pinned_addr) { in kvmhv_p9_guest_entry()
3776 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in kvmhv_p9_guest_entry()
3779 vcpu->arch.vpa.dirty = 1; in kvmhv_p9_guest_entry()
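The kvmhv_p9_guest_entry() blocks above share one pattern: dereference the pinned VPA, update a guest-visible field in big-endian form, and set vpa.dirty so the page is later reported by kvmppc_harvest_vpa_dirty() (line 4559). A sketch of that pattern follows, assuming the elided lines bump yield_count around guest entry and exit; the helper name is hypothetical.

#include <linux/kvm_host.h>	/* struct kvm_vcpu */

/* Hypothetical helper showing the host-side VPA update pattern from
 * kvmhv_p9_guest_entry(): read-modify-write the big-endian field, then mark
 * the shadow state dirty so dirty logging later picks up the page. */
static void bump_vpa_yield_count_sketch(struct kvm_vcpu *vcpu)
{
	struct lppaca *lp = vcpu->arch.vpa.pinned_addr;

	if (!lp)
		return;
	lp->yield_count = cpu_to_be32(be32_to_cpu(lp->yield_count) + 1);
	vcpu->arch.vpa.dirty = 1;	/* harvested at dirty-log time (line 4559) */
}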
4555 /* Harvest dirty bits from VPA and DTL updates */ in kvm_vm_ioctl_get_dirty_log_hv()
4559 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
5544 static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa) in unpin_vpa_reset() argument
5546 unpin_vpa(kvm, vpa); in unpin_vpa_reset()
5547 vpa->gpa = 0; in unpin_vpa_reset()
5548 vpa->pinned_addr = NULL; in unpin_vpa_reset()
5549 vpa->dirty = false; in unpin_vpa_reset()
5550 vpa->update_pending = 0; in unpin_vpa_reset()
5573 * - Unpin the VPA pages.
5624 * chance to run and unpin their VPA pages. Unpinning of all in kvmhv_svm_off()
5625 * VPA pages is done here explicitly so that VPA pages in kvmhv_svm_off()
5635 unpin_vpa_reset(kvm, &vcpu->arch.vpa); in kvmhv_svm_off()