Lines matching full:lpcr in arch/powerpc/kvm/book3s_hv.c
480 pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.16lx\n", in kvmppc_dump_regs()
481 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
2100 * Enforce limits on guest LPCR values based on hardware availability,
2104 unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, unsigned long lpcr) in kvmppc_filter_lpcr_hv() argument
2108 lpcr &= ~LPCR_TC; in kvmppc_filter_lpcr_hv()
2112 lpcr &= ~LPCR_AIL; in kvmppc_filter_lpcr_hv()
2113 if ((lpcr & LPCR_AIL) != LPCR_AIL_3) in kvmppc_filter_lpcr_hv()
2114 lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */ in kvmppc_filter_lpcr_hv()
2122 lpcr &= ~LPCR_AIL; in kvmppc_filter_lpcr_hv()
2129 lpcr &= ~LPCR_LD; in kvmppc_filter_lpcr_hv()
2131 return lpcr; in kvmppc_filter_lpcr_hv()
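
The matches at 2104-2131 are the body of kvmppc_filter_lpcr_hv(); the guarding conditions (guest MMU mode and CPU feature checks) do not contain "lpcr" and so do not appear, only the bit-clearing statements do. The non-obvious part is the AIL clamp at 2113-2114: the two-bit AIL field may only hold 0 or 3, so any other value is forced back to 0. A stand-alone sketch of just that clamp, with the field position treated as an illustrative constant rather than the architected layout:

#include <stdio.h>

#define LPCR_AIL	0x03000000UL	/* illustrative placement of the 2-bit AIL field */
#define LPCR_AIL_3	0x03000000UL	/* AIL=3 means both bits of the field are set */

static unsigned long filter_ail(unsigned long lpcr)
{
	if ((lpcr & LPCR_AIL) != LPCR_AIL_3)
		lpcr &= ~LPCR_AIL;	/* AIL=1 and AIL=2 are disallowed, drop to AIL=0 */
	return lpcr;
}

int main(void)
{
	printf("%#lx\n", filter_ail(0x01000000UL));	/* AIL=1 -> field cleared */
	printf("%#lx\n", filter_ail(0x03000000UL));	/* AIL=3 -> kept */
	return 0;
}
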
2134 static void verify_lpcr(struct kvm *kvm, unsigned long lpcr) in verify_lpcr() argument
2136 if (lpcr != kvmppc_filter_lpcr_hv(kvm, lpcr)) { in verify_lpcr()
2137 WARN_ONCE(1, "lpcr 0x%lx differs from filtered 0x%lx\n", in verify_lpcr()
2138 lpcr, kvmppc_filter_lpcr_hv(kvm, lpcr)); in verify_lpcr()
2160 /* Broken 32-bit version of LPCR must not clear top bits */ in kvmppc_set_lpcr()
2165 (vc->lpcr & ~mask) | (new_lpcr & mask)); in kvmppc_set_lpcr()
2171 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { in kvmppc_set_lpcr()
2185 vc->lpcr = new_lpcr; in kvmppc_set_lpcr()
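
Lines 2160-2185 come from kvmppc_set_lpcr(), the path by which a userspace-supplied LPCR value is accepted: only a whitelisted mask of bits may change, the rest of the current vcore LPCR is kept, and the merged value still goes through kvmppc_filter_lpcr_hv(). A hedged sketch of that merge with invented names; the mask narrowing is what the "broken 32-bit version" comment at 2160 is about:

/*
 * Sketch of the masking done by kvmppc_set_lpcr(): 'mask' is the set of
 * userspace-modifiable bits; when the value arrived through the legacy
 * 32-bit LPCR register, the mask is narrowed to the low word so the top
 * half of the current value can never be cleared. The kernel then runs the
 * merged value through kvmppc_filter_lpcr_hv().
 */
static unsigned long merge_lpcr(unsigned long cur, unsigned long req,
				unsigned long mask, int preserve_top32)
{
	if (preserve_top32)
		mask &= 0xFFFFFFFFUL;		/* never touch the upper 32 bits */
	return (cur & ~mask) | (req & mask);
}
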
2345 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); in kvmppc_get_one_reg_hv()
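
Line 2345 is the read side of the same one_reg plumbing. A user-space sketch of fetching the value, assuming a powerpc host where <linux/kvm.h> provides the register IDs (KVM_REG_PPC_LPCR_64 is the 64-bit variant; the older 32-bit KVM_REG_PPC_LPCR is the one the preserve-top-bits comment above exists for):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/*
 * Read the vcore LPCR of a vcpu via KVM_GET_ONE_REG. Returns 0 on success,
 * -1 with errno set on failure; vcpu_fd comes from KVM_CREATE_VCPU.
 */
static int read_lpcr(int vcpu_fd, __u64 *lpcr)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_LPCR_64,
		.addr = (__u64)(unsigned long)lpcr,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
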
2743 vcore->lpcr = kvm->arch.lpcr; in kvmppc_vcore_create()
3220 if (kvm->arch.lpcr & LPCR_GTSE) in do_migrate_away_vcpu()
4047 static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u6… in kvmhv_vcpu_entry_p9_nested() argument
4080 hvregs.lpcr = lpcr; in kvmhv_vcpu_entry_p9_nested()
4109 * irq_work_raise could check a flag (or possibly LPCR[HDICE] in kvmhv_vcpu_entry_p9_nested()
4135 if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */ in kvmhv_vcpu_entry_p9_nested()
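
Line 4135 handles the guest decrementer on the nested-entry path: without the large decrementer (LPCR[LD] clear) the register is only 32 bits wide, so the raw value has to be sign-extended before being treated as a 64-bit quantity. A small stand-alone sketch with made-up names:

#include <stdint.h>
#include <stdio.h>

/*
 * Interpret a raw decrementer value: the full 64 bits when the large
 * decrementer is in use (LPCR[LD] set), otherwise a sign-extended 32-bit
 * quantity, so that a wrapped 32-bit DEC compares as negative rather than
 * as a huge positive number.
 */
static int64_t dec_to_s64(uint64_t raw, int large_dec)
{
	return large_dec ? (int64_t)raw : (int64_t)(int32_t)raw;
}

int main(void)
{
	printf("%lld\n", (long long)dec_to_s64(0xFFFFFFFFULL, 0));	/* -1: expired */
	printf("%lld\n", (long long)dec_to_s64(0xFFFFFFFFULL, 1));	/* 4294967295 */
	return 0;
}
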
4153 unsigned long lpcr, u64 *tb) in kvmhv_p9_guest_entry() argument
4173 trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb); in kvmhv_p9_guest_entry()
4185 trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); in kvmhv_p9_guest_entry()
4192 trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); in kvmhv_p9_guest_entry()
4604 unsigned long lpcr) in kvmhv_run_single_vcpu() argument
4687 lpcr |= LPCR_MER; in kvmhv_run_single_vcpu()
4713 trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr, &tb); in kvmhv_run_single_vcpu()
4886 vcpu->arch.vcore->lpcr); in kvmppc_vcpu_run_hv()
5132 * Update LPCR values in kvm->arch and in vcores.
5134 * of kvm->arch.lpcr update).
5136 void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) in kvmppc_update_lpcr() argument
5141 if ((kvm->arch.lpcr & mask) == lpcr) in kvmppc_update_lpcr()
5144 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
5152 vc->lpcr = (vc->lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
5153 verify_lpcr(kvm, vc->lpcr); in kvmppc_update_lpcr()
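
Lines 5136-5153 are kvmppc_update_lpcr(), the helper the remaining hits keep calling: if the bits selected by the mask already hold the requested value nothing happens; otherwise kvm->arch.lpcr is updated and the same masked change is pushed into every vcore, with verify_lpcr() confirming the result would survive filtering. A sketch of the core read-modify-write, leaving out the locking and the per-vcore loop:

/*
 * Bits selected by 'mask' take the new value, everything else is preserved,
 * and nothing changes if the selected bits already match.
 */
static unsigned long lpcr_masked_update(unsigned long cur, unsigned long val,
					unsigned long mask)
{
	if ((cur & mask) == (val & mask))
		return cur;				/* nothing to do */
	return (cur & ~mask) | (val & mask);
}
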
5192 unsigned long lpcr = 0, senc; in kvmppc_hv_setup_htab_rma() local
5251 /* Update VRMASD field in the LPCR */ in kvmppc_hv_setup_htab_rma()
5254 lpcr = senc << (LPCR_VRMASD_SH - 4); in kvmppc_hv_setup_htab_rma()
5255 kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD); in kvmppc_hv_setup_htab_rma()
5258 /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */ in kvmppc_hv_setup_htab_rma()
5277 unsigned long lpcr, lpcr_mask; in kvmppc_switch_mmu_to_hpt() local
5289 lpcr = LPCR_VPM1; in kvmppc_switch_mmu_to_hpt()
5293 kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); in kvmppc_switch_mmu_to_hpt()
5304 unsigned long lpcr, lpcr_mask; in kvmppc_switch_mmu_to_radix() local
5317 lpcr = LPCR_UPRT | LPCR_GTSE | LPCR_HR; in kvmppc_switch_mmu_to_radix()
5323 lpcr |= LPCR_HAIL; in kvmppc_switch_mmu_to_radix()
5325 kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); in kvmppc_switch_mmu_to_radix()
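
Lines 5277-5293 and 5304-5325 are the two MMU mode switches, and both hand kvmppc_update_lpcr() a (value, mask) pair of which the listing only shows the value. Assuming the mask covers the union of both modes' bits, which is what lets a single call flip between them, the two transitions look roughly like this (placeholder constants, not the architected LPCR layout):

/* Placeholder bit values; the real definitions are the kernel's LPCR_* macros in asm/reg.h. */
#define LPCR_VPM1	(1UL << 0)
#define LPCR_UPRT	(1UL << 1)
#define LPCR_GTSE	(1UL << 2)
#define LPCR_HR		(1UL << 3)
#define LPCR_HAIL	(1UL << 4)

struct lpcr_change {
	unsigned long val;	/* bits to set within the mask */
	unsigned long mask;	/* bits the update is allowed to change */
};

/* To HPT: VPM1 on, radix bits off (they are in the mask but not the value). */
static const struct lpcr_change to_hpt = {
	.val  = LPCR_VPM1,
	.mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR | LPCR_HAIL,
};

/* To radix: radix bits on (HAIL too, where the host runs with it), VPM1 off. */
static const struct lpcr_change to_radix = {
	.val  = LPCR_UPRT | LPCR_GTSE | LPCR_HR | LPCR_HAIL,
	.mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR | LPCR_HAIL,
};
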
5415 unsigned long lpcr, lpid; in kvmppc_core_init_vm_hv() local
5450 /* Init LPCR for virtual RMA mode */ in kvmppc_core_init_vm_hv()
5453 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); in kvmppc_core_init_vm_hv()
5454 lpcr &= LPCR_PECE | LPCR_LPES; in kvmppc_core_init_vm_hv()
5460 lpcr = 0; in kvmppc_core_init_vm_hv()
5462 lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | in kvmppc_core_init_vm_hv()
5468 lpcr |= LPCR_ONL; in kvmppc_core_init_vm_hv()
5474 * EE in HV mode with this LPCR still set) in kvmppc_core_init_vm_hv()
5477 lpcr &= ~LPCR_VPM0; in kvmppc_core_init_vm_hv()
5478 lpcr |= LPCR_HVICE | LPCR_HEIC; in kvmppc_core_init_vm_hv()
5485 lpcr |= LPCR_LPES; in kvmppc_core_init_vm_hv()
5494 lpcr &= ~LPCR_VPM1; in kvmppc_core_init_vm_hv()
5495 lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR; in kvmppc_core_init_vm_hv()
5499 lpcr |= LPCR_HAIL; in kvmppc_core_init_vm_hv()
5508 verify_lpcr(kvm, lpcr); in kvmppc_core_init_vm_hv()
5509 kvm->arch.lpcr = lpcr; in kvmppc_core_init_vm_hv()
5938 unsigned long lpcr; in kvmhv_configure_mmu() local
5990 lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0; in kvmhv_configure_mmu()
5991 kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE); in kvmhv_configure_mmu()
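
Lines 5990-5991 turn the GTSE flag of the KVM_PPC_CONFIGURE_V3_MMU ioctl (which kvmhv_configure_mmu() handles) into the LPCR_GTSE bit through the same masked update. A hedged user-space sketch of issuing that ioctl for a radix guest with GTSE enabled, assuming a powerpc host and the usual KVM setup for vm_fd and the process table:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/*
 * Ask KVM for a radix guest MMU with guest translation shoot-down enabled;
 * the KVM_PPC_MMUV3_GTSE flag is what ends up as LPCR_GTSE above. vm_fd and
 * the process table address are assumed to come from the usual KVM setup.
 */
static int enable_radix_gtse(int vm_fd, __u64 process_table)
{
	struct kvm_ppc_mmuv3_cfg cfg = {
		.flags         = KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE,
		.process_table = process_table,
	};

	return ioctl(vm_fd, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
}
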