
Lines Matching full:arch (arch/powerpc/kvm/book3s_hv.c)

14  * This file is derived from arch/powerpc/kvm/book3s.c,
134 return kvm->arch.nested_enable && kvm_is_radix(kvm); in nesting_enabled()
241 cpu = READ_ONCE(vcpu->arch.thread_cpu); in kvmppc_fast_vcpu_kick_hv()
277 * Updates to busy_stolen are protected by arch.tbacct_lock;
307 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
319 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
320 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && in kvmppc_core_vcpu_load_hv()
321 vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
322 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
323 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
325 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
330 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv()
336 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
337 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
338 vcpu->arch.busy_preempt = mftb(); in kvmppc_core_vcpu_put_hv()
339 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
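The load/put pair above (lines 319-339) implements per-vCPU stolen-time accounting: put() timestamps the preemption point in busy_preempt, and the next load() folds the elapsed interval into busy_stolen, all under tbacct_lock. A minimal user-space model of the pattern (hypothetical names; a pthread mutex stands in for the kernel spinlock, CLOCK_MONOTONIC for mftb(), and the KVMPPC_VCPU_BUSY_IN_HOST state check is omitted):

    #include <stdint.h>
    #include <pthread.h>
    #include <time.h>

    #define TB_NIL (~(uint64_t)0)   /* sentinel: no preemption interval open */

    struct vcpu_acct {              /* stand-in for the vcpu->arch fields */
        pthread_mutex_t tbacct_lock;
        uint64_t busy_stolen;       /* accumulated stolen time */
        uint64_t busy_preempt;      /* timestamp of last put(), or TB_NIL */
    };

    static uint64_t mftb(void)      /* model of the timebase read */
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
    }

    /* analogue of kvmppc_core_vcpu_put_hv(): open a stolen-time interval */
    static void vcpu_put(struct vcpu_acct *v)
    {
        pthread_mutex_lock(&v->tbacct_lock);
        v->busy_preempt = mftb();
        pthread_mutex_unlock(&v->tbacct_lock);
    }

    /* analogue of kvmppc_core_vcpu_load_hv(): close it and accumulate */
    static void vcpu_load(struct vcpu_acct *v)
    {
        pthread_mutex_lock(&v->tbacct_lock);
        if (v->busy_preempt != TB_NIL) {
            v->busy_stolen += mftb() - v->busy_preempt;
            v->busy_preempt = TB_NIL;
        }
        pthread_mutex_unlock(&v->tbacct_lock);
    }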
344 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_hv()
353 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat()
414 vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap); in kvmppc_dump_regs()
420 vcpu->arch.regs.ctr, vcpu->arch.regs.link); in kvmppc_dump_regs()
422 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); in kvmppc_dump_regs()
424 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); in kvmppc_dump_regs()
426 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); in kvmppc_dump_regs()
428 vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); in kvmppc_dump_regs()
429 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); in kvmppc_dump_regs()
431 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_dump_regs()
432 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); in kvmppc_dump_regs()
433 for (r = 0; r < vcpu->arch.slb_max; ++r) in kvmppc_dump_regs()
435 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); in kvmppc_dump_regs()
437 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
438 vcpu->arch.last_inst); in kvmppc_dump_regs()
458 spin_lock(&vcpu->arch.vpa_update_lock); in set_vpa()
464 spin_unlock(&vcpu->arch.vpa_update_lock); in set_vpa()
527 spin_lock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
540 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
551 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
554 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
561 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
564 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
571 if (vpa_is_registered(&tvcpu->arch.dtl) || in do_h_register_vpa()
572 vpa_is_registered(&tvcpu->arch.slb_shadow)) in do_h_register_vpa()
575 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
580 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
585 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
596 spin_unlock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
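The do_h_register_vpa() matches above encode an ordering rule: a DTL or SLB-shadow area may only be registered while the VPA itself is registered (lines 551, 561), and the VPA may not be unregistered while either aux area remains (lines 571-572). A tiny model of just that rule, with hypothetical names:

    #include <stdbool.h>

    struct vpa_slot { bool registered; };
    struct vcpu_vpa_state { struct vpa_slot vpa, dtl, slb_shadow; };

    /* DTL / SLB shadow may be registered only once the VPA is */
    static bool may_register_aux(const struct vcpu_vpa_state *s)
    {
        return s->vpa.registered;
    }

    /* the VPA may be unregistered only after both aux areas are gone */
    static bool may_unregister_vpa(const struct vcpu_vpa_state *s)
    {
        return !s->dtl.registered && !s->slb_shadow.registered;
    }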
618 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
623 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
653 if (!(vcpu->arch.vpa.update_pending || in kvmppc_update_vpas()
654 vcpu->arch.slb_shadow.update_pending || in kvmppc_update_vpas()
655 vcpu->arch.dtl.update_pending)) in kvmppc_update_vpas()
658 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
659 if (vcpu->arch.vpa.update_pending) { in kvmppc_update_vpas()
660 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); in kvmppc_update_vpas()
661 if (vcpu->arch.vpa.pinned_addr) in kvmppc_update_vpas()
662 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); in kvmppc_update_vpas()
664 if (vcpu->arch.dtl.update_pending) { in kvmppc_update_vpas()
665 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); in kvmppc_update_vpas()
666 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; in kvmppc_update_vpas()
667 vcpu->arch.dtl_index = 0; in kvmppc_update_vpas()
669 if (vcpu->arch.slb_shadow.update_pending) in kvmppc_update_vpas()
670 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); in kvmppc_update_vpas()
671 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
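kvmppc_update_vpas() (lines 653-671) is a double-checked fast path: the three update_pending flags are peeked without the lock, since on most guest entries nothing is pending, and vpa_update_lock is taken only when an update might be needed, with each flag re-tested under the lock. A sketch of that shape, assuming hypothetical types:

    #include <stdbool.h>
    #include <pthread.h>

    struct vpa { bool update_pending; };
    struct vcpu_vpas {
        pthread_mutex_t lock;            /* models vpa_update_lock */
        struct vpa vpa, slb_shadow, dtl;
    };

    static void update_vpas(struct vcpu_vpas *v)
    {
        /* lock-free peek: the common case is nothing to do */
        if (!(v->vpa.update_pending || v->slb_shadow.update_pending ||
              v->dtl.update_pending))
            return;

        pthread_mutex_lock(&v->lock);
        if (v->vpa.update_pending)        { /* pin the new VPA here */        v->vpa.update_pending = false; }
        if (v->dtl.update_pending)        { /* pin the new DTL here */        v->dtl.update_pending = false; }
        if (v->slb_shadow.update_pending) { /* pin the new SLB shadow here */ v->slb_shadow.update_pending = false; }
        pthread_mutex_unlock(&v->lock);
    }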
702 dt = vcpu->arch.dtl_ptr; in kvmppc_create_dtl_entry()
703 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_create_dtl_entry()
706 stolen = core_stolen - vcpu->arch.stolen_logged; in kvmppc_create_dtl_entry()
707 vcpu->arch.stolen_logged = core_stolen; in kvmppc_create_dtl_entry()
708 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_create_dtl_entry()
709 stolen += vcpu->arch.busy_stolen; in kvmppc_create_dtl_entry()
710 vcpu->arch.busy_stolen = 0; in kvmppc_create_dtl_entry()
711 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_create_dtl_entry()
716 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid); in kvmppc_create_dtl_entry()
720 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); in kvmppc_create_dtl_entry()
722 if (dt == vcpu->arch.dtl.pinned_end) in kvmppc_create_dtl_entry()
723 dt = vcpu->arch.dtl.pinned_addr; in kvmppc_create_dtl_entry()
724 vcpu->arch.dtl_ptr = dt; in kvmppc_create_dtl_entry()
727 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); in kvmppc_create_dtl_entry()
728 vcpu->arch.dtl.dirty = true; in kvmppc_create_dtl_entry()
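kvmppc_create_dtl_entry() (lines 702-728) treats the dispatch trace log as a ring: when the write pointer reaches pinned_end it wraps back to pinned_addr (lines 722-723), while dtl_index in the VPA only ever increases, letting the guest detect how many entries it missed. A standalone model of the wrap logic, hypothetical names and a trimmed entry type:

    #include <stdint.h>

    struct dtl_entry { uint16_t processor_id; uint64_t timebase; };

    struct dtl_ring {
        struct dtl_entry *base, *end, *ptr;  /* pinned_addr / pinned_end / dtl_ptr */
        uint64_t index;                      /* dtl_index: total entries ever written */
    };

    static void dtl_push(struct dtl_ring *r, const struct dtl_entry *e)
    {
        *r->ptr = *e;
        if (++r->ptr == r->end)   /* circular buffer: wrap to the start */
            r->ptr = r->base;
        ++r->index;               /* monotonic; the guest diffs it against its
                                     read index to spot overwritten entries */
    }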
737 if (vcpu->arch.doorbell_request) in kvmppc_doorbell_pending()
745 vc = vcpu->arch.vcore; in kvmppc_doorbell_pending()
752 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) in kvmppc_power8_compatible()
754 if ((!vcpu->arch.vcore->arch_compat) && in kvmppc_power8_compatible()
775 vcpu->arch.ciabr = value1; in kvmppc_h_set_mode()
786 vcpu->arch.dawr = value1; in kvmppc_h_set_mode()
787 vcpu->arch.dawrx = value2; in kvmppc_h_set_mode()
875 struct kvmppc_vcore *vcore = target->arch.vcore; in kvm_arch_vcpu_yield_to()
886 if (target->arch.state == KVMPPC_VCPU_RUNNABLE && in kvm_arch_vcpu_yield_to()
900 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
901 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; in kvmppc_get_yield_count()
904 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
917 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) in kvmppc_pseries_do_hcall()
930 tvcpu->arch.prodded = 1; in kvmppc_pseries_do_hcall()
932 if (tvcpu->arch.ceded) in kvmppc_pseries_do_hcall()
955 if (list_empty(&vcpu->kvm->arch.rtas_tokens)) in kvmppc_pseries_do_hcall()
1041 if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4])) in kvmppc_pseries_do_hcall()
1057 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1061 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1111 * Instead the kvm->arch.secure_guest flag is checked inside in kvmppc_pseries_do_hcall()
1121 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
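Line 917 shows how per-VM hcall enablement is checked: hypervisor call numbers are multiples of 4, so bit req/4 of kvm->arch.enabled_hcalls tracks each one. A user-space sketch of that bitmap test (MAX_HCALL here is a hypothetical bound, not the kernel's constant):

    #include <stdint.h>
    #include <stdbool.h>

    #define MAX_HCALL 0x450   /* hypothetical upper bound on hcall numbers */

    static uint64_t enabled_hcalls[(MAX_HCALL / 4 + 63) / 64];

    static bool hcall_enabled(unsigned int req)
    {
        unsigned int bit = req / 4;   /* hcall numbers step by 4 */
        return enabled_hcalls[bit / 64] & (1ull << (bit % 64));
    }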
1133 vcpu->arch.shregs.msr |= MSR_EE; in kvmppc_nested_cede()
1134 vcpu->arch.ceded = 1; in kvmppc_nested_cede()
1136 if (vcpu->arch.prodded) { in kvmppc_nested_cede()
1137 vcpu->arch.prodded = 0; in kvmppc_nested_cede()
1139 vcpu->arch.ceded = 0; in kvmppc_nested_cede()
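kvmppc_nested_cede() (lines 1133-1139) shows the cede/prod handshake: H_CEDE marks the vcpu ceded with MSR_EE set, but if an H_PROD from another vcpu already set prodded (line 930), the prod is consumed and the cede is cancelled so the vcpu stays runnable. A simplified C11 model, hypothetical types, ignoring the actual wakeup mechanism:

    #include <stdatomic.h>

    struct cede_state { atomic_int ceded, prodded; };

    /* H_PROD from another vcpu: record the prod */
    static void h_prod(struct cede_state *target)
    {
        atomic_store(&target->prodded, 1);
    }

    /* H_CEDE: go idle unless a prod already arrived; consuming it wins */
    static void h_cede(struct cede_state *self)
    {
        atomic_store(&self->ceded, 1);
        if (atomic_exchange(&self->prodded, 0))
            atomic_store(&self->ceded, 0);
    }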
1184 vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); in kvmppc_emulate_debug_inst()
1202 nthreads = vcpu->kvm->arch.emul_smt_mode; in kvmppc_read_dpdes()
1241 thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1); in kvmppc_emulate_doorbell_instr()
1248 if (arg >= kvm->arch.emul_smt_mode) in kvmppc_emulate_doorbell_instr()
1253 if (!tvcpu->arch.doorbell_request) { in kvmppc_emulate_doorbell_instr()
1254 tvcpu->arch.doorbell_request = 1; in kvmppc_emulate_doorbell_instr()
1262 vcpu->arch.vcore->dpdes = 0; in kvmppc_emulate_doorbell_instr()
1263 vcpu->arch.doorbell_request = 0; in kvmppc_emulate_doorbell_instr()
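kvmppc_emulate_doorbell_instr() (lines 1241-1263) maps a msgsndp target onto the emulated SMT core: the sender's thread index is vcpu_id & (emul_smt_mode - 1), the argument must name a thread below emul_smt_mode, and the target is the vcpu on the same emulated core with that thread index. A sketch of the ID arithmetic, assuming emul_smt_mode is a power of two (the mode-setting path requires this):

    /* returns the target vcpu_id, or -1 if arg names no thread on the core */
    static int doorbell_target_id(int sender_id, unsigned int arg,
                                  unsigned int emul_smt_mode)
    {
        if (arg >= emul_smt_mode)
            return -1;
        /* clear the sender's thread bits, substitute the target thread */
        return (sender_id & ~(int)(emul_smt_mode - 1)) | (int)arg;
    }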
1301 if (vcpu->arch.shregs.msr & MSR_HV) { in kvmppc_handle_exit_hv()
1304 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1305 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
1308 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1313 switch (vcpu->arch.trap) { in kvmppc_handle_exit_hv()
1333 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); in kvmppc_handle_exit_hv()
1341 if (!vcpu->kvm->arch.fwnmi_enabled) { in kvmppc_handle_exit_hv()
1342 ulong flags = vcpu->arch.shregs.msr & 0x083c0000; in kvmppc_handle_exit_hv()
1350 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1354 if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED) in kvmppc_handle_exit_hv()
1370 flags = vcpu->arch.shregs.msr & 0x1f0000ull; in kvmppc_handle_exit_hv()
1388 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_hv()
1403 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_exit_hv()
1404 vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr & in kvmppc_handle_exit_hv()
1406 if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) in kvmppc_handle_exit_hv()
1407 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_exit_hv()
1418 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) in kvmppc_handle_exit_hv()
1419 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? in kvmppc_handle_exit_hv()
1420 swab32(vcpu->arch.emul_inst) : in kvmppc_handle_exit_hv()
1421 vcpu->arch.emul_inst; in kvmppc_handle_exit_hv()
1438 if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) && in kvmppc_handle_exit_hv()
1465 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1466 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
1467 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1490 if (vcpu->arch.shregs.msr & MSR_HV) { in kvmppc_handle_nested_exit()
1493 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_nested_exit()
1494 vcpu->arch.shregs.msr); in kvmppc_handle_nested_exit()
1498 switch (vcpu->arch.trap) { in kvmppc_handle_nested_exit()
1523 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); in kvmppc_handle_nested_exit()
1537 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_nested_exit()
1538 vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) & in kvmppc_handle_nested_exit()
1540 if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) in kvmppc_handle_nested_exit()
1541 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_nested_exit()
1560 vcpu->arch.trap = 0; in kvmppc_handle_nested_exit()
1579 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_hv()
1580 for (i = 0; i < vcpu->arch.slb_max; i++) { in kvm_arch_vcpu_ioctl_get_sregs_hv()
1581 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; in kvm_arch_vcpu_ioctl_get_sregs_hv()
1582 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_hv()
1594 if (sregs->pvr != vcpu->arch.pvr) in kvm_arch_vcpu_ioctl_set_sregs_hv()
1598 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
1600 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; in kvm_arch_vcpu_ioctl_set_sregs_hv()
1601 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; in kvm_arch_vcpu_ioctl_set_sregs_hv()
1605 vcpu->arch.slb_max = j; in kvm_arch_vcpu_ioctl_set_sregs_hv()
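The set_sregs path (lines 1598-1605) scans all slb_nr source slots but copies only valid entries, compacting them to the front and recording the count in slb_max. A standalone sketch of that filter; SLB_ESID_V is the architected valid bit in the ESID word:

    #include <stdint.h>

    #define SLB_ESID_V (1ull << 27)   /* valid bit of the SLB ESID doubleword */

    struct slbe { uint64_t orige, origv; };

    /* copy valid entries to the front of dst; the return value is slb_max */
    static unsigned int slb_copy_valid(struct slbe *dst, const struct slbe *src,
                                       unsigned int nr)
    {
        unsigned int i, j = 0;

        for (i = 0; i < nr; i++)
            if (src[i].orige & SLB_ESID_V)
                dst[j++] = src[i];
        return j;
    }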
1614 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr()
1627 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
1630 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr()
1632 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr()
1672 *val = get_reg_val(id, vcpu->arch.dabr); in kvmppc_get_one_reg_hv()
1675 *val = get_reg_val(id, vcpu->arch.dabrx); in kvmppc_get_one_reg_hv()
1678 *val = get_reg_val(id, vcpu->arch.dscr); in kvmppc_get_one_reg_hv()
1681 *val = get_reg_val(id, vcpu->arch.purr); in kvmppc_get_one_reg_hv()
1684 *val = get_reg_val(id, vcpu->arch.spurr); in kvmppc_get_one_reg_hv()
1687 *val = get_reg_val(id, vcpu->arch.amr); in kvmppc_get_one_reg_hv()
1690 *val = get_reg_val(id, vcpu->arch.uamor); in kvmppc_get_one_reg_hv()
1694 *val = get_reg_val(id, vcpu->arch.mmcr[i]); in kvmppc_get_one_reg_hv()
1697 *val = get_reg_val(id, vcpu->arch.mmcr[2]); in kvmppc_get_one_reg_hv()
1700 *val = get_reg_val(id, vcpu->arch.mmcra); in kvmppc_get_one_reg_hv()
1703 *val = get_reg_val(id, vcpu->arch.mmcrs); in kvmppc_get_one_reg_hv()
1706 *val = get_reg_val(id, vcpu->arch.mmcr[3]); in kvmppc_get_one_reg_hv()
1710 *val = get_reg_val(id, vcpu->arch.pmc[i]); in kvmppc_get_one_reg_hv()
1714 *val = get_reg_val(id, vcpu->arch.spmc[i]); in kvmppc_get_one_reg_hv()
1717 *val = get_reg_val(id, vcpu->arch.siar); in kvmppc_get_one_reg_hv()
1720 *val = get_reg_val(id, vcpu->arch.sdar); in kvmppc_get_one_reg_hv()
1723 *val = get_reg_val(id, vcpu->arch.sier[0]); in kvmppc_get_one_reg_hv()
1726 *val = get_reg_val(id, vcpu->arch.sier[1]); in kvmppc_get_one_reg_hv()
1729 *val = get_reg_val(id, vcpu->arch.sier[2]); in kvmppc_get_one_reg_hv()
1732 *val = get_reg_val(id, vcpu->arch.iamr); in kvmppc_get_one_reg_hv()
1735 *val = get_reg_val(id, vcpu->arch.pspb); in kvmppc_get_one_reg_hv()
1744 *val = get_reg_val(id, vcpu->arch.vcore->dpdes | in kvmppc_get_one_reg_hv()
1745 vcpu->arch.doorbell_request); in kvmppc_get_one_reg_hv()
1748 *val = get_reg_val(id, vcpu->arch.vcore->vtb); in kvmppc_get_one_reg_hv()
1751 *val = get_reg_val(id, vcpu->arch.dawr); in kvmppc_get_one_reg_hv()
1754 *val = get_reg_val(id, vcpu->arch.dawrx); in kvmppc_get_one_reg_hv()
1757 *val = get_reg_val(id, vcpu->arch.ciabr); in kvmppc_get_one_reg_hv()
1760 *val = get_reg_val(id, vcpu->arch.csigr); in kvmppc_get_one_reg_hv()
1763 *val = get_reg_val(id, vcpu->arch.tacr); in kvmppc_get_one_reg_hv()
1766 *val = get_reg_val(id, vcpu->arch.tcscr); in kvmppc_get_one_reg_hv()
1769 *val = get_reg_val(id, vcpu->arch.pid); in kvmppc_get_one_reg_hv()
1772 *val = get_reg_val(id, vcpu->arch.acop); in kvmppc_get_one_reg_hv()
1775 *val = get_reg_val(id, vcpu->arch.wort); in kvmppc_get_one_reg_hv()
1778 *val = get_reg_val(id, vcpu->arch.tid); in kvmppc_get_one_reg_hv()
1781 *val = get_reg_val(id, vcpu->arch.psscr); in kvmppc_get_one_reg_hv()
1784 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1785 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); in kvmppc_get_one_reg_hv()
1786 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1789 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1790 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; in kvmppc_get_one_reg_hv()
1791 val->vpaval.length = vcpu->arch.slb_shadow.len; in kvmppc_get_one_reg_hv()
1792 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1795 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1796 val->vpaval.addr = vcpu->arch.dtl.next_gpa; in kvmppc_get_one_reg_hv()
1797 val->vpaval.length = vcpu->arch.dtl.len; in kvmppc_get_one_reg_hv()
1798 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1801 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); in kvmppc_get_one_reg_hv()
1805 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); in kvmppc_get_one_reg_hv()
1808 *val = get_reg_val(id, vcpu->arch.ppr); in kvmppc_get_one_reg_hv()
1812 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_hv()
1815 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_hv()
1818 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_hv()
1822 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); in kvmppc_get_one_reg_hv()
1830 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_hv()
1833 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_hv()
1840 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_hv()
1843 *val = get_reg_val(id, vcpu->arch.xer_tm); in kvmppc_get_one_reg_hv()
1846 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_hv()
1849 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_hv()
1852 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_hv()
1855 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_hv()
1858 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_hv()
1861 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_hv()
1865 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_hv()
1870 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_hv()
1873 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_hv()
1877 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); in kvmppc_get_one_reg_hv()
1880 *val = get_reg_val(id, vcpu->arch.dec_expires + in kvmppc_get_one_reg_hv()
1881 vcpu->arch.vcore->tb_offset); in kvmppc_get_one_reg_hv()
1884 *val = get_reg_val(id, vcpu->arch.online); in kvmppc_get_one_reg_hv()
1887 *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr); in kvmppc_get_one_reg_hv()
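All of kvmppc_get_one_reg_hv() above is the kernel side of KVM's generic ONE_REG interface: user space names a register by a 64-bit id and the kernel copies the value out through a union sized by the id. A minimal user-space consumer sketch (vcpu_fd is assumed to come from KVM_CREATE_VCPU; id would be one of the KVM_REG_PPC_* constants from the uapi headers):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* read one 64-bit register from a vcpu via KVM_GET_ONE_REG */
    static int get_one_reg(int vcpu_fd, uint64_t id, uint64_t *out)
    {
        struct kvm_one_reg reg = {
            .id   = id,
            .addr = (uint64_t)(uintptr_t)out,   /* kernel writes the value here */
        };
        return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }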
1911 vcpu->arch.dabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1914 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; in kvmppc_set_one_reg_hv()
1917 vcpu->arch.dscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1920 vcpu->arch.purr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1923 vcpu->arch.spurr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1926 vcpu->arch.amr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1929 vcpu->arch.uamor = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1933 vcpu->arch.mmcr[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1936 vcpu->arch.mmcr[2] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1939 vcpu->arch.mmcra = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1942 vcpu->arch.mmcrs = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1945 vcpu->arch.mmcr[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1949 vcpu->arch.pmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1953 vcpu->arch.spmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1956 vcpu->arch.siar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1959 vcpu->arch.sdar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1962 vcpu->arch.sier[0] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1965 vcpu->arch.sier[1] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1968 vcpu->arch.sier[2] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1971 vcpu->arch.iamr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1974 vcpu->arch.pspb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1977 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1980 vcpu->arch.vcore->vtb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1983 vcpu->arch.dawr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1986 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; in kvmppc_set_one_reg_hv()
1989 vcpu->arch.ciabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1991 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) in kvmppc_set_one_reg_hv()
1992 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ in kvmppc_set_one_reg_hv()
1995 vcpu->arch.csigr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1998 vcpu->arch.tacr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2001 vcpu->arch.tcscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2004 vcpu->arch.pid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2007 vcpu->arch.acop = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2010 vcpu->arch.wort = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2013 vcpu->arch.tid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2016 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS; in kvmppc_set_one_reg_hv()
2021 if (!addr && (vcpu->arch.slb_shadow.next_gpa || in kvmppc_set_one_reg_hv()
2022 vcpu->arch.dtl.next_gpa)) in kvmppc_set_one_reg_hv()
2024 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); in kvmppc_set_one_reg_hv()
2030 if (addr && !vcpu->arch.vpa.next_gpa) in kvmppc_set_one_reg_hv()
2032 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); in kvmppc_set_one_reg_hv()
2039 !vcpu->arch.vpa.next_gpa)) in kvmppc_set_one_reg_hv()
2042 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); in kvmppc_set_one_reg_hv()
2046 vcpu->arch.vcore->tb_offset = in kvmppc_set_one_reg_hv()
2056 vcpu->arch.ppr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2060 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2063 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2066 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2070 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2078 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_hv()
2081 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_hv()
2087 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2090 vcpu->arch.xer_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2093 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2096 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2099 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2102 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2105 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2108 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2112 vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2117 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2120 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2127 vcpu->arch.dec_expires = set_reg_val(id, *val) - in kvmppc_set_one_reg_hv()
2128 vcpu->arch.vcore->tb_offset; in kvmppc_set_one_reg_hv()
2132 if (i && !vcpu->arch.online) in kvmppc_set_one_reg_hv()
2133 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2134 else if (!i && vcpu->arch.online) in kvmppc_set_one_reg_hv()
2135 atomic_dec(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2136 vcpu->arch.online = i; in kvmppc_set_one_reg_hv()
2139 vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2158 if (kvm->arch.threads_indep) in threads_per_vcore()
2176 vcore->lpcr = kvm->arch.lpcr; in kvmppc_vcore_create()
2189 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
2190 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
2191 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
2192 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
2193 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
2316 vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir); in debugfs_vcpu_init()
2317 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, vcpu, in debugfs_vcpu_init()
2338 vcpu->arch.shared = &vcpu->arch.shregs; in kvmppc_core_vcpu_create_hv()
2345 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_hv()
2347 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_hv()
2350 vcpu->arch.mmcr[0] = MMCR0_FC; in kvmppc_core_vcpu_create_hv()
2351 vcpu->arch.ctrl = CTRL_RUNLATCH; in kvmppc_core_vcpu_create_hv()
2354 spin_lock_init(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_create_hv()
2355 spin_lock_init(&vcpu->arch.tbacct_lock); in kvmppc_core_vcpu_create_hv()
2356 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_create_hv()
2357 vcpu->arch.intr_msr = MSR_SF | MSR_ME; in kvmppc_core_vcpu_create_hv()
2366 vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB | in kvmppc_core_vcpu_create_hv()
2369 vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); in kvmppc_core_vcpu_create_hv()
2372 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_core_vcpu_create_hv()
2376 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_core_vcpu_create_hv()
2380 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_core_vcpu_create_hv()
2382 init_waitqueue_head(&vcpu->arch.cpu_run); in kvmppc_core_vcpu_create_hv()
2388 if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) { in kvmppc_core_vcpu_create_hv()
2392 BUG_ON(kvm->arch.smt_mode != 1); in kvmppc_core_vcpu_create_hv()
2396 core = id / kvm->arch.smt_mode; in kvmppc_core_vcpu_create_hv()
2399 vcore = kvm->arch.vcores[core]; in kvmppc_core_vcpu_create_hv()
2410 id & ~(kvm->arch.smt_mode - 1)); in kvmppc_core_vcpu_create_hv()
2411 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmppc_core_vcpu_create_hv()
2412 kvm->arch.vcores[core] = vcore; in kvmppc_core_vcpu_create_hv()
2413 kvm->arch.online_vcores++; in kvmppc_core_vcpu_create_hv()
2414 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmppc_core_vcpu_create_hv()
2425 vcpu->arch.vcore = vcore; in kvmppc_core_vcpu_create_hv()
2426 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; in kvmppc_core_vcpu_create_hv()
2427 vcpu->arch.thread_cpu = -1; in kvmppc_core_vcpu_create_hv()
2428 vcpu->arch.prev_cpu = -1; in kvmppc_core_vcpu_create_hv()
2430 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_core_vcpu_create_hv()
2465 if (!kvm->arch.online_vcores) { in kvmhv_set_smt_mode()
2466 kvm->arch.smt_mode = smt_mode; in kvmhv_set_smt_mode()
2467 kvm->arch.emul_smt_mode = esmt; in kvmhv_set_smt_mode()
2484 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
2485 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); in kvmppc_core_vcpu_free_hv()
2486 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); in kvmppc_core_vcpu_free_hv()
2487 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); in kvmppc_core_vcpu_free_hv()
2488 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
2502 if (now > vcpu->arch.dec_expires) { in kvmppc_set_timer()
2508 dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now); in kvmppc_set_timer()
2509 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); in kvmppc_set_timer()
2510 vcpu->arch.timer_running = 1; in kvmppc_set_timer()
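kvmppc_set_timer() (lines 2502-2510) arms an hrtimer for the guest decrementer: if dec_expires is already past it delivers immediately, otherwise it converts the remaining timebase ticks to nanoseconds and starts a relative timer. A model of the tick-to-ns conversion, assuming the common 512 MHz POWER timebase (the kernel's tb_to_ns() uses a precomputed scale/shift rather than a division):

    #include <stdint.h>

    #define TB_FREQ_HZ 512000000ull   /* assumed timebase frequency */

    static uint64_t tb_to_ns(uint64_t tb_ticks)
    {
        /* split the conversion to avoid 64-bit overflow on long intervals */
        return (tb_ticks / TB_FREQ_HZ) * 1000000000ull
             + (tb_ticks % TB_FREQ_HZ) * 1000000000ull / TB_FREQ_HZ;
    }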
2520 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_remove_runnable()
2522 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
2524 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
2525 vcpu->arch.stolen_logged; in kvmppc_remove_runnable()
2526 vcpu->arch.busy_preempt = now; in kvmppc_remove_runnable()
2527 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_remove_runnable()
2528 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
2530 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL); in kvmppc_remove_runnable()
2580 struct kvm_nested_guest *nested = vcpu->arch.nested; in radix_flush_cpu()
2589 cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush); in radix_flush_cpu()
2590 cpu_in_guest = &kvm->arch.cpu_in_guest; in radix_flush_cpu()
2606 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmppc_prepare_radix_vcpu()
2614 prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id]; in kvmppc_prepare_radix_vcpu()
2616 prev_cpu = vcpu->arch.prev_cpu; in kvmppc_prepare_radix_vcpu()
2636 nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu; in kvmppc_prepare_radix_vcpu()
2638 vcpu->arch.prev_cpu = pcpu; in kvmppc_prepare_radix_vcpu()
2650 if (vcpu->arch.timer_running) { in kvmppc_start_thread()
2651 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_start_thread()
2652 vcpu->arch.timer_running = 0; in kvmppc_start_thread()
2654 cpu += vcpu->arch.ptid; in kvmppc_start_thread()
2656 vcpu->arch.thread_cpu = cpu; in kvmppc_start_thread()
2657 cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest); in kvmppc_start_thread()
2898 if (signal_pending(vcpu->arch.run_task)) in prepare_threads()
2899 vcpu->arch.ret = -EINTR; in prepare_threads()
2900 else if (vcpu->arch.vpa.update_pending || in prepare_threads()
2901 vcpu->arch.slb_shadow.update_pending || in prepare_threads()
2902 vcpu->arch.dtl.update_pending) in prepare_threads()
2903 vcpu->arch.ret = RESUME_GUEST; in prepare_threads()
2907 wake_up(&vcpu->arch.cpu_run); in prepare_threads()
2921 if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) { in collect_piggybacks()
2950 if (!vc->kvm->arch.mmu_ready) in recheck_signals_and_mmu()
2953 if (signal_pending(vcpu->arch.run_task)) in recheck_signals_and_mmu()
2973 * so any vcpus becoming runnable will have their arch.trap in post_guest_process()
2978 if (now < vcpu->arch.dec_expires && in post_guest_process()
2985 if (vcpu->arch.trap) in post_guest_process()
2987 vcpu->arch.run_task); in post_guest_process()
2989 vcpu->arch.ret = ret; in post_guest_process()
2990 vcpu->arch.trap = 0; in post_guest_process()
2993 if (is_kvmppc_resume_guest(vcpu->arch.ret)) { in post_guest_process()
2994 if (vcpu->arch.pending_exceptions) in post_guest_process()
2996 if (vcpu->arch.ceded) in post_guest_process()
3002 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3018 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3114 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_core()
3142 (hpt_on_radix && vc->kvm->arch.threads_indep)) { in kvmppc_run_core()
3144 vcpu->arch.ret = -EBUSY; in kvmppc_run_core()
3146 wake_up(&vcpu->arch.cpu_run); in kvmppc_run_core()
3235 split_info.lpidr_req = vc->kvm->arch.lpid; in kvmppc_run_core()
3236 split_info.host_lpcr = vc->kvm->arch.host_lpcr; in kvmppc_run_core()
3300 if (!vcpu->arch.ptid) in kvmppc_run_core()
3302 active |= 1 << (thr + vcpu->arch.ptid); in kvmppc_run_core()
3410 cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest); in kvmppc_run_core()
3438 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmhv_load_hv_regs_and_go()
3453 mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr | LPCR_HDICE); in kvmhv_load_hv_regs_and_go()
3458 mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr); in kvmhv_load_hv_regs_and_go()
3480 mtspr(SPRN_PURR, vcpu->arch.purr); in kvmhv_load_hv_regs_and_go()
3481 mtspr(SPRN_SPURR, vcpu->arch.spurr); in kvmhv_load_hv_regs_and_go()
3484 mtspr(SPRN_DAWR0, vcpu->arch.dawr); in kvmhv_load_hv_regs_and_go()
3485 mtspr(SPRN_DAWRX0, vcpu->arch.dawrx); in kvmhv_load_hv_regs_and_go()
3487 mtspr(SPRN_CIABR, vcpu->arch.ciabr); in kvmhv_load_hv_regs_and_go()
3488 mtspr(SPRN_IC, vcpu->arch.ic); in kvmhv_load_hv_regs_and_go()
3489 mtspr(SPRN_PID, vcpu->arch.pid); in kvmhv_load_hv_regs_and_go()
3491 mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC | in kvmhv_load_hv_regs_and_go()
3494 mtspr(SPRN_HFSCR, vcpu->arch.hfscr); in kvmhv_load_hv_regs_and_go()
3496 mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0); in kvmhv_load_hv_regs_and_go()
3497 mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1); in kvmhv_load_hv_regs_and_go()
3498 mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2); in kvmhv_load_hv_regs_and_go()
3499 mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3); in kvmhv_load_hv_regs_and_go()
3508 mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0); in kvmhv_load_hv_regs_and_go()
3509 mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1); in kvmhv_load_hv_regs_and_go()
3517 purr - vcpu->arch.purr); in kvmhv_load_hv_regs_and_go()
3519 spurr - vcpu->arch.spurr); in kvmhv_load_hv_regs_and_go()
3520 vcpu->arch.purr = purr; in kvmhv_load_hv_regs_and_go()
3521 vcpu->arch.spurr = spurr; in kvmhv_load_hv_regs_and_go()
3523 vcpu->arch.ic = mfspr(SPRN_IC); in kvmhv_load_hv_regs_and_go()
3524 vcpu->arch.pid = mfspr(SPRN_PID); in kvmhv_load_hv_regs_and_go()
3525 vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS; in kvmhv_load_hv_regs_and_go()
3527 vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0); in kvmhv_load_hv_regs_and_go()
3528 vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1); in kvmhv_load_hv_regs_and_go()
3529 vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2); in kvmhv_load_hv_regs_and_go()
3530 vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3); in kvmhv_load_hv_regs_and_go()
3554 mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */ in kvmhv_load_hv_regs_and_go()
3573 mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr); in kvmhv_load_hv_regs_and_go()
3585 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmhv_p9_guest_entry()
3603 vcpu->arch.ceded = 0; in kvmhv_p9_guest_entry()
3612 if (vcpu->arch.vpa.pinned_addr) { in kvmhv_p9_guest_entry()
3613 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in kvmhv_p9_guest_entry()
3616 vcpu->arch.vpa.dirty = 1; in kvmhv_p9_guest_entry()
3621 kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true); in kvmhv_p9_guest_entry()
3626 if (vcpu->arch.vpa.pinned_addr) { in kvmhv_p9_guest_entry()
3627 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in kvmhv_p9_guest_entry()
3638 load_fp_state(&vcpu->arch.fp); in kvmhv_p9_guest_entry()
3640 load_vr_state(&vcpu->arch.vr); in kvmhv_p9_guest_entry()
3642 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvmhv_p9_guest_entry()
3644 mtspr(SPRN_DSCR, vcpu->arch.dscr); in kvmhv_p9_guest_entry()
3645 mtspr(SPRN_IAMR, vcpu->arch.iamr); in kvmhv_p9_guest_entry()
3646 mtspr(SPRN_PSPB, vcpu->arch.pspb); in kvmhv_p9_guest_entry()
3647 mtspr(SPRN_FSCR, vcpu->arch.fscr); in kvmhv_p9_guest_entry()
3648 mtspr(SPRN_TAR, vcpu->arch.tar); in kvmhv_p9_guest_entry()
3649 mtspr(SPRN_EBBHR, vcpu->arch.ebbhr); in kvmhv_p9_guest_entry()
3650 mtspr(SPRN_EBBRR, vcpu->arch.ebbrr); in kvmhv_p9_guest_entry()
3651 mtspr(SPRN_BESCR, vcpu->arch.bescr); in kvmhv_p9_guest_entry()
3652 mtspr(SPRN_WORT, vcpu->arch.wort); in kvmhv_p9_guest_entry()
3653 mtspr(SPRN_TIDR, vcpu->arch.tid); in kvmhv_p9_guest_entry()
3654 mtspr(SPRN_DAR, vcpu->arch.shregs.dar); in kvmhv_p9_guest_entry()
3655 mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); in kvmhv_p9_guest_entry()
3656 mtspr(SPRN_AMR, vcpu->arch.amr); in kvmhv_p9_guest_entry()
3657 mtspr(SPRN_UAMOR, vcpu->arch.uamor); in kvmhv_p9_guest_entry()
3659 if (!(vcpu->arch.ctrl & 1)) in kvmhv_p9_guest_entry()
3662 mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb()); in kvmhv_p9_guest_entry()
3676 mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr); in kvmhv_p9_guest_entry()
3679 vcpu->arch.regs.msr = vcpu->arch.shregs.msr; in kvmhv_p9_guest_entry()
3681 if (vcpu->arch.nested) { in kvmhv_p9_guest_entry()
3682 hvregs.lpid = vcpu->arch.nested->shadow_lpid; in kvmhv_p9_guest_entry()
3683 hvregs.vcpu_token = vcpu->arch.nested_vcpu_id; in kvmhv_p9_guest_entry()
3685 hvregs.lpid = vcpu->kvm->arch.lpid; in kvmhv_p9_guest_entry()
3690 __pa(&vcpu->arch.regs)); in kvmhv_p9_guest_entry()
3692 vcpu->arch.shregs.msr = vcpu->arch.regs.msr; in kvmhv_p9_guest_entry()
3693 vcpu->arch.shregs.dar = mfspr(SPRN_DAR); in kvmhv_p9_guest_entry()
3694 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); in kvmhv_p9_guest_entry()
3695 vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR); in kvmhv_p9_guest_entry()
3699 if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && in kvmhv_p9_guest_entry()
3709 vcpu->arch.slb_max = 0; in kvmhv_p9_guest_entry()
3714 vcpu->arch.dec_expires = dec + tb; in kvmhv_p9_guest_entry()
3716 vcpu->arch.thread_cpu = -1; in kvmhv_p9_guest_entry()
3718 vcpu->arch.ctrl = mfspr(SPRN_CTRLF); in kvmhv_p9_guest_entry()
3719 if (!(vcpu->arch.ctrl & 1)) in kvmhv_p9_guest_entry()
3720 mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1); in kvmhv_p9_guest_entry()
3722 vcpu->arch.iamr = mfspr(SPRN_IAMR); in kvmhv_p9_guest_entry()
3723 vcpu->arch.pspb = mfspr(SPRN_PSPB); in kvmhv_p9_guest_entry()
3724 vcpu->arch.fscr = mfspr(SPRN_FSCR); in kvmhv_p9_guest_entry()
3725 vcpu->arch.tar = mfspr(SPRN_TAR); in kvmhv_p9_guest_entry()
3726 vcpu->arch.ebbhr = mfspr(SPRN_EBBHR); in kvmhv_p9_guest_entry()
3727 vcpu->arch.ebbrr = mfspr(SPRN_EBBRR); in kvmhv_p9_guest_entry()
3728 vcpu->arch.bescr = mfspr(SPRN_BESCR); in kvmhv_p9_guest_entry()
3729 vcpu->arch.wort = mfspr(SPRN_WORT); in kvmhv_p9_guest_entry()
3730 vcpu->arch.tid = mfspr(SPRN_TIDR); in kvmhv_p9_guest_entry()
3731 vcpu->arch.amr = mfspr(SPRN_AMR); in kvmhv_p9_guest_entry()
3732 vcpu->arch.uamor = mfspr(SPRN_UAMOR); in kvmhv_p9_guest_entry()
3733 vcpu->arch.dscr = mfspr(SPRN_DSCR); in kvmhv_p9_guest_entry()
3743 if (host_amr != vcpu->arch.amr) in kvmhv_p9_guest_entry()
3746 if (host_fscr != vcpu->arch.fscr) in kvmhv_p9_guest_entry()
3750 store_fp_state(&vcpu->arch.fp); in kvmhv_p9_guest_entry()
3752 store_vr_state(&vcpu->arch.vr); in kvmhv_p9_guest_entry()
3754 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvmhv_p9_guest_entry()
3758 kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true); in kvmhv_p9_guest_entry()
3761 if (vcpu->arch.vpa.pinned_addr) { in kvmhv_p9_guest_entry()
3762 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in kvmhv_p9_guest_entry()
3765 vcpu->arch.vpa.dirty = 1; in kvmhv_p9_guest_entry()
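Lines 3662 and 3714 bracket how the decrementer is virtualized across kvmhv_p9_guest_entry(): on entry DEC is programmed with dec_expires - mftb(), and on exit the remaining (signed) count is read back and turned into an absolute expiry, dec_expires = dec + tb. A sketch of the two conversions, hypothetical helper names:

    #include <stdint.h>

    /* value to program into DEC on guest entry; negative means already expired */
    static int64_t dec_on_entry(uint64_t dec_expires, uint64_t tb_now)
    {
        return (int64_t)(dec_expires - tb_now);
    }

    /* absolute expiry reconstructed from the DEC count read on guest exit */
    static uint64_t expiry_on_exit(int64_t dec_now, uint64_t tb_now)
    {
        return tb_now + (uint64_t)dec_now;
    }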
3802 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); in kvmppc_wait_for_exec()
3803 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_wait_for_exec()
3808 finish_wait(&vcpu->arch.cpu_run, &wait); in kvmppc_wait_for_exec()
3834 return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < in xive_interrupt_pending()
3835 vcpu->arch.xive_saved_state.cppr; in xive_interrupt_pending()
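xive_interrupt_pending() (lines 3834-3835) compares two saved XIVE priorities: an interrupt is deliverable only when the pending interrupt priority (PIPR) is more favoured, i.e. numerically lower, than the current processor priority (CPPR). A one-function model:

    #include <stdint.h>
    #include <stdbool.h>

    struct xive_saved { uint8_t pipr, cppr; };   /* 0 = most favoured priority */

    static bool xive_irq_pending(const struct xive_saved *s)
    {
        return s->pipr < s->cppr;   /* pending prio beats the current threshold */
    }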
3846 if (vcpu->arch.pending_exceptions || vcpu->arch.prodded || in kvmppc_vcpu_woken()
3863 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) in kvmppc_vcore_check_block()
3980 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_setup_mmu()
3981 if (!kvm->arch.mmu_ready) { in kvmhv_setup_mmu()
3987 kvm->arch.mmu_ready = 1; in kvmhv_setup_mmu()
3990 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_setup_mmu()
4004 vcpu->arch.ret = RESUME_GUEST; in kvmppc_run_vcpu()
4005 vcpu->arch.trap = 0; in kvmppc_run_vcpu()
4011 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
4013 vcpu->arch.ceded = 0; in kvmppc_run_vcpu()
4014 vcpu->arch.run_task = current; in kvmppc_run_vcpu()
4015 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
4016 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmppc_run_vcpu()
4017 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_run_vcpu()
4018 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu); in kvmppc_run_vcpu()
4039 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4042 if (!vcpu->kvm->arch.mmu_ready) { in kvmppc_run_vcpu()
4050 vcpu->arch.ret = r; in kvmppc_run_vcpu()
4064 if (signal_pending(v->arch.run_task)) { in kvmppc_run_vcpu()
4068 v->arch.ret = -EINTR; in kvmppc_run_vcpu()
4069 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
4072 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
4077 n_ceded += v->arch.ceded; in kvmppc_run_vcpu()
4079 v->arch.ceded = 0; in kvmppc_run_vcpu()
4096 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4105 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_run_vcpu()
4109 vcpu->arch.ret = -EINTR; in kvmppc_run_vcpu()
4116 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
4121 return vcpu->arch.ret; in kvmppc_run_vcpu()
4132 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmhv_run_single_vcpu()
4137 vcpu->arch.ret = RESUME_GUEST; in kvmhv_run_single_vcpu()
4138 vcpu->arch.trap = 0; in kvmhv_run_single_vcpu()
4140 vc = vcpu->arch.vcore; in kvmhv_run_single_vcpu()
4141 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
4142 vcpu->arch.run_task = current; in kvmhv_run_single_vcpu()
4143 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmhv_run_single_vcpu()
4144 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmhv_run_single_vcpu()
4145 vcpu->arch.busy_preempt = TB_NIL; in kvmhv_run_single_vcpu()
4146 vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; in kvmhv_run_single_vcpu()
4152 if (!kvm->arch.mmu_ready) in kvmhv_run_single_vcpu()
4172 if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready) in kvmhv_run_single_vcpu()
4177 if (vcpu->arch.doorbell_request) { in kvmhv_run_single_vcpu()
4180 vcpu->arch.doorbell_request = 0; in kvmhv_run_single_vcpu()
4183 &vcpu->arch.pending_exceptions)) in kvmhv_run_single_vcpu()
4185 } else if (vcpu->arch.pending_exceptions || in kvmhv_run_single_vcpu()
4186 vcpu->arch.doorbell_request || in kvmhv_run_single_vcpu()
4188 vcpu->arch.ret = RESUME_HOST; in kvmhv_run_single_vcpu()
4205 lpid = nested ? nested->shadow_lpid : kvm->arch.lpid; in kvmhv_run_single_vcpu()
4221 vcpu->arch.trap = trap; in kvmhv_run_single_vcpu()
4230 mtspr(SPRN_LPID, kvm->arch.host_lpid); in kvmhv_run_single_vcpu()
4241 cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest); in kvmhv_run_single_vcpu()
4251 ((get_tb() < vcpu->arch.dec_expires) || in kvmhv_run_single_vcpu()
4264 vcpu->arch.ret = r; in kvmhv_run_single_vcpu()
4266 if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded && in kvmhv_run_single_vcpu()
4269 while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) { in kvmhv_run_single_vcpu()
4273 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
4281 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
4290 return vcpu->arch.ret; in kvmhv_run_single_vcpu()
4295 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
4312 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_hv()
4344 if (!vcpu->arch.online) { in kvmppc_vcpu_run_hv()
4345 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_vcpu_run_hv()
4346 vcpu->arch.online = 1; in kvmppc_vcpu_run_hv()
4358 atomic_inc(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
4373 vcpu->arch.waitp = &vcpu->arch.vcore->wait; in kvmppc_vcpu_run_hv()
4374 vcpu->arch.pgdir = kvm->mm->pgd; in kvmppc_vcpu_run_hv()
4375 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_vcpu_run_hv()
4386 if (kvm->arch.threads_indep && kvm_is_radix(kvm) && in kvmppc_vcpu_run_hv()
4389 vcpu->arch.vcore->lpcr); in kvmppc_vcpu_run_hv()
4394 !(vcpu->arch.shregs.msr & MSR_PR)) { in kvmppc_vcpu_run_hv()
4402 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_vcpu_run_hv()
4422 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_vcpu_run_hv()
4423 atomic_dec(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
4530 spin_lock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
4531 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
4532 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
4533 spin_unlock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
4548 vfree(slot->arch.rmap); in kvmppc_core_free_memslot_hv()
4549 slot->arch.rmap = NULL; in kvmppc_core_free_memslot_hv()
4560 unsigned long size = array_size(npages, sizeof(*slot->arch.rmap)); in kvmppc_core_prepare_memory_region_hv()
4565 slot->arch.rmap = vzalloc(size); in kvmppc_core_prepare_memory_region_hv()
4566 if (!slot->arch.rmap) in kvmppc_core_prepare_memory_region_hv()
4588 atomic64_inc(&kvm->arch.mmio_update); in kvmppc_core_commit_memory_region_hv()
4609 if (!kvm->arch.secure_guest) in kvmppc_core_commit_memory_region_hv()
4630 * Update LPCR values in kvm->arch and in vcores.
4631 * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
4632 * of kvm->arch.lpcr update).
4639 if ((kvm->arch.lpcr & mask) == lpcr) in kvmppc_update_lpcr()
4642 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
4645 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_update_lpcr()
4651 if (++cores_done >= kvm->arch.online_vcores) in kvmppc_update_lpcr()
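kvmppc_update_lpcr() (lines 4639-4651) changes only the LPCR bits selected by the caller's mask, first in kvm->arch.lpcr and then in every online vcore, stopping once online_vcores have been visited. The core bit manipulation as a pure helper (the kernel writes it inline; new_bits is masked here defensively):

    #include <stdint.h>

    static uint64_t lpcr_apply(uint64_t lpcr, uint64_t new_bits, uint64_t mask)
    {
        return (lpcr & ~mask) | (new_bits & mask);
    }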
4662 dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) | in kvmppc_setup_partition_table()
4663 ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1); in kvmppc_setup_partition_table()
4665 dw0 |= kvm->arch.sdr1; in kvmppc_setup_partition_table()
4668 dw1 = kvm->arch.process_table; in kvmppc_setup_partition_table()
4671 __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; in kvmppc_setup_partition_table()
4672 dw1 = PATB_GR | kvm->arch.process_table; in kvmppc_setup_partition_table()
4674 kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1); in kvmppc_setup_partition_table()
4679 * Must be called with kvm->arch.mmu_setup_lock held.
4693 if (!kvm->arch.hpt.virt) { in kvmppc_hv_setup_htab_rma()
4742 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | in kvmppc_hv_setup_htab_rma()
4754 /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */ in kvmppc_hv_setup_htab_rma()
4768 * Must be called with kvm->arch.mmu_setup_lock held and
4776 kvm->arch.process_table = 0; in kvmppc_switch_mmu_to_hpt()
4779 kvm->arch.radix = 0; in kvmppc_switch_mmu_to_hpt()
4788 * Must be called with kvm->arch.mmu_setup_lock held and
4801 kvm->arch.radix = 1; in kvmppc_switch_mmu_to_radix()
4803 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_switch_mmu_to_radix()
4895 mutex_init(&kvm->arch.uvmem_lock); in kvmppc_core_init_vm_hv()
4896 INIT_LIST_HEAD(&kvm->arch.uvmem_pfns); in kvmppc_core_init_vm_hv()
4897 mutex_init(&kvm->arch.mmu_setup_lock); in kvmppc_core_init_vm_hv()
4904 kvm->arch.lpid = lpid; in kvmppc_core_init_vm_hv()
4918 cpumask_setall(&kvm->arch.need_tlb_flush); in kvmppc_core_init_vm_hv()
4921 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, in kvmppc_core_init_vm_hv()
4922 sizeof(kvm->arch.enabled_hcalls)); in kvmppc_core_init_vm_hv()
4925 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); in kvmppc_core_init_vm_hv()
4929 kvm->arch.host_lpid = mfspr(SPRN_LPID); in kvmppc_core_init_vm_hv()
4930 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); in kvmppc_core_init_vm_hv()
4937 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | in kvmppc_core_init_vm_hv()
4965 kvm->arch.radix = 1; in kvmppc_core_init_vm_hv()
4966 kvm->arch.mmu_ready = 1; in kvmppc_core_init_vm_hv()
4971 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_init_vm_hv()
4977 kvm->arch.lpcr = lpcr; in kvmppc_core_init_vm_hv()
4980 kvm->arch.resize_hpt = NULL; in kvmppc_core_init_vm_hv()
4987 kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */ in kvmppc_core_init_vm_hv()
4989 kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */ in kvmppc_core_init_vm_hv()
4991 kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */ in kvmppc_core_init_vm_hv()
4993 kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */ in kvmppc_core_init_vm_hv()
5004 kvm->arch.threads_indep = true; in kvmppc_core_init_vm_hv()
5006 kvm->arch.threads_indep = indep_threads_mode; in kvmppc_core_init_vm_hv()
5009 if (!kvm->arch.threads_indep) in kvmppc_core_init_vm_hv()
5020 kvm->arch.smt_mode = threads_per_subcore; in kvmppc_core_init_vm_hv()
5022 kvm->arch.smt_mode = 1; in kvmppc_core_init_vm_hv()
5023 kvm->arch.emul_smt_mode = 1; in kvmppc_core_init_vm_hv()
5029 kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir); in kvmppc_core_init_vm_hv()
5042 kfree(kvm->arch.vcores[i]); in kvmppc_free_vcores()
5043 kvm->arch.online_vcores = 0; in kvmppc_free_vcores()
5048 debugfs_remove_recursive(kvm->arch.debugfs_dir); in kvmppc_core_destroy_vm_hv()
5050 if (!kvm->arch.threads_indep) in kvmppc_core_destroy_vm_hv()
5059 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_core_destroy_vm_hv()
5065 kvm->arch.process_table = 0; in kvmppc_core_destroy_vm_hv()
5066 if (kvm->arch.secure_guest) in kvmppc_core_destroy_vm_hv()
5067 uv_svm_terminate(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5068 kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); in kvmppc_core_destroy_vm_hv()
5071 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5112 kfree(kvm->arch.pimap); in kvmppc_free_pimap()
5137 pimap = kvm->arch.pimap; in kvmppc_set_passthru_irq()
5145 kvm->arch.pimap = pimap; in kvmppc_set_passthru_irq()
5222 if (!kvm->arch.pimap) in kvmppc_clr_passthru_irq()
5225 pimap = kvm->arch.pimap; in kvmppc_clr_passthru_irq()
5433 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_configure_mmu()
5435 if (kvm->arch.mmu_ready) { in kvmhv_configure_mmu()
5436 kvm->arch.mmu_ready = 0; in kvmhv_configure_mmu()
5439 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmhv_configure_mmu()
5440 kvm->arch.mmu_ready = 1; in kvmhv_configure_mmu()
5453 kvm->arch.process_table = cfg->process_table; in kvmhv_configure_mmu()
5461 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_configure_mmu()
5474 kvm->arch.nested_enable = true; in kvmhv_enable_nested()
5491 if (rc && vcpu->arch.nested) in kvmhv_load_from_eaddr()
5510 if (rc && vcpu->arch.nested) in kvmhv_store_to_eaddr()
5536 kvm->arch.svm_enabled = 1; in kvmhv_enable_svm()
5556 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmhv_svm_off()
5559 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_svm_off()
5560 mmu_was_ready = kvm->arch.mmu_ready; in kvmhv_svm_off()
5561 if (kvm->arch.mmu_ready) { in kvmhv_svm_off()
5562 kvm->arch.mmu_ready = 0; in kvmhv_svm_off()
5565 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmhv_svm_off()
5566 kvm->arch.mmu_ready = 1; in kvmhv_svm_off()
5582 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); in kvmhv_svm_off()
5587 ret = uv_svm_terminate(kvm->arch.lpid); in kvmhv_svm_off()
5604 spin_lock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()
5605 unpin_vpa_reset(kvm, &vcpu->arch.dtl); in kvmhv_svm_off()
5606 unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow); in kvmhv_svm_off()
5607 unpin_vpa_reset(kvm, &vcpu->arch.vpa); in kvmhv_svm_off()
5608 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()
5612 kvm->arch.secure_guest = 0; in kvmhv_svm_off()
5613 kvm->arch.mmu_ready = mmu_was_ready; in kvmhv_svm_off()
5615 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_svm_off()