Lines Matching refs:arch (the functions below are from the Linux kernel's arch/powerpc/kvm/book3s_hv.c, the KVM Book3S HV module)

100 int real_cpu = cpu + vcpu->arch.ptid; in kvmppc_fast_vcpu_kick_hv()
147 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
150 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
156 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && in kvmppc_core_vcpu_load_hv()
157 vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
158 vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
159 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
161 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
166 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv()
169 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
172 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
173 vcpu->arch.busy_preempt = mftb(); in kvmppc_core_vcpu_put_hv()
174 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
179 vcpu->arch.shregs.msr = msr; in kvmppc_set_msr_hv()
185 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_hv()
191 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat()
237 vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap); in kvmppc_dump_regs()
243 vcpu->arch.ctr, vcpu->arch.lr); in kvmppc_dump_regs()
245 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); in kvmppc_dump_regs()
247 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); in kvmppc_dump_regs()
249 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); in kvmppc_dump_regs()
251 vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr); in kvmppc_dump_regs()
252 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); in kvmppc_dump_regs()
254 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_dump_regs()
255 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); in kvmppc_dump_regs()
256 for (r = 0; r < vcpu->arch.slb_max; ++r) in kvmppc_dump_regs()
258 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); in kvmppc_dump_regs()
260 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
261 vcpu->arch.last_inst); in kvmppc_dump_regs()
292 spin_lock(&vcpu->arch.vpa_update_lock); in set_vpa()
298 spin_unlock(&vcpu->arch.vpa_update_lock); in set_vpa()
361 spin_lock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
367 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
378 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
381 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
388 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
391 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
398 if (vpa_is_registered(&tvcpu->arch.dtl) || in do_h_register_vpa()
399 vpa_is_registered(&tvcpu->arch.slb_shadow)) in do_h_register_vpa()
402 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
407 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
412 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
423 spin_unlock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
445 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
450 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
480 if (!(vcpu->arch.vpa.update_pending || in kvmppc_update_vpas()
481 vcpu->arch.slb_shadow.update_pending || in kvmppc_update_vpas()
482 vcpu->arch.dtl.update_pending)) in kvmppc_update_vpas()
485 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
486 if (vcpu->arch.vpa.update_pending) { in kvmppc_update_vpas()
487 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa); in kvmppc_update_vpas()
488 if (vcpu->arch.vpa.pinned_addr) in kvmppc_update_vpas()
489 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); in kvmppc_update_vpas()
491 if (vcpu->arch.dtl.update_pending) { in kvmppc_update_vpas()
492 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl); in kvmppc_update_vpas()
493 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; in kvmppc_update_vpas()
494 vcpu->arch.dtl_index = 0; in kvmppc_update_vpas()
496 if (vcpu->arch.slb_shadow.update_pending) in kvmppc_update_vpas()
497 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow); in kvmppc_update_vpas()
498 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
518 vc->runner->arch.run_task != current) { in vcore_stolen_time()
519 spin_lock_irq(&vc->runner->arch.tbacct_lock); in vcore_stolen_time()
523 spin_unlock_irq(&vc->runner->arch.tbacct_lock); in vcore_stolen_time()
539 dt = vcpu->arch.dtl_ptr; in kvmppc_create_dtl_entry()
540 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_create_dtl_entry()
543 stolen = core_stolen - vcpu->arch.stolen_logged; in kvmppc_create_dtl_entry()
544 vcpu->arch.stolen_logged = core_stolen; in kvmppc_create_dtl_entry()
545 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_create_dtl_entry()
546 stolen += vcpu->arch.busy_stolen; in kvmppc_create_dtl_entry()
547 vcpu->arch.busy_stolen = 0; in kvmppc_create_dtl_entry()
548 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_create_dtl_entry()
553 dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid); in kvmppc_create_dtl_entry()
557 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); in kvmppc_create_dtl_entry()
559 if (dt == vcpu->arch.dtl.pinned_end) in kvmppc_create_dtl_entry()
560 dt = vcpu->arch.dtl.pinned_addr; in kvmppc_create_dtl_entry()
561 vcpu->arch.dtl_ptr = dt; in kvmppc_create_dtl_entry()
564 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); in kvmppc_create_dtl_entry()
565 vcpu->arch.dtl.dirty = true; in kvmppc_create_dtl_entry()
570 if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207) in kvmppc_power8_compatible()
572 if ((!vcpu->arch.vcore->arch_compat) && in kvmppc_power8_compatible()
593 vcpu->arch.ciabr = value1; in kvmppc_h_set_mode()
602 vcpu->arch.dawr = value1; in kvmppc_h_set_mode()
603 vcpu->arch.dawrx = value2; in kvmppc_h_set_mode()
618 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) in kvmppc_pseries_do_hcall()
639 tvcpu->arch.prodded = 1; in kvmppc_pseries_do_hcall()
641 if (vcpu->arch.ceded) { in kvmppc_pseries_do_hcall()
665 if (list_empty(&vcpu->kvm->arch.rtas_tokens)) in kvmppc_pseries_do_hcall()
701 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
744 run->debug.arch.address = kvmppc_get_pc(vcpu); in kvmppc_emulate_debug_inst()
761 switch (vcpu->arch.trap) { in kvmppc_handle_exit_hv()
795 flags = vcpu->arch.shregs.msr & 0x1f0000ull; in kvmppc_handle_exit_hv()
813 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_hv()
828 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_exit_hv()
829 vcpu->arch.fault_dsisr = 0; in kvmppc_handle_exit_hv()
859 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
860 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
861 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
875 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_hv()
876 for (i = 0; i < vcpu->arch.slb_max; i++) { in kvm_arch_vcpu_ioctl_get_sregs_hv()
877 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; in kvm_arch_vcpu_ioctl_get_sregs_hv()
878 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_hv()
890 if (sregs->pvr != vcpu->arch.pvr) in kvm_arch_vcpu_ioctl_set_sregs_hv()
894 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
896 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; in kvm_arch_vcpu_ioctl_set_sregs_hv()
897 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; in kvm_arch_vcpu_ioctl_set_sregs_hv()
901 vcpu->arch.slb_max = j; in kvm_arch_vcpu_ioctl_set_sregs_hv()
909 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr()
924 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
927 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr()
929 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr()
964 *val = get_reg_val(id, vcpu->arch.dabr); in kvmppc_get_one_reg_hv()
967 *val = get_reg_val(id, vcpu->arch.dabrx); in kvmppc_get_one_reg_hv()
970 *val = get_reg_val(id, vcpu->arch.dscr); in kvmppc_get_one_reg_hv()
973 *val = get_reg_val(id, vcpu->arch.purr); in kvmppc_get_one_reg_hv()
976 *val = get_reg_val(id, vcpu->arch.spurr); in kvmppc_get_one_reg_hv()
979 *val = get_reg_val(id, vcpu->arch.amr); in kvmppc_get_one_reg_hv()
982 *val = get_reg_val(id, vcpu->arch.uamor); in kvmppc_get_one_reg_hv()
986 *val = get_reg_val(id, vcpu->arch.mmcr[i]); in kvmppc_get_one_reg_hv()
990 *val = get_reg_val(id, vcpu->arch.pmc[i]); in kvmppc_get_one_reg_hv()
994 *val = get_reg_val(id, vcpu->arch.spmc[i]); in kvmppc_get_one_reg_hv()
997 *val = get_reg_val(id, vcpu->arch.siar); in kvmppc_get_one_reg_hv()
1000 *val = get_reg_val(id, vcpu->arch.sdar); in kvmppc_get_one_reg_hv()
1003 *val = get_reg_val(id, vcpu->arch.sier); in kvmppc_get_one_reg_hv()
1006 *val = get_reg_val(id, vcpu->arch.iamr); in kvmppc_get_one_reg_hv()
1009 *val = get_reg_val(id, vcpu->arch.pspb); in kvmppc_get_one_reg_hv()
1012 *val = get_reg_val(id, vcpu->arch.vcore->dpdes); in kvmppc_get_one_reg_hv()
1015 *val = get_reg_val(id, vcpu->arch.dawr); in kvmppc_get_one_reg_hv()
1018 *val = get_reg_val(id, vcpu->arch.dawrx); in kvmppc_get_one_reg_hv()
1021 *val = get_reg_val(id, vcpu->arch.ciabr); in kvmppc_get_one_reg_hv()
1024 *val = get_reg_val(id, vcpu->arch.csigr); in kvmppc_get_one_reg_hv()
1027 *val = get_reg_val(id, vcpu->arch.tacr); in kvmppc_get_one_reg_hv()
1030 *val = get_reg_val(id, vcpu->arch.tcscr); in kvmppc_get_one_reg_hv()
1033 *val = get_reg_val(id, vcpu->arch.pid); in kvmppc_get_one_reg_hv()
1036 *val = get_reg_val(id, vcpu->arch.acop); in kvmppc_get_one_reg_hv()
1039 *val = get_reg_val(id, vcpu->arch.wort); in kvmppc_get_one_reg_hv()
1042 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1043 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); in kvmppc_get_one_reg_hv()
1044 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1047 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1048 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; in kvmppc_get_one_reg_hv()
1049 val->vpaval.length = vcpu->arch.slb_shadow.len; in kvmppc_get_one_reg_hv()
1050 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1053 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1054 val->vpaval.addr = vcpu->arch.dtl.next_gpa; in kvmppc_get_one_reg_hv()
1055 val->vpaval.length = vcpu->arch.dtl.len; in kvmppc_get_one_reg_hv()
1056 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
1059 *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); in kvmppc_get_one_reg_hv()
1063 *val = get_reg_val(id, vcpu->arch.vcore->lpcr); in kvmppc_get_one_reg_hv()
1066 *val = get_reg_val(id, vcpu->arch.ppr); in kvmppc_get_one_reg_hv()
1070 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_hv()
1073 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_hv()
1076 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_hv()
1080 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); in kvmppc_get_one_reg_hv()
1088 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_hv()
1091 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_hv()
1098 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_hv()
1101 *val = get_reg_val(id, vcpu->arch.xer_tm); in kvmppc_get_one_reg_hv()
1104 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_hv()
1107 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_hv()
1110 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_hv()
1113 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_hv()
1116 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_hv()
1119 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_hv()
1123 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_hv()
1128 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_hv()
1131 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_hv()
1135 *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); in kvmppc_get_one_reg_hv()
1159 vcpu->arch.dabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1162 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; in kvmppc_set_one_reg_hv()
1165 vcpu->arch.dscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1168 vcpu->arch.purr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1171 vcpu->arch.spurr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1174 vcpu->arch.amr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1177 vcpu->arch.uamor = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1181 vcpu->arch.mmcr[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1185 vcpu->arch.pmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1189 vcpu->arch.spmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1192 vcpu->arch.siar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1195 vcpu->arch.sdar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1198 vcpu->arch.sier = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1201 vcpu->arch.iamr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1204 vcpu->arch.pspb = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1207 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1210 vcpu->arch.dawr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1213 vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; in kvmppc_set_one_reg_hv()
1216 vcpu->arch.ciabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1218 if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) in kvmppc_set_one_reg_hv()
1219 vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ in kvmppc_set_one_reg_hv()
1222 vcpu->arch.csigr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1225 vcpu->arch.tacr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1228 vcpu->arch.tcscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1231 vcpu->arch.pid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1234 vcpu->arch.acop = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1237 vcpu->arch.wort = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1242 if (!addr && (vcpu->arch.slb_shadow.next_gpa || in kvmppc_set_one_reg_hv()
1243 vcpu->arch.dtl.next_gpa)) in kvmppc_set_one_reg_hv()
1245 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); in kvmppc_set_one_reg_hv()
1251 if (addr && !vcpu->arch.vpa.next_gpa) in kvmppc_set_one_reg_hv()
1253 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); in kvmppc_set_one_reg_hv()
1260 !vcpu->arch.vpa.next_gpa)) in kvmppc_set_one_reg_hv()
1263 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); in kvmppc_set_one_reg_hv()
1267 vcpu->arch.vcore->tb_offset = in kvmppc_set_one_reg_hv()
1277 vcpu->arch.ppr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1281 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1284 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1287 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1291 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1299 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_hv()
1302 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_hv()
1308 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1311 vcpu->arch.xer_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1314 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1317 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1320 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1323 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1326 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1329 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1333 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1338 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1341 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
1368 vcore->lpcr = kvm->arch.lpcr; in kvmppc_vcore_create()
1403 vcpu->arch.shared = &vcpu->arch.shregs; in kvmppc_core_vcpu_create_hv()
1410 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_hv()
1412 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_hv()
1415 vcpu->arch.mmcr[0] = MMCR0_FC; in kvmppc_core_vcpu_create_hv()
1416 vcpu->arch.ctrl = CTRL_RUNLATCH; in kvmppc_core_vcpu_create_hv()
1419 spin_lock_init(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_create_hv()
1420 spin_lock_init(&vcpu->arch.tbacct_lock); in kvmppc_core_vcpu_create_hv()
1421 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_create_hv()
1422 vcpu->arch.intr_msr = MSR_SF | MSR_ME; in kvmppc_core_vcpu_create_hv()
1426 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_core_vcpu_create_hv()
1428 init_waitqueue_head(&vcpu->arch.cpu_run); in kvmppc_core_vcpu_create_hv()
1431 vcore = kvm->arch.vcores[core]; in kvmppc_core_vcpu_create_hv()
1434 kvm->arch.vcores[core] = vcore; in kvmppc_core_vcpu_create_hv()
1435 kvm->arch.online_vcores++; in kvmppc_core_vcpu_create_hv()
1445 vcpu->arch.vcore = vcore; in kvmppc_core_vcpu_create_hv()
1446 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; in kvmppc_core_vcpu_create_hv()
1448 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_core_vcpu_create_hv()
1468 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
1469 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); in kvmppc_core_vcpu_free_hv()
1470 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); in kvmppc_core_vcpu_free_hv()
1471 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); in kvmppc_core_vcpu_free_hv()
1472 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
1488 if (now > vcpu->arch.dec_expires) { in kvmppc_set_timer()
1494 dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC in kvmppc_set_timer()
1496 hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec), in kvmppc_set_timer()
1498 vcpu->arch.timer_running = 1; in kvmppc_set_timer()
1503 vcpu->arch.ceded = 0; in kvmppc_end_cede()
1504 if (vcpu->arch.timer_running) { in kvmppc_end_cede()
1505 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_end_cede()
1506 vcpu->arch.timer_running = 0; in kvmppc_end_cede()
1517 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_remove_runnable()
1519 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
1521 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
1522 vcpu->arch.stolen_logged; in kvmppc_remove_runnable()
1523 vcpu->arch.busy_preempt = now; in kvmppc_remove_runnable()
1524 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_remove_runnable()
1525 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
1527 list_del(&vcpu->arch.run_list); in kvmppc_remove_runnable()
1574 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_start_thread()
1576 if (vcpu->arch.timer_running) { in kvmppc_start_thread()
1577 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_start_thread()
1578 vcpu->arch.timer_running = 0; in kvmppc_start_thread()
1580 cpu = vc->pcpu + vcpu->arch.ptid; in kvmppc_start_thread()
1584 tpaca->kvm_hstate.ptid = vcpu->arch.ptid; in kvmppc_start_thread()
1590 if (vcpu->arch.ptid) in kvmppc_start_thread()
1687 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { in kvmppc_run_core()
1688 if (signal_pending(vcpu->arch.run_task)) in kvmppc_run_core()
1690 if (vcpu->arch.vpa.update_pending || in kvmppc_run_core()
1691 vcpu->arch.slb_shadow.update_pending || in kvmppc_run_core()
1692 vcpu->arch.dtl.update_pending) in kvmppc_run_core()
1725 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) in kvmppc_run_core()
1726 vcpu->arch.ret = -EBUSY; in kvmppc_run_core()
1732 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { in kvmppc_run_core()
1760 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) in kvmppc_run_core()
1782 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { in kvmppc_run_core()
1784 if (now < vcpu->arch.dec_expires && in kvmppc_run_core()
1789 if (vcpu->arch.trap) in kvmppc_run_core()
1790 ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu, in kvmppc_run_core()
1791 vcpu->arch.run_task); in kvmppc_run_core()
1793 vcpu->arch.ret = ret; in kvmppc_run_core()
1794 vcpu->arch.trap = 0; in kvmppc_run_core()
1796 if (vcpu->arch.ceded) { in kvmppc_run_core()
1807 arch.run_list) { in kvmppc_run_core()
1808 if (!is_kvmppc_resume_guest(vcpu->arch.ret)) { in kvmppc_run_core()
1810 wake_up(&vcpu->arch.cpu_run); in kvmppc_run_core()
1823 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); in kvmppc_wait_for_exec()
1824 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) in kvmppc_wait_for_exec()
1826 finish_wait(&vcpu->arch.cpu_run, &wait); in kvmppc_wait_for_exec()
1853 vcpu->arch.ret = RESUME_GUEST; in kvmppc_run_vcpu()
1854 vcpu->arch.trap = 0; in kvmppc_run_vcpu()
1860 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
1862 vcpu->arch.ceded = 0; in kvmppc_run_vcpu()
1863 vcpu->arch.run_task = current; in kvmppc_run_vcpu()
1864 vcpu->arch.kvm_run = kvm_run; in kvmppc_run_vcpu()
1865 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
1866 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmppc_run_vcpu()
1867 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_run_vcpu()
1868 list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads); in kvmppc_run_vcpu()
1887 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
1896 arch.run_list) { in kvmppc_run_vcpu()
1898 if (signal_pending(v->arch.run_task)) { in kvmppc_run_vcpu()
1901 v->arch.kvm_run->exit_reason = KVM_EXIT_INTR; in kvmppc_run_vcpu()
1902 v->arch.ret = -EINTR; in kvmppc_run_vcpu()
1903 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
1906 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
1910 list_for_each_entry(v, &vc->runnable_threads, arch.run_list) { in kvmppc_run_vcpu()
1911 if (!v->arch.pending_exceptions) in kvmppc_run_vcpu()
1912 n_ceded += v->arch.ceded; in kvmppc_run_vcpu()
1914 v->arch.ceded = 0; in kvmppc_run_vcpu()
1923 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
1931 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_run_vcpu()
1935 vcpu->arch.ret = -EINTR; in kvmppc_run_vcpu()
1941 struct kvm_vcpu, arch.run_list); in kvmppc_run_vcpu()
1942 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
1946 return vcpu->arch.ret; in kvmppc_run_vcpu()
1958 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_hv()
1993 atomic_inc(&vcpu->kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
1998 if (!vcpu->kvm->arch.rma_setup_done) { in kvmppc_vcpu_run_hv()
2018 vcpu->arch.wqp = &vcpu->arch.vcore->wq; in kvmppc_vcpu_run_hv()
2019 vcpu->arch.pgdir = current->mm->pgd; in kvmppc_vcpu_run_hv()
2020 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_vcpu_run_hv()
2026 !(vcpu->arch.shregs.msr & MSR_PR)) { in kvmppc_vcpu_run_hv()
2032 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_vcpu_run_hv()
2061 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_vcpu_run_hv()
2062 atomic_dec(&vcpu->kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
2242 physp = memslot->arch.slot_phys; in unpin_slot()
2259 if (!dont || free->arch.rmap != dont->arch.rmap) { in kvmppc_core_free_memslot_hv()
2260 vfree(free->arch.rmap); in kvmppc_core_free_memslot_hv()
2261 free->arch.rmap = NULL; in kvmppc_core_free_memslot_hv()
2263 if (!dont || free->arch.slot_phys != dont->arch.slot_phys) { in kvmppc_core_free_memslot_hv()
2265 vfree(free->arch.slot_phys); in kvmppc_core_free_memslot_hv()
2266 free->arch.slot_phys = NULL; in kvmppc_core_free_memslot_hv()
2273 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); in kvmppc_core_create_memslot_hv()
2274 if (!slot->arch.rmap) in kvmppc_core_create_memslot_hv()
2276 slot->arch.slot_phys = NULL; in kvmppc_core_create_memslot_hv()
2288 phys = memslot->arch.slot_phys; in kvmppc_core_prepare_memory_region_hv()
2289 if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) { in kvmppc_core_prepare_memory_region_hv()
2293 memslot->arch.slot_phys = phys; in kvmppc_core_prepare_memory_region_hv()
2327 if ((kvm->arch.lpcr & mask) == lpcr) in kvmppc_update_lpcr()
2330 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
2333 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_update_lpcr()
2339 if (++cores_done >= kvm->arch.online_vcores) in kvmppc_update_lpcr()
2367 if (kvm->arch.rma_setup_done) in kvmppc_hv_setup_htab_rma()
2371 if (!kvm->arch.hpt_virt) { in kvmppc_hv_setup_htab_rma()
2421 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | in kvmppc_hv_setup_htab_rma()
2443 kvm->arch.rma = ri; in kvmppc_hv_setup_htab_rma()
2459 kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT; in kvmppc_hv_setup_htab_rma()
2467 physp = memslot->arch.slot_phys; in kvmppc_hv_setup_htab_rma()
2471 spin_lock(&kvm->arch.slot_phys_lock); in kvmppc_hv_setup_htab_rma()
2475 spin_unlock(&kvm->arch.slot_phys_lock); in kvmppc_hv_setup_htab_rma()
2483 kvm->arch.rma_setup_done = 1; in kvmppc_hv_setup_htab_rma()
2505 kvm->arch.lpid = lpid; in kvmppc_core_init_vm_hv()
2512 cpumask_setall(&kvm->arch.need_tlb_flush); in kvmppc_core_init_vm_hv()
2515 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, in kvmppc_core_init_vm_hv()
2516 sizeof(kvm->arch.enabled_hcalls)); in kvmppc_core_init_vm_hv()
2518 kvm->arch.rma = NULL; in kvmppc_core_init_vm_hv()
2520 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); in kvmppc_core_init_vm_hv()
2524 kvm->arch.host_lpid = 0; in kvmppc_core_init_vm_hv()
2525 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4); in kvmppc_core_init_vm_hv()
2531 kvm->arch.host_lpid = mfspr(SPRN_LPID); in kvmppc_core_init_vm_hv()
2532 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); in kvmppc_core_init_vm_hv()
2536 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | in kvmppc_core_init_vm_hv()
2542 kvm->arch.lpcr = lpcr; in kvmppc_core_init_vm_hv()
2544 kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206); in kvmppc_core_init_vm_hv()
2545 spin_lock_init(&kvm->arch.slot_phys_lock); in kvmppc_core_init_vm_hv()
2561 if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) { in kvmppc_free_vcores()
2562 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_free_vcores()
2566 kfree(kvm->arch.vcores[i]); in kvmppc_free_vcores()
2568 kvm->arch.online_vcores = 0; in kvmppc_free_vcores()
2576 if (kvm->arch.rma) { in kvmppc_core_destroy_vm_hv()
2577 kvm_release_rma(kvm->arch.rma); in kvmppc_core_destroy_vm_hv()
2578 kvm->arch.rma = NULL; in kvmppc_core_destroy_vm_hv()