Lines matching refs: vcpu — KVM PowerPC Book3S PR (apparently arch/powerpc/kvm/book3s_pr.c)

52 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
55 static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
66 static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) in kvmppc_is_split_real() argument
68 ulong msr = kvmppc_get_msr(vcpu); in kvmppc_is_split_real()
72 static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu) in kvmppc_fixup_split_real() argument
74 ulong msr = kvmppc_get_msr(vcpu); in kvmppc_fixup_split_real()
75 ulong pc = kvmppc_get_pc(vcpu); in kvmppc_fixup_split_real()
82 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) in kvmppc_fixup_split_real()
89 vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK; in kvmppc_fixup_split_real()
90 kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS); in kvmppc_fixup_split_real()
93 static void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu) in kvmppc_unfixup_split_real() argument
95 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { in kvmppc_unfixup_split_real()
96 ulong pc = kvmppc_get_pc(vcpu); in kvmppc_unfixup_split_real()
97 ulong lr = kvmppc_get_lr(vcpu); in kvmppc_unfixup_split_real()
99 kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK); in kvmppc_unfixup_split_real()
101 kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK); in kvmppc_unfixup_split_real()
102 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; in kvmppc_unfixup_split_real()
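
Taken together, the three helpers above implement the Book3S PR "split real" hack: a guest running with MSR_DR set but MSR_IR clear fetches instructions in real mode while translating data accesses, and PR KVM fakes that by offsetting the guest PC into a reserved window. A minimal sketch of the first two helpers, with the listing's elided checks filled in as assumptions (the real fixup also reads the MSR for checks not shown here):

    static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
    {
            ulong msr = kvmppc_get_msr(vcpu);

            /* Split real mode: data relocation on, instruction relocation off. */
            return (msr & (MSR_IR | MSR_DR)) == MSR_DR;
    }

    static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
    {
            ulong pc = kvmppc_get_pc(vcpu);

            /* Already patched? (line 82) */
            if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
                    return;

            /* Assumed elided guard: skip a PC already inside the window. */
            if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
                    return;

            vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
            kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
    }

kvmppc_unfixup_split_real() (lines 93-102) strips SPLIT_HACK_MASK back out of PC and LR and clears the hflag before the guest can observe either register.
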
106 static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags) in kvmppc_inject_interrupt_pr() argument
110 kvmppc_unfixup_split_real(vcpu); in kvmppc_inject_interrupt_pr()
112 msr = kvmppc_get_msr(vcpu); in kvmppc_inject_interrupt_pr()
113 pc = kvmppc_get_pc(vcpu); in kvmppc_inject_interrupt_pr()
114 new_msr = vcpu->arch.intr_msr; in kvmppc_inject_interrupt_pr()
115 new_pc = to_book3s(vcpu)->hior + vec; in kvmppc_inject_interrupt_pr()
125 kvmppc_set_srr0(vcpu, pc); in kvmppc_inject_interrupt_pr()
126 kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags); in kvmppc_inject_interrupt_pr()
127 kvmppc_set_pc(vcpu, new_pc); in kvmppc_inject_interrupt_pr()
128 kvmppc_set_msr(vcpu, new_msr); in kvmppc_inject_interrupt_pr()
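
kvmppc_inject_interrupt_pr() mirrors what Book3S hardware does when an interrupt is taken: old PC to SRR0, the preserved old MSR bits plus the caller's flags to SRR1, then resume at the vector relative to HIOR with the guest's interrupt MSR. Assembled from the visible lines; the elided lines 116-124 presumably propagate 64-bit TM suspend state into new_msr, which this sketch omits:

    static void kvmppc_inject_interrupt_pr(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
    {
            ulong msr, pc, new_msr, new_pc;

            kvmppc_unfixup_split_real(vcpu);

            msr = kvmppc_get_msr(vcpu);
            pc = kvmppc_get_pc(vcpu);
            new_msr = vcpu->arch.intr_msr;
            new_pc = to_book3s(vcpu)->hior + vec;

            kvmppc_set_srr0(vcpu, pc);
            kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
            kvmppc_set_pc(vcpu, new_pc);
            kvmppc_set_msr(vcpu, new_msr);
    }
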
131 static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) in kvmppc_core_vcpu_load_pr() argument
134 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_core_vcpu_load_pr()
135 memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); in kvmppc_core_vcpu_load_pr()
136 svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; in kvmppc_core_vcpu_load_pr()
146 vcpu->cpu = smp_processor_id(); in kvmppc_core_vcpu_load_pr()
148 current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; in kvmppc_core_vcpu_load_pr()
151 if (kvmppc_is_split_real(vcpu)) in kvmppc_core_vcpu_load_pr()
152 kvmppc_fixup_split_real(vcpu); in kvmppc_core_vcpu_load_pr()
154 kvmppc_restore_tm_pr(vcpu); in kvmppc_core_vcpu_load_pr()
157 static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) in kvmppc_core_vcpu_put_pr() argument
160 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_core_vcpu_put_pr()
162 kvmppc_copy_from_svcpu(vcpu); in kvmppc_core_vcpu_put_pr()
164 memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); in kvmppc_core_vcpu_put_pr()
165 to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; in kvmppc_core_vcpu_put_pr()
169 if (kvmppc_is_split_real(vcpu)) in kvmppc_core_vcpu_put_pr()
170 kvmppc_unfixup_split_real(vcpu); in kvmppc_core_vcpu_put_pr()
172 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); in kvmppc_core_vcpu_put_pr()
173 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); in kvmppc_core_vcpu_put_pr()
174 kvmppc_save_tm_pr(vcpu); in kvmppc_core_vcpu_put_pr()
181 vcpu->cpu = -1; in kvmppc_core_vcpu_put_pr()
185 void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu) in kvmppc_copy_to_svcpu() argument
187 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_copy_to_svcpu()
189 svcpu->gpr[0] = vcpu->arch.regs.gpr[0]; in kvmppc_copy_to_svcpu()
190 svcpu->gpr[1] = vcpu->arch.regs.gpr[1]; in kvmppc_copy_to_svcpu()
191 svcpu->gpr[2] = vcpu->arch.regs.gpr[2]; in kvmppc_copy_to_svcpu()
192 svcpu->gpr[3] = vcpu->arch.regs.gpr[3]; in kvmppc_copy_to_svcpu()
193 svcpu->gpr[4] = vcpu->arch.regs.gpr[4]; in kvmppc_copy_to_svcpu()
194 svcpu->gpr[5] = vcpu->arch.regs.gpr[5]; in kvmppc_copy_to_svcpu()
195 svcpu->gpr[6] = vcpu->arch.regs.gpr[6]; in kvmppc_copy_to_svcpu()
196 svcpu->gpr[7] = vcpu->arch.regs.gpr[7]; in kvmppc_copy_to_svcpu()
197 svcpu->gpr[8] = vcpu->arch.regs.gpr[8]; in kvmppc_copy_to_svcpu()
198 svcpu->gpr[9] = vcpu->arch.regs.gpr[9]; in kvmppc_copy_to_svcpu()
199 svcpu->gpr[10] = vcpu->arch.regs.gpr[10]; in kvmppc_copy_to_svcpu()
200 svcpu->gpr[11] = vcpu->arch.regs.gpr[11]; in kvmppc_copy_to_svcpu()
201 svcpu->gpr[12] = vcpu->arch.regs.gpr[12]; in kvmppc_copy_to_svcpu()
202 svcpu->gpr[13] = vcpu->arch.regs.gpr[13]; in kvmppc_copy_to_svcpu()
203 svcpu->cr = vcpu->arch.regs.ccr; in kvmppc_copy_to_svcpu()
204 svcpu->xer = vcpu->arch.regs.xer; in kvmppc_copy_to_svcpu()
205 svcpu->ctr = vcpu->arch.regs.ctr; in kvmppc_copy_to_svcpu()
206 svcpu->lr = vcpu->arch.regs.link; in kvmppc_copy_to_svcpu()
207 svcpu->pc = vcpu->arch.regs.nip; in kvmppc_copy_to_svcpu()
209 svcpu->shadow_fscr = vcpu->arch.shadow_fscr; in kvmppc_copy_to_svcpu()
215 vcpu->arch.entry_tb = get_tb(); in kvmppc_copy_to_svcpu()
216 vcpu->arch.entry_vtb = get_vtb(); in kvmppc_copy_to_svcpu()
218 vcpu->arch.entry_ic = mfspr(SPRN_IC); in kvmppc_copy_to_svcpu()
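
kvmppc_copy_to_svcpu() mirrors the guest's volatile register file into the shadow vcpu that the real-mode entry code actually works on; kvmppc_copy_from_svcpu() below undoes it on exit. The fourteen unrolled GPR assignments are equivalent to a simple loop, shown here only for clarity (svcpu_copy_gprs is a hypothetical helper; the kernel keeps the copies unrolled):

    /* Hypothetical restatement of lines 189-202. */
    static void svcpu_copy_gprs(struct kvmppc_book3s_shadow_vcpu *svcpu,
                                const struct kvm_vcpu *vcpu)
    {
            int i;

            /* Only GPRs 0-13 are shadowed for the real-mode entry path. */
            for (i = 0; i < 14; i++)
                    svcpu->gpr[i] = vcpu->arch.regs.gpr[i];
    }

The entry timestamps (entry_tb, entry_vtb, entry_ic) are taken last so that PURR/SPURR/VTB time spent inside the guest can be credited on the way out (lines 300-304).
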
224 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) in kvmppc_recalc_shadow_msr() argument
226 ulong guest_msr = kvmppc_get_msr(vcpu); in kvmppc_recalc_shadow_msr()
239 smsr |= (guest_msr & vcpu->arch.guest_owned_ext); in kvmppc_recalc_shadow_msr()
253 vcpu->arch.shadow_msr = smsr; in kvmppc_recalc_shadow_msr()
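
The shadow MSR is what the hardware actually runs with while the guest executes: guest-controlled bits are filtered, host-required bits are forced on, and the math facilities are passed through only while the guest owns them (line 239). A hedged sketch of the construction; the exact masks here are assumptions, not a verbatim copy of the elided lines:

    static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
    {
            ulong guest_msr = kvmppc_get_msr(vcpu);
            ulong smsr = guest_msr;

            /* Keep only bits the guest may drive directly (assumed mask). */
            smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
            /* Force bits the host needs while the guest runs (assumed mask). */
            smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
            /* Pass through math extensions the guest currently owns (line 239). */
            smsr |= (guest_msr & vcpu->arch.guest_owned_ext);

            vcpu->arch.shadow_msr = smsr;           /* line 253 */
    }
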
257 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu) in kvmppc_copy_from_svcpu() argument
259 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); in kvmppc_copy_from_svcpu()
271 vcpu->arch.regs.gpr[0] = svcpu->gpr[0]; in kvmppc_copy_from_svcpu()
272 vcpu->arch.regs.gpr[1] = svcpu->gpr[1]; in kvmppc_copy_from_svcpu()
273 vcpu->arch.regs.gpr[2] = svcpu->gpr[2]; in kvmppc_copy_from_svcpu()
274 vcpu->arch.regs.gpr[3] = svcpu->gpr[3]; in kvmppc_copy_from_svcpu()
275 vcpu->arch.regs.gpr[4] = svcpu->gpr[4]; in kvmppc_copy_from_svcpu()
276 vcpu->arch.regs.gpr[5] = svcpu->gpr[5]; in kvmppc_copy_from_svcpu()
277 vcpu->arch.regs.gpr[6] = svcpu->gpr[6]; in kvmppc_copy_from_svcpu()
278 vcpu->arch.regs.gpr[7] = svcpu->gpr[7]; in kvmppc_copy_from_svcpu()
279 vcpu->arch.regs.gpr[8] = svcpu->gpr[8]; in kvmppc_copy_from_svcpu()
280 vcpu->arch.regs.gpr[9] = svcpu->gpr[9]; in kvmppc_copy_from_svcpu()
281 vcpu->arch.regs.gpr[10] = svcpu->gpr[10]; in kvmppc_copy_from_svcpu()
282 vcpu->arch.regs.gpr[11] = svcpu->gpr[11]; in kvmppc_copy_from_svcpu()
283 vcpu->arch.regs.gpr[12] = svcpu->gpr[12]; in kvmppc_copy_from_svcpu()
284 vcpu->arch.regs.gpr[13] = svcpu->gpr[13]; in kvmppc_copy_from_svcpu()
285 vcpu->arch.regs.ccr = svcpu->cr; in kvmppc_copy_from_svcpu()
286 vcpu->arch.regs.xer = svcpu->xer; in kvmppc_copy_from_svcpu()
287 vcpu->arch.regs.ctr = svcpu->ctr; in kvmppc_copy_from_svcpu()
288 vcpu->arch.regs.link = svcpu->lr; in kvmppc_copy_from_svcpu()
289 vcpu->arch.regs.nip = svcpu->pc; in kvmppc_copy_from_svcpu()
290 vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; in kvmppc_copy_from_svcpu()
291 vcpu->arch.fault_dar = svcpu->fault_dar; in kvmppc_copy_from_svcpu()
292 vcpu->arch.fault_dsisr = svcpu->fault_dsisr; in kvmppc_copy_from_svcpu()
293 vcpu->arch.last_inst = svcpu->last_inst; in kvmppc_copy_from_svcpu()
295 vcpu->arch.shadow_fscr = svcpu->shadow_fscr; in kvmppc_copy_from_svcpu()
300 vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb; in kvmppc_copy_from_svcpu()
301 vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb; in kvmppc_copy_from_svcpu()
302 to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb; in kvmppc_copy_from_svcpu()
304 vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic; in kvmppc_copy_from_svcpu()
318 old_msr = kvmppc_get_msr(vcpu); in kvmppc_copy_from_svcpu()
320 (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) != in kvmppc_copy_from_svcpu()
323 old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)); in kvmppc_copy_from_svcpu()
324 kvmppc_set_msr_fast(vcpu, old_msr); in kvmppc_copy_from_svcpu()
325 kvmppc_recalc_shadow_msr(vcpu); in kvmppc_copy_from_svcpu()
336 void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) in kvmppc_save_tm_sprs() argument
339 vcpu->arch.tfhar = mfspr(SPRN_TFHAR); in kvmppc_save_tm_sprs()
340 vcpu->arch.texasr = mfspr(SPRN_TEXASR); in kvmppc_save_tm_sprs()
341 vcpu->arch.tfiar = mfspr(SPRN_TFIAR); in kvmppc_save_tm_sprs()
345 void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) in kvmppc_restore_tm_sprs() argument
348 mtspr(SPRN_TFHAR, vcpu->arch.tfhar); in kvmppc_restore_tm_sprs()
349 mtspr(SPRN_TEXASR, vcpu->arch.texasr); in kvmppc_restore_tm_sprs()
350 mtspr(SPRN_TFIAR, vcpu->arch.tfiar); in kvmppc_restore_tm_sprs()
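
TFHAR/TEXASR/TFIAR can only be touched with TM facility access enabled in the host, so the save/restore helpers presumably bracket the mfspr/mtspr runs with tm_enable()/tm_disable() in the elided lines. A sketch under that assumption:

    void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
    {
            tm_enable();                    /* assumed from the elided line */
            vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
            vcpu->arch.texasr = mfspr(SPRN_TEXASR);
            vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
            tm_disable();                   /* assumed */
    }

kvmppc_restore_tm_sprs() is the mtspr mirror image.
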
357 static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu) in kvmppc_handle_lost_math_exts() argument
360 ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) & in kvmppc_handle_lost_math_exts()
373 kvmppc_handle_ext(vcpu, exit_nr, ext_diff); in kvmppc_handle_lost_math_exts()
376 void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) in kvmppc_save_tm_pr() argument
378 if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) { in kvmppc_save_tm_pr()
379 kvmppc_save_tm_sprs(vcpu); in kvmppc_save_tm_pr()
383 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); in kvmppc_save_tm_pr()
384 kvmppc_giveup_ext(vcpu, MSR_VSX); in kvmppc_save_tm_pr()
387 _kvmppc_save_tm_pr(vcpu, mfmsr()); in kvmppc_save_tm_pr()
391 void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) in kvmppc_restore_tm_pr() argument
393 if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) { in kvmppc_restore_tm_pr()
394 kvmppc_restore_tm_sprs(vcpu); in kvmppc_restore_tm_pr()
395 if (kvmppc_get_msr(vcpu) & MSR_TM) { in kvmppc_restore_tm_pr()
396 kvmppc_handle_lost_math_exts(vcpu); in kvmppc_restore_tm_pr()
397 if (vcpu->arch.fscr & FSCR_TAR) in kvmppc_restore_tm_pr()
398 kvmppc_handle_fac(vcpu, FSCR_TAR_LG); in kvmppc_restore_tm_pr()
404 _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu)); in kvmppc_restore_tm_pr()
407 if (kvmppc_get_msr(vcpu) & MSR_TM) { in kvmppc_restore_tm_pr()
408 kvmppc_handle_lost_math_exts(vcpu); in kvmppc_restore_tm_pr()
409 if (vcpu->arch.fscr & FSCR_TAR) in kvmppc_restore_tm_pr()
410 kvmppc_handle_fac(vcpu, FSCR_TAR_LG); in kvmppc_restore_tm_pr()
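
The restore path splits on whether the guest is mid-transaction. Outside a transaction only the TM SPRs need restoring, plus re-acquiring whatever math state and TAR the lazy switching gave away (lines 395-398); inside one, the checkpointed state is reloaded through _kvmppc_restore_tm_pr() before the same facility fixups (lines 407-410). Assembled from the visible lines, with the early return and preemption guards filled in as assumptions:

    void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
    {
            if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
                    kvmppc_restore_tm_sprs(vcpu);
                    if (kvmppc_get_msr(vcpu) & MSR_TM) {
                            kvmppc_handle_lost_math_exts(vcpu);
                            if (vcpu->arch.fscr & FSCR_TAR)
                                    kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
                    }
                    return;                         /* assumed */
            }

            preempt_disable();                      /* assumed */
            _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
            preempt_enable();                       /* assumed */

            if (kvmppc_get_msr(vcpu) & MSR_TM) {
                    kvmppc_handle_lost_math_exts(vcpu);
                    if (vcpu->arch.fscr & FSCR_TAR)
                            kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
            }
    }
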
415 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) in kvmppc_core_check_requests_pr() argument
421 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) in kvmppc_core_check_requests_pr()
422 kvmppc_mmu_pte_flush(vcpu, 0, 0); in kvmppc_core_check_requests_pr()
432 struct kvm_vcpu *vcpu; in do_kvm_unmap_hva() local
452 kvm_for_each_vcpu(i, vcpu, kvm) in do_kvm_unmap_hva()
453 kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT, in do_kvm_unmap_hva()
487 static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) in kvmppc_set_msr_pr() argument
492 if (vcpu->arch.papr_enabled) in kvmppc_set_msr_pr()
505 kvmppc_emulate_tabort(vcpu, in kvmppc_set_msr_pr()
509 old_msr = kvmppc_get_msr(vcpu); in kvmppc_set_msr_pr()
510 msr &= to_book3s(vcpu)->msr_mask; in kvmppc_set_msr_pr()
511 kvmppc_set_msr_fast(vcpu, msr); in kvmppc_set_msr_pr()
512 kvmppc_recalc_shadow_msr(vcpu); in kvmppc_set_msr_pr()
515 if (!vcpu->arch.pending_exceptions) { in kvmppc_set_msr_pr()
516 kvm_vcpu_block(vcpu); in kvmppc_set_msr_pr()
517 kvm_clear_request(KVM_REQ_UNHALT, vcpu); in kvmppc_set_msr_pr()
518 vcpu->stat.halt_wakeup++; in kvmppc_set_msr_pr()
522 kvmppc_set_msr_fast(vcpu, msr); in kvmppc_set_msr_pr()
526 if (kvmppc_is_split_real(vcpu)) in kvmppc_set_msr_pr()
527 kvmppc_fixup_split_real(vcpu); in kvmppc_set_msr_pr()
529 kvmppc_unfixup_split_real(vcpu); in kvmppc_set_msr_pr()
531 if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) != in kvmppc_set_msr_pr()
533 kvmppc_mmu_flush_segments(vcpu); in kvmppc_set_msr_pr()
534 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); in kvmppc_set_msr_pr()
537 if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) { in kvmppc_set_msr_pr()
538 struct kvm_vcpu_arch *a = &vcpu->arch; in kvmppc_set_msr_pr()
541 kvmppc_mmu_map_segment(vcpu, a->magic_page_ea); in kvmppc_set_msr_pr()
543 kvmppc_mmu_map_segment(vcpu, a->magic_page_pa); in kvmppc_set_msr_pr()
555 if (vcpu->arch.magic_page_pa && in kvmppc_set_msr_pr()
558 kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa, in kvmppc_set_msr_pr()
563 if (kvmppc_get_msr(vcpu) & MSR_FP) in kvmppc_set_msr_pr()
564 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); in kvmppc_set_msr_pr()
567 if (kvmppc_get_msr(vcpu) & MSR_TM) in kvmppc_set_msr_pr()
568 kvmppc_handle_lost_math_exts(vcpu); in kvmppc_set_msr_pr()
572 static void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) in kvmppc_set_pvr_pr() argument
576 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; in kvmppc_set_pvr_pr()
577 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_pr()
580 kvmppc_mmu_book3s_64_init(vcpu); in kvmppc_set_pvr_pr()
581 if (!to_book3s(vcpu)->hior_explicit) in kvmppc_set_pvr_pr()
582 to_book3s(vcpu)->hior = 0xfff00000; in kvmppc_set_pvr_pr()
583 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL; in kvmppc_set_pvr_pr()
584 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_set_pvr_pr()
588 kvmppc_mmu_book3s_32_init(vcpu); in kvmppc_set_pvr_pr()
589 if (!to_book3s(vcpu)->hior_explicit) in kvmppc_set_pvr_pr()
590 to_book3s(vcpu)->hior = 0; in kvmppc_set_pvr_pr()
591 to_book3s(vcpu)->msr_mask = 0xffffffffULL; in kvmppc_set_pvr_pr()
592 vcpu->arch.cpu_type = KVM_CPU_3S_32; in kvmppc_set_pvr_pr()
595 kvmppc_sanity_check(vcpu); in kvmppc_set_pvr_pr()
599 vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32; in kvmppc_set_pvr_pr()
600 if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) && in kvmppc_set_pvr_pr()
602 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; in kvmppc_set_pvr_pr()
607 to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1); in kvmppc_set_pvr_pr()
624 vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE | in kvmppc_set_pvr_pr()
631 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; in kvmppc_set_pvr_pr()
646 vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS; in kvmppc_set_pvr_pr()
661 static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) in kvmppc_patch_dcbz() argument
668 hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT); in kvmppc_patch_dcbz()
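
kvmppc_patch_dcbz() exists because a 32-bit guest built for 32-byte cache lines would have dcbz zero a whole host cache line (128 bytes on modern Book3S), corrupting adjacent guest data. PR KVM therefore rewrites dcbz opcodes in the mapped page into an invalid form so each use traps and can be emulated. A hedged sketch of the patch loop; the opcode mask values follow the usual INS_DCBZ convention but are assumptions here:

    static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
    {
            struct page *hpage;
            u64 hpage_offset;
            u32 *page;
            int i;

            hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
            if (is_error_page(hpage))
                    return;

            hpage_offset = pte->raddr & ~PAGE_MASK;
            hpage_offset &= ~0xFFFULL;
            hpage_offset /= 4;

            get_page(hpage);
            page = kmap_atomic(hpage);

            /* Flip a reserved bit in every dcbz so execution traps to KVM. */
            for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
                    if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
                            page[i] &= cpu_to_be32(0xfffffff7);

            kunmap_atomic(page);
            put_page(hpage);
    }
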
688 static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) in kvmppc_visible_gpa() argument
690 ulong mp_pa = vcpu->arch.magic_page_pa; in kvmppc_visible_gpa()
692 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) in kvmppc_visible_gpa()
700 return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT); in kvmppc_visible_gpa()
703 static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu, in kvmppc_handle_pagefault() argument
712 bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false; in kvmppc_handle_pagefault()
713 bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false; in kvmppc_handle_pagefault()
717 if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE)) in kvmppc_handle_pagefault()
722 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite); in kvmppc_handle_pagefault()
734 switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { in kvmppc_handle_pagefault()
740 (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && in kvmppc_handle_pagefault()
745 vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); in kvmppc_handle_pagefault()
747 if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR) in kvmppc_handle_pagefault()
758 if (vcpu->arch.mmu.is_dcbz32(vcpu) && in kvmppc_handle_pagefault()
759 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { in kvmppc_handle_pagefault()
777 flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE; in kvmppc_handle_pagefault()
778 kvmppc_core_queue_data_storage(vcpu, eaddr, flags); in kvmppc_handle_pagefault()
780 kvmppc_core_queue_inst_storage(vcpu, flags); in kvmppc_handle_pagefault()
784 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); in kvmppc_handle_pagefault()
785 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); in kvmppc_handle_pagefault()
786 } else if (kvmppc_visible_gpa(vcpu, pte.raddr)) { in kvmppc_handle_pagefault()
787 if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { in kvmppc_handle_pagefault()
793 kvmppc_mmu_unmap_page(vcpu, &pte); in kvmppc_handle_pagefault()
796 if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) { in kvmppc_handle_pagefault()
798 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvmppc_handle_pagefault()
802 vcpu->stat.sp_storage++; in kvmppc_handle_pagefault()
803 else if (vcpu->arch.mmu.is_dcbz32(vcpu) && in kvmppc_handle_pagefault()
804 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) in kvmppc_handle_pagefault()
805 kvmppc_patch_dcbz(vcpu, &pte); in kvmppc_handle_pagefault()
808 vcpu->stat.mmio_exits++; in kvmppc_handle_pagefault()
809 vcpu->arch.paddr_accessed = pte.raddr; in kvmppc_handle_pagefault()
810 vcpu->arch.vaddr_accessed = pte.eaddr; in kvmppc_handle_pagefault()
811 r = kvmppc_emulate_mmio(vcpu); in kvmppc_handle_pagefault()
820 void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) in kvmppc_giveup_ext() argument
831 msr &= vcpu->arch.guest_owned_ext; in kvmppc_giveup_ext()
858 vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX); in kvmppc_giveup_ext()
859 kvmppc_recalc_shadow_msr(vcpu); in kvmppc_giveup_ext()
863 void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) in kvmppc_giveup_fac() argument
866 if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) { in kvmppc_giveup_fac()
873 vcpu->arch.tar = mfspr(SPRN_TAR); in kvmppc_giveup_fac()
875 vcpu->arch.shadow_fscr &= ~FSCR_TAR; in kvmppc_giveup_fac()
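
kvmppc_giveup_ext() and kvmppc_giveup_fac() are the two halves of lazy state switching: when the host wants an extension or facility back, the live hardware value is saved into the vcpu and the shadow MSR/FSCR bit is cleared so the guest's next use traps again. The TAR case visible above assembles to roughly the following (the mtspr clearing is assumed from an elided line):

    void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
    {
            /* Facility not live for the guest? Nothing to hand back. (line 866) */
            if (!(vcpu->arch.shadow_fscr & (1ULL << fac)))
                    return;

            switch (fac) {
            case FSCR_TAR_LG:
                    /* Snapshot the hardware TAR, then make future uses trap. */
                    vcpu->arch.tar = mfspr(SPRN_TAR);
                    mtspr(SPRN_TAR, 0);             /* assumed */
                    vcpu->arch.shadow_fscr &= ~FSCR_TAR;
                    break;
            }
    }
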
882 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, in kvmppc_handle_ext() argument
888 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) in kvmppc_handle_ext()
891 if (!(kvmppc_get_msr(vcpu) & msr)) { in kvmppc_handle_ext()
892 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_ext()
902 kvmppc_core_queue_program(vcpu, SRR1_PROGILL); in kvmppc_handle_ext()
914 msr &= ~vcpu->arch.guest_owned_ext; in kvmppc_handle_ext()
925 load_fp_state(&vcpu->arch.fp); in kvmppc_handle_ext()
927 t->fp_save_area = &vcpu->arch.fp; in kvmppc_handle_ext()
935 load_vr_state(&vcpu->arch.vr); in kvmppc_handle_ext()
937 t->vr_save_area = &vcpu->arch.vr; in kvmppc_handle_ext()
943 vcpu->arch.guest_owned_ext |= msr; in kvmppc_handle_ext()
944 kvmppc_recalc_shadow_msr(vcpu); in kvmppc_handle_ext()
953 static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) in kvmppc_handle_lost_ext() argument
957 lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr; in kvmppc_handle_lost_ext()
964 load_fp_state(&vcpu->arch.fp); in kvmppc_handle_lost_ext()
972 load_vr_state(&vcpu->arch.vr); in kvmppc_handle_lost_ext()
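
kvmppc_handle_lost_ext() covers the case where the host kernel used FP or Altivec (for example in an interrupt) and dropped the guest's live register state while KVM still considers the guest the owner: anything guest-owned but no longer enabled in the thread MSR is reloaded from the vcpu. A sketch assuming the elided lines take the usual enable_kernel_*/preempt guards and re-set the MSR bits:

    static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
    {
            ulong lost_ext;

            lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
            if (!lost_ext)
                    return;

            if (lost_ext & MSR_FP) {
                    preempt_disable();
                    enable_kernel_fp();             /* assumed */
                    load_fp_state(&vcpu->arch.fp);
                    disable_kernel_fp();            /* assumed */
                    preempt_enable();
            }
            if (lost_ext & MSR_VEC) {
                    preempt_disable();
                    enable_kernel_altivec();        /* assumed */
                    load_vr_state(&vcpu->arch.vr);
                    disable_kernel_altivec();       /* assumed */
                    preempt_enable();
            }
            current->thread.regs->msr |= lost_ext;  /* assumed */
    }
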
982 void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac) in kvmppc_trigger_fac_interrupt() argument
985 vcpu->arch.fscr &= ~(0xffULL << 56); in kvmppc_trigger_fac_interrupt()
986 vcpu->arch.fscr |= (fac << 56); in kvmppc_trigger_fac_interrupt()
987 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL); in kvmppc_trigger_fac_interrupt()
990 static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac) in kvmppc_emulate_fac() argument
994 if (!(kvmppc_get_msr(vcpu) & MSR_PR)) in kvmppc_emulate_fac()
995 er = kvmppc_emulate_instruction(vcpu); in kvmppc_emulate_fac()
999 kvmppc_trigger_fac_interrupt(vcpu, fac); in kvmppc_emulate_fac()
1004 static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac) in kvmppc_handle_fac() argument
1016 guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac)); in kvmppc_handle_fac()
1019 guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM; in kvmppc_handle_fac()
1028 kvmppc_trigger_fac_interrupt(vcpu, fac); in kvmppc_handle_fac()
1036 mtspr(SPRN_TAR, vcpu->arch.tar); in kvmppc_handle_fac()
1037 vcpu->arch.shadow_fscr |= FSCR_TAR; in kvmppc_handle_fac()
1040 kvmppc_emulate_fac(vcpu, fac); in kvmppc_handle_fac()
1052 if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR)) in kvmppc_handle_fac()
1059 void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr) in kvmppc_set_fscr() argument
1061 if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) { in kvmppc_set_fscr()
1063 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); in kvmppc_set_fscr()
1064 } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) { in kvmppc_set_fscr()
1065 vcpu->arch.fscr = fscr; in kvmppc_set_fscr()
1066 kvmppc_handle_fac(vcpu, FSCR_TAR_LG); in kvmppc_set_fscr()
1070 vcpu->arch.fscr = fscr; in kvmppc_set_fscr()
1074 static void kvmppc_setup_debug(struct kvm_vcpu *vcpu) in kvmppc_setup_debug() argument
1076 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvmppc_setup_debug()
1077 u64 msr = kvmppc_get_msr(vcpu); in kvmppc_setup_debug()
1079 kvmppc_set_msr(vcpu, msr | MSR_SE); in kvmppc_setup_debug()
1083 static void kvmppc_clear_debug(struct kvm_vcpu *vcpu) in kvmppc_clear_debug() argument
1085 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvmppc_clear_debug()
1086 u64 msr = kvmppc_get_msr(vcpu); in kvmppc_clear_debug()
1088 kvmppc_set_msr(vcpu, msr & ~MSR_SE); in kvmppc_clear_debug()
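
Single-stepping a PR guest needs no hardware debug registers: KVM_GUESTDBG_SINGLESTEP simply sets MSR_SE in the guest MSR on entry and clears it on exit, so every guest instruction raises a trace interrupt that KVM converts into a debug exit. From userspace the feature is driven through the standard ioctl; a minimal hypothetical harness (vcpu_fd is assumed to be an already-open KVM vcpu descriptor):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Ask KVM to single-step this vcpu; returns 0 on success, -1 on error. */
    static int enable_single_step(int vcpu_fd)
    {
            struct kvm_guest_debug dbg = {
                    .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
            };

            return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
    }
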
1092 static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr) in kvmppc_exit_pr_progint() argument
1106 flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; in kvmppc_exit_pr_progint()
1110 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); in kvmppc_exit_pr_progint()
1114 if (kvmppc_get_msr(vcpu) & MSR_PR) { in kvmppc_exit_pr_progint()
1117 kvmppc_get_pc(vcpu), last_inst); in kvmppc_exit_pr_progint()
1120 kvmppc_core_queue_program(vcpu, flags); in kvmppc_exit_pr_progint()
1125 vcpu->stat.emulated_inst_exits++; in kvmppc_exit_pr_progint()
1126 er = kvmppc_emulate_instruction(vcpu); in kvmppc_exit_pr_progint()
1136 __func__, kvmppc_get_pc(vcpu), last_inst); in kvmppc_exit_pr_progint()
1137 kvmppc_core_queue_program(vcpu, flags); in kvmppc_exit_pr_progint()
1141 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvmppc_exit_pr_progint()
1154 int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr) in kvmppc_handle_exit_pr() argument
1156 struct kvm_run *run = vcpu->run; in kvmppc_handle_exit_pr()
1160 vcpu->stat.sum_exits++; in kvmppc_handle_exit_pr()
1167 trace_kvm_exit(exit_nr, vcpu); in kvmppc_handle_exit_pr()
1173 ulong shadow_srr1 = vcpu->arch.shadow_srr1; in kvmppc_handle_exit_pr()
1174 vcpu->stat.pf_instruc++; in kvmppc_handle_exit_pr()
1176 if (kvmppc_is_split_real(vcpu)) in kvmppc_handle_exit_pr()
1177 kvmppc_fixup_split_real(vcpu); in kvmppc_handle_exit_pr()
1186 svcpu = svcpu_get(vcpu); in kvmppc_handle_exit_pr()
1187 sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]; in kvmppc_handle_exit_pr()
1190 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); in kvmppc_handle_exit_pr()
1199 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_exit_pr()
1200 r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr); in kvmppc_handle_exit_pr()
1201 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_exit_pr()
1202 vcpu->stat.sp_instruc++; in kvmppc_handle_exit_pr()
1203 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && in kvmppc_handle_exit_pr()
1204 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { in kvmppc_handle_exit_pr()
1210 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); in kvmppc_handle_exit_pr()
1213 kvmppc_core_queue_inst_storage(vcpu, in kvmppc_handle_exit_pr()
1221 ulong dar = kvmppc_get_fault_dar(vcpu); in kvmppc_handle_exit_pr()
1222 u32 fault_dsisr = vcpu->arch.fault_dsisr; in kvmppc_handle_exit_pr()
1223 vcpu->stat.pf_storage++; in kvmppc_handle_exit_pr()
1232 svcpu = svcpu_get(vcpu); in kvmppc_handle_exit_pr()
1236 kvmppc_mmu_map_segment(vcpu, dar); in kvmppc_handle_exit_pr()
1249 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_exit_pr()
1250 r = kvmppc_handle_pagefault(vcpu, dar, exit_nr); in kvmppc_handle_exit_pr()
1251 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_exit_pr()
1253 kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr); in kvmppc_handle_exit_pr()
1259 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { in kvmppc_handle_exit_pr()
1260 kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); in kvmppc_handle_exit_pr()
1261 kvmppc_book3s_queue_irqprio(vcpu, in kvmppc_handle_exit_pr()
1267 if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) { in kvmppc_handle_exit_pr()
1268 kvmppc_book3s_queue_irqprio(vcpu, in kvmppc_handle_exit_pr()
1278 vcpu->stat.dec_exits++; in kvmppc_handle_exit_pr()
1284 vcpu->stat.ext_intr_exits++; in kvmppc_handle_exit_pr()
1294 r = kvmppc_exit_pr_progint(vcpu, exit_nr); in kvmppc_handle_exit_pr()
1302 if (vcpu->arch.papr_enabled) { in kvmppc_handle_exit_pr()
1304 emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc); in kvmppc_handle_exit_pr()
1306 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4); in kvmppc_handle_exit_pr()
1312 if (vcpu->arch.papr_enabled && in kvmppc_handle_exit_pr()
1314 !(kvmppc_get_msr(vcpu) & MSR_PR)) { in kvmppc_handle_exit_pr()
1316 ulong cmd = kvmppc_get_gpr(vcpu, 3); in kvmppc_handle_exit_pr()
1320 if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { in kvmppc_handle_exit_pr()
1328 ulong gpr = kvmppc_get_gpr(vcpu, 4 + i); in kvmppc_handle_exit_pr()
1332 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_pr()
1334 } else if (vcpu->arch.osi_enabled && in kvmppc_handle_exit_pr()
1335 (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) && in kvmppc_handle_exit_pr()
1336 (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) { in kvmppc_handle_exit_pr()
1343 gprs[i] = kvmppc_get_gpr(vcpu, i); in kvmppc_handle_exit_pr()
1344 vcpu->arch.osi_needed = 1; in kvmppc_handle_exit_pr()
1346 } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && in kvmppc_handle_exit_pr()
1347 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { in kvmppc_handle_exit_pr()
1349 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); in kvmppc_handle_exit_pr()
1353 vcpu->stat.syscall_exits++; in kvmppc_handle_exit_pr()
1354 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_exit_pr()
1367 if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) { in kvmppc_handle_exit_pr()
1369 emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, in kvmppc_handle_exit_pr()
1372 r = kvmppc_exit_pr_progint(vcpu, exit_nr); in kvmppc_handle_exit_pr()
1394 r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr); in kvmppc_handle_exit_pr()
1400 int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); in kvmppc_handle_exit_pr()
1406 dsisr = kvmppc_alignment_dsisr(vcpu, last_inst); in kvmppc_handle_exit_pr()
1407 dar = kvmppc_alignment_dar(vcpu, last_inst); in kvmppc_handle_exit_pr()
1409 kvmppc_set_dsisr(vcpu, dsisr); in kvmppc_handle_exit_pr()
1410 kvmppc_set_dar(vcpu, dar); in kvmppc_handle_exit_pr()
1412 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_exit_pr()
1419 r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); in kvmppc_handle_exit_pr()
1423 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_exit_pr()
1427 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvmppc_handle_exit_pr()
1431 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); in kvmppc_handle_exit_pr()
1437 ulong shadow_srr1 = vcpu->arch.shadow_srr1; in kvmppc_handle_exit_pr()
1440 exit_nr, kvmppc_get_pc(vcpu), shadow_srr1); in kvmppc_handle_exit_pr()
1458 s = kvmppc_prepare_to_enter(vcpu); in kvmppc_handle_exit_pr()
1466 kvmppc_handle_lost_ext(vcpu); in kvmppc_handle_exit_pr()
1469 trace_kvm_book3s_reenter(r, vcpu); in kvmppc_handle_exit_pr()
1474 static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs_pr() argument
1477 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in kvm_arch_vcpu_ioctl_get_sregs_pr()
1480 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_pr()
1482 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; in kvm_arch_vcpu_ioctl_get_sregs_pr()
1483 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { in kvm_arch_vcpu_ioctl_get_sregs_pr()
1485 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i; in kvm_arch_vcpu_ioctl_get_sregs_pr()
1486 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_pr()
1490 sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i); in kvm_arch_vcpu_ioctl_get_sregs_pr()
1501 static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs_pr() argument
1504 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1507 kvmppc_set_pvr_pr(vcpu, sregs->pvr); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1511 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { in kvm_arch_vcpu_ioctl_set_sregs_pr()
1513 vcpu->arch.mmu.slbmte(vcpu, 0, 0); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1514 vcpu->arch.mmu.slbia(vcpu); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1521 vcpu->arch.mmu.slbmte(vcpu, rs, rb); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1527 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1530 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false, in kvm_arch_vcpu_ioctl_set_sregs_pr()
1532 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true, in kvm_arch_vcpu_ioctl_set_sregs_pr()
1534 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false, in kvm_arch_vcpu_ioctl_set_sregs_pr()
1536 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true, in kvm_arch_vcpu_ioctl_set_sregs_pr()
1542 kvmppc_mmu_pte_flush(vcpu, 0, 0); in kvm_arch_vcpu_ioctl_set_sregs_pr()
1547 static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, in kvmppc_get_one_reg_pr() argument
1557 *val = get_reg_val(id, to_book3s(vcpu)->hior); in kvmppc_get_one_reg_pr()
1560 *val = get_reg_val(id, to_book3s(vcpu)->vtb); in kvmppc_get_one_reg_pr()
1567 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_get_one_reg_pr()
1574 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_pr()
1577 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_pr()
1580 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_pr()
1584 vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]); in kvmppc_get_one_reg_pr()
1593 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_pr()
1596 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_pr()
1603 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_pr()
1606 *val = get_reg_val(id, vcpu->arch.xer_tm); in kvmppc_get_one_reg_pr()
1609 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_pr()
1612 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_pr()
1615 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_pr()
1618 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_pr()
1621 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_pr()
1624 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_pr()
1628 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_pr()
1633 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_pr()
1636 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_pr()
1647 static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr) in kvmppc_set_lpcr_pr() argument
1650 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr_pr()
1652 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr_pr()
1655 static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, in kvmppc_set_one_reg_pr() argument
1662 to_book3s(vcpu)->hior = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1663 to_book3s(vcpu)->hior_explicit = true; in kvmppc_set_one_reg_pr()
1666 to_book3s(vcpu)->vtb = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1670 kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); in kvmppc_set_one_reg_pr()
1674 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1677 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1680 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1683 vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] = in kvmppc_set_one_reg_pr()
1693 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_pr()
1696 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_pr()
1702 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1705 vcpu->arch.xer_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1708 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1711 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1714 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1717 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1720 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1723 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1727 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1732 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1735 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_pr()
1746 static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu) in kvmppc_core_vcpu_create_pr() argument
1757 vcpu->arch.book3s = vcpu_book3s; in kvmppc_core_vcpu_create_pr()
1760 vcpu->arch.shadow_vcpu = in kvmppc_core_vcpu_create_pr()
1761 kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL); in kvmppc_core_vcpu_create_pr()
1762 if (!vcpu->arch.shadow_vcpu) in kvmppc_core_vcpu_create_pr()
1769 vcpu->arch.shared = (void *)p; in kvmppc_core_vcpu_create_pr()
1773 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_pr()
1775 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_pr()
1783 vcpu->arch.pvr = 0x3C0301; in kvmppc_core_vcpu_create_pr()
1785 vcpu->arch.pvr = mfspr(SPRN_PVR); in kvmppc_core_vcpu_create_pr()
1786 vcpu->arch.intr_msr = MSR_SF; in kvmppc_core_vcpu_create_pr()
1789 vcpu->arch.pvr = 0x84202; in kvmppc_core_vcpu_create_pr()
1790 vcpu->arch.intr_msr = 0; in kvmppc_core_vcpu_create_pr()
1792 kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); in kvmppc_core_vcpu_create_pr()
1793 vcpu->arch.slb_nr = 64; in kvmppc_core_vcpu_create_pr()
1795 vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE; in kvmppc_core_vcpu_create_pr()
1797 err = kvmppc_mmu_init_pr(vcpu); in kvmppc_core_vcpu_create_pr()
1804 free_page((unsigned long)vcpu->arch.shared); in kvmppc_core_vcpu_create_pr()
1807 kfree(vcpu->arch.shadow_vcpu); in kvmppc_core_vcpu_create_pr()
1815 static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) in kvmppc_core_vcpu_free_pr() argument
1817 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); in kvmppc_core_vcpu_free_pr()
1819 kvmppc_mmu_destroy_pr(vcpu); in kvmppc_core_vcpu_free_pr()
1820 free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); in kvmppc_core_vcpu_free_pr()
1822 kfree(vcpu->arch.shadow_vcpu); in kvmppc_core_vcpu_free_pr()
1827 static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu) in kvmppc_vcpu_run_pr() argument
1832 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_pr()
1833 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvmppc_vcpu_run_pr()
1838 kvmppc_setup_debug(vcpu); in kvmppc_vcpu_run_pr()
1846 ret = kvmppc_prepare_to_enter(vcpu); in kvmppc_vcpu_run_pr()
1855 if (kvmppc_get_msr(vcpu) & MSR_FP) in kvmppc_vcpu_run_pr()
1856 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); in kvmppc_vcpu_run_pr()
1860 ret = __kvmppc_vcpu_run(vcpu); in kvmppc_vcpu_run_pr()
1862 kvmppc_clear_debug(vcpu); in kvmppc_vcpu_run_pr()
1868 kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); in kvmppc_vcpu_run_pr()
1871 kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); in kvmppc_vcpu_run_pr()
1874 vcpu->mode = OUTSIDE_GUEST_MODE; in kvmppc_vcpu_run_pr()
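
The ordering in kvmppc_vcpu_run_pr() is the point: prepare_to_enter resolves pending exceptions and signals, FP is preloaded when the guest MSR already has MSR_FP set so the very first guest instruction does not bounce through an FP-unavailable exit (lines 1855-1856), and only after __kvmppc_vcpu_run() returns are the lazily owned facilities handed back and the vcpu marked outside guest mode. In outline, assuming the elided lines are bookkeeping (sigset handling, irq enables) only:

    static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
    {
            int ret;

            if (!vcpu->arch.sane) {
                    vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                    return -EINVAL;                 /* assumed */
            }

            kvmppc_setup_debug(vcpu);

            ret = kvmppc_prepare_to_enter(vcpu);
            if (ret <= 0)
                    goto out;                       /* assumed */

            /* Preload FP so the guest skips an instant FP-unavailable exit. */
            if (kvmppc_get_msr(vcpu) & MSR_FP)
                    kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

            ret = __kvmppc_vcpu_run(vcpu);

            kvmppc_clear_debug(vcpu);

            /* Hand all lazily owned state back to the host. */
            kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
            kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

    out:
            vcpu->mode = OUTSIDE_GUEST_MODE;
            return ret;
    }
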
1885 struct kvm_vcpu *vcpu; in kvm_vm_ioctl_get_dirty_log_pr() local
1902 kvm_for_each_vcpu(n, vcpu, kvm) in kvm_vm_ioctl_get_dirty_log_pr()
1903 kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); in kvm_vm_ioctl_get_dirty_log_pr()
1948 struct kvm_vcpu *vcpu; in kvm_vm_ioctl_get_smmu_info_pr() local
1971 vcpu = kvm_get_vcpu(kvm, 0); in kvm_vm_ioctl_get_smmu_info_pr()
1972 if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { in kvm_vm_ioctl_get_smmu_info_pr()