Lines Matching +full:cpu +full:- +full:crit

6  *    Kevin Wolf <mail@kevin-wolf.de>
79 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) { in kvmppc_unfixup_split_real()
86 vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK; in kvmppc_unfixup_split_real()
93 if (!is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_interrupt_offset()
94 return to_book3s(vcpu)->hior; in kvmppc_interrupt_offset()
101 if (is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_update_int_pending()
113 bool crit; in kvmppc_critical_section() local
115 if (is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_critical_section()
121 /* Truncate crit indicators in 32 bit mode */ in kvmppc_critical_section()
127 /* Critical section when crit == r1 */ in kvmppc_critical_section()
128 crit = (crit_raw == crit_r1); in kvmppc_critical_section()
130 crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR); in kvmppc_critical_section()
132 return crit; in kvmppc_critical_section()
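
The matches above come from kvmppc_critical_section(), which decides whether a PR guest is inside a "critical section": the guest stores a marker in the shared (magic) page, and the section is active while that marker equals r1 and the guest is in supervisor mode. A minimal stand-alone sketch of that comparison, with the vcpu accessors replaced by plain arguments (the MSR bit values shown are the usual PowerPC ones):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MSR_SF (1ULL << 63)   /* 64-bit mode */
    #define MSR_PR (1ULL << 14)   /* problem (user) state */

    /* Stand-alone version: caller passes the guest MSR, the "critical"
     * value the guest wrote to the shared page, and r1. */
    static bool critical_section(uint64_t msr, uint64_t crit_raw, uint64_t r1)
    {
        /* Truncate the crit indicators when the guest runs in 32-bit mode. */
        if (!(msr & MSR_SF)) {
            crit_raw &= 0xffffffffULL;
            r1 &= 0xffffffffULL;
        }

        /* Critical section when the stored value equals r1 ... */
        bool crit = (crit_raw == r1);
        /* ... and only while the guest is in supervisor mode. */
        return crit && !(msr & MSR_PR);
    }

    int main(void)
    {
        printf("%d\n", critical_section(MSR_SF, 0x2000, 0x2000));          /* 1 */
        printf("%d\n", critical_section(MSR_SF | MSR_PR, 0x2000, 0x2000)); /* 0 */
        printf("%d\n", critical_section(0, 0x100002000ULL, 0x2000));       /* 1, truncated */
        return 0;
    }
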
141 vcpu->arch.mmu.reset_msr(vcpu); in kvmppc_inject_interrupt()
175 unsigned long old_pending = vcpu->arch.pending_exceptions; in kvmppc_book3s_dequeue_irqprio()
178 &vcpu->arch.pending_exceptions); in kvmppc_book3s_dequeue_irqprio()
180 kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions, in kvmppc_book3s_dequeue_irqprio()
186 vcpu->stat.queue_intr++; in kvmppc_book3s_queue_irqprio()
189 &vcpu->arch.pending_exceptions); in kvmppc_book3s_queue_irqprio()
229 return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); in kvmppc_core_pending_dec()
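
The dequeue/queue/pending hits above all manipulate vcpu->arch.pending_exceptions, a single unsigned long used as a bitmap indexed by BOOK3S_IRQPRIO_* priority; the kernel uses the atomic set_bit()/clear_bit()/test_bit() helpers for this. A rough, non-atomic user-space sketch of the same pattern (the priority numbers here are made up, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical priorities; the real BOOK3S_IRQPRIO_* values differ. */
    enum { IRQPRIO_DECREMENTER = 3, IRQPRIO_EXTERNAL = 4 };

    static unsigned long pending_exceptions;

    static void queue_irqprio(int prio)   { pending_exceptions |= 1UL << prio; }
    static void dequeue_irqprio(int prio) { pending_exceptions &= ~(1UL << prio); }
    static bool pending_dec(void)
    {
        return pending_exceptions & (1UL << IRQPRIO_DECREMENTER);
    }

    int main(void)
    {
        queue_irqprio(IRQPRIO_DECREMENTER);
        printf("dec pending: %d\n", pending_dec());   /* 1 */
        dequeue_irqprio(IRQPRIO_DECREMENTER);
        printf("dec pending: %d\n", pending_dec());   /* 0 */
        return 0;
    }
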
244 if (irq->irq == KVM_INTERRUPT_SET_LEVEL) in kvmppc_core_queue_external()
276 bool crit = kvmppc_critical_section(vcpu); in kvmppc_book3s_irqprio_deliver() local
280 deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; in kvmppc_book3s_irqprio_deliver()
285 deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit; in kvmppc_book3s_irqprio_deliver()
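
The kvmppc_book3s_irqprio_deliver() hits show the gate applied to the external and decrementer priorities: the interrupt is only presented to the guest when MSR_EE is set and the guest is not inside a critical section. On top of the critical_section() sketch above, that gate reduces to the following (MSR_EE is the usual PowerPC bit):

    #define MSR_EE (1ULL << 15)   /* external interrupt enable */

    /* Deliverable only with interrupts enabled and outside a critical section. */
    static bool can_deliver(uint64_t msr, bool crit)
    {
        return (msr & MSR_EE) && !crit;
    }
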
368 unsigned long *pending = &vcpu->arch.pending_exceptions; in kvmppc_core_prepare_to_enter()
369 unsigned long old_pending = vcpu->arch.pending_exceptions; in kvmppc_core_prepare_to_enter()
373 if (vcpu->arch.pending_exceptions) in kvmppc_core_prepare_to_enter()
374 printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions); in kvmppc_core_prepare_to_enter()
380 clear_bit(priority, &vcpu->arch.pending_exceptions); in kvmppc_core_prepare_to_enter()
399 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM; in kvmppc_gpa_to_pfn()
408 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; in kvmppc_gpa_to_pfn()
418 return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable); in kvmppc_gpa_to_pfn()
431 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite); in kvmppc_xlate()
433 pte->eaddr = eaddr; in kvmppc_xlate()
434 pte->raddr = eaddr & KVM_PAM; in kvmppc_xlate()
435 pte->vpage = VSID_REAL | eaddr >> 12; in kvmppc_xlate()
436 pte->may_read = true; in kvmppc_xlate()
437 pte->may_write = true; in kvmppc_xlate()
438 pte->may_execute = true; in kvmppc_xlate()
443 if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && in kvmppc_xlate()
445 pte->raddr &= ~SPLIT_HACK_MASK; in kvmppc_xlate()
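
These kvmppc_xlate() lines are the real-mode fall-through: when address relocation is off, the per-MMU xlate callback is skipped and an identity translation is synthesized, masking the real address with KVM_PAM and granting full permissions (the SPLIT_HACK lines then strip the split-real offset again for instruction fetches). A stand-alone sketch of that fallback, using placeholder constants rather than the kernel's definitions:

    #include <stdbool.h>
    #include <stdint.h>

    /* Placeholder values; the kernel defines KVM_PAM and VSID_REAL itself. */
    #define KVM_PAM   0x0fffffffffffffffULL
    #define VSID_REAL 0x0800000000000000ULL

    struct pte_info {
        uint64_t eaddr, raddr, vpage;
        bool may_read, may_write, may_execute;
    };

    /* Real mode: no MMU lookup, just an identity mapping with full access. */
    static void xlate_real_mode(uint64_t eaddr, struct pte_info *pte)
    {
        pte->eaddr = eaddr;
        pte->raddr = eaddr & KVM_PAM;
        pte->vpage = VSID_REAL | (eaddr >> 12);
        pte->may_read = true;
        pte->may_write = true;
        pte->may_execute = true;
    }
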
459 pc -= 4; in kvmppc_load_last_inst()
489 ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); in kvm_arch_vcpu_ioctl_get_sregs()
501 ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); in kvm_arch_vcpu_ioctl_set_sregs()
511 regs->pc = kvmppc_get_pc(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
512 regs->cr = kvmppc_get_cr(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
513 regs->ctr = kvmppc_get_ctr(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
514 regs->lr = kvmppc_get_lr(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
515 regs->xer = kvmppc_get_xer(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
516 regs->msr = kvmppc_get_msr(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
517 regs->srr0 = kvmppc_get_srr0(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
518 regs->srr1 = kvmppc_get_srr1(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
519 regs->pid = vcpu->arch.pid; in kvm_arch_vcpu_ioctl_get_regs()
520 regs->sprg0 = kvmppc_get_sprg0(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
521 regs->sprg1 = kvmppc_get_sprg1(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
522 regs->sprg2 = kvmppc_get_sprg2(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
523 regs->sprg3 = kvmppc_get_sprg3(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
524 regs->sprg4 = kvmppc_get_sprg4(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
525 regs->sprg5 = kvmppc_get_sprg5(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
526 regs->sprg6 = kvmppc_get_sprg6(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
527 regs->sprg7 = kvmppc_get_sprg7(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
529 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) in kvm_arch_vcpu_ioctl_get_regs()
530 regs->gpr[i] = kvmppc_get_gpr(vcpu, i); in kvm_arch_vcpu_ioctl_get_regs()
539 kvmppc_set_pc(vcpu, regs->pc); in kvm_arch_vcpu_ioctl_set_regs()
540 kvmppc_set_cr(vcpu, regs->cr); in kvm_arch_vcpu_ioctl_set_regs()
541 kvmppc_set_ctr(vcpu, regs->ctr); in kvm_arch_vcpu_ioctl_set_regs()
542 kvmppc_set_lr(vcpu, regs->lr); in kvm_arch_vcpu_ioctl_set_regs()
543 kvmppc_set_xer(vcpu, regs->xer); in kvm_arch_vcpu_ioctl_set_regs()
544 kvmppc_set_msr(vcpu, regs->msr); in kvm_arch_vcpu_ioctl_set_regs()
545 kvmppc_set_srr0(vcpu, regs->srr0); in kvm_arch_vcpu_ioctl_set_regs()
546 kvmppc_set_srr1(vcpu, regs->srr1); in kvm_arch_vcpu_ioctl_set_regs()
547 kvmppc_set_sprg0(vcpu, regs->sprg0); in kvm_arch_vcpu_ioctl_set_regs()
548 kvmppc_set_sprg1(vcpu, regs->sprg1); in kvm_arch_vcpu_ioctl_set_regs()
549 kvmppc_set_sprg2(vcpu, regs->sprg2); in kvm_arch_vcpu_ioctl_set_regs()
550 kvmppc_set_sprg3(vcpu, regs->sprg3); in kvm_arch_vcpu_ioctl_set_regs()
551 kvmppc_set_sprg4(vcpu, regs->sprg4); in kvm_arch_vcpu_ioctl_set_regs()
552 kvmppc_set_sprg5(vcpu, regs->sprg5); in kvm_arch_vcpu_ioctl_set_regs()
553 kvmppc_set_sprg6(vcpu, regs->sprg6); in kvm_arch_vcpu_ioctl_set_regs()
554 kvmppc_set_sprg7(vcpu, regs->sprg7); in kvm_arch_vcpu_ioctl_set_regs()
556 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) in kvm_arch_vcpu_ioctl_set_regs()
557 kvmppc_set_gpr(vcpu, i, regs->gpr[i]); in kvm_arch_vcpu_ioctl_set_regs()
564 return -ENOTSUPP; in kvm_arch_vcpu_ioctl_get_fpu()
569 return -ENOTSUPP; in kvm_arch_vcpu_ioctl_set_fpu()
578 r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val); in kvmppc_get_one_reg()
579 if (r == -EINVAL) { in kvmppc_get_one_reg()
589 i = id - KVM_REG_PPC_FPR0; in kvmppc_get_one_reg()
593 *val = get_reg_val(id, vcpu->arch.fp.fpscr); in kvmppc_get_one_reg()
598 i = id - KVM_REG_PPC_VSR0; in kvmppc_get_one_reg()
599 val->vsxval[0] = vcpu->arch.fp.fpr[i][0]; in kvmppc_get_one_reg()
600 val->vsxval[1] = vcpu->arch.fp.fpr[i][1]; in kvmppc_get_one_reg()
602 r = -ENXIO; in kvmppc_get_one_reg()
611 if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) { in kvmppc_get_one_reg()
612 r = -ENXIO; in kvmppc_get_one_reg()
622 *val = get_reg_val(id, vcpu->arch.fscr); in kvmppc_get_one_reg()
625 *val = get_reg_val(id, vcpu->arch.tar); in kvmppc_get_one_reg()
628 *val = get_reg_val(id, vcpu->arch.ebbhr); in kvmppc_get_one_reg()
631 *val = get_reg_val(id, vcpu->arch.ebbrr); in kvmppc_get_one_reg()
634 *val = get_reg_val(id, vcpu->arch.bescr); in kvmppc_get_one_reg()
637 *val = get_reg_val(id, vcpu->arch.ic); in kvmppc_get_one_reg()
640 r = -EINVAL; in kvmppc_get_one_reg()
654 r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val); in kvmppc_set_one_reg()
655 if (r == -EINVAL) { in kvmppc_set_one_reg()
665 i = id - KVM_REG_PPC_FPR0; in kvmppc_set_one_reg()
669 vcpu->arch.fp.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg()
674 i = id - KVM_REG_PPC_VSR0; in kvmppc_set_one_reg()
675 vcpu->arch.fp.fpr[i][0] = val->vsxval[0]; in kvmppc_set_one_reg()
676 vcpu->arch.fp.fpr[i][1] = val->vsxval[1]; in kvmppc_set_one_reg()
678 r = -ENXIO; in kvmppc_set_one_reg()
684 if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) { in kvmppc_set_one_reg()
685 r = -ENXIO; in kvmppc_set_one_reg()
695 vcpu->arch.fscr = set_reg_val(id, *val); in kvmppc_set_one_reg()
698 vcpu->arch.tar = set_reg_val(id, *val); in kvmppc_set_one_reg()
701 vcpu->arch.ebbhr = set_reg_val(id, *val); in kvmppc_set_one_reg()
704 vcpu->arch.ebbrr = set_reg_val(id, *val); in kvmppc_set_one_reg()
707 vcpu->arch.bescr = set_reg_val(id, *val); in kvmppc_set_one_reg()
710 vcpu->arch.ic = set_reg_val(id, *val); in kvmppc_set_one_reg()
713 r = -EINVAL; in kvmppc_set_one_reg()
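
The kvmppc_get_one_reg()/kvmppc_set_one_reg() hits all share one dispatch shape: the mode-specific backend (HV or PR) gets first refusal through kvm_ops, and only when it returns -EINVAL does the common Book3S code handle registers such as the FPRs, FPSCR, VSRs, FSCR, TAR and the EBB state, returning -EINVAL (or -ENXIO for absent ICP/XIVE state) otherwise. A skeletal sketch of that two-level dispatch, with illustrative names:

    #include <errno.h>
    #include <stdint.h>

    struct vcpu;    /* opaque for the sketch */

    struct reg_ops {
        /* Backend hook: returns -EINVAL for registers it does not own. */
        int (*get_one_reg)(struct vcpu *vcpu, uint64_t id, uint64_t *val);
    };

    static int common_get_one_reg(struct vcpu *vcpu, uint64_t id, uint64_t *val)
    {
        switch (id) {
        /* ... FPRs, FPSCR, VSRs, FSCR, TAR, EBBHR/EBBRR/BESCR, IC ... */
        default:
            return -EINVAL;
        }
    }

    static int get_one_reg(struct vcpu *vcpu, const struct reg_ops *ops,
                           uint64_t id, uint64_t *val)
    {
        int r = ops->get_one_reg(vcpu, id, val);

        /* Fall back to the shared Book3S handling only on -EINVAL. */
        if (r == -EINVAL)
            r = common_get_one_reg(vcpu, id, val);
        return r;
    }
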
721 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvmppc_core_vcpu_load() argument
723 vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu); in kvmppc_core_vcpu_load()
728 vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu); in kvmppc_core_vcpu_put()
733 vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr); in kvmppc_set_msr()
739 return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu); in kvmppc_vcpu_run()
752 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
765 return kvm->arch.kvm_ops->vcpu_create(kvm, id); in kvmppc_core_vcpu_create()
770 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu); in kvmppc_core_vcpu_free()
775 return vcpu->kvm->arch.kvm_ops->check_requests(vcpu); in kvmppc_core_check_requests()
780 return kvm->arch.kvm_ops->get_dirty_log(kvm, log); in kvm_vm_ioctl_get_dirty_log()
786 kvm->arch.kvm_ops->free_memslot(free, dont); in kvmppc_core_free_memslot()
792 return kvm->arch.kvm_ops->create_memslot(slot, npages); in kvmppc_core_create_memslot()
797 kvm->arch.kvm_ops->flush_memslot(kvm, memslot); in kvmppc_core_flush_memslot()
804 return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem); in kvmppc_core_prepare_memory_region()
812 kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new); in kvmppc_core_commit_memory_region()
818 return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end); in kvm_unmap_hva_range()
823 return kvm->arch.kvm_ops->age_hva(kvm, start, end); in kvm_age_hva()
828 return kvm->arch.kvm_ops->test_age_hva(kvm, hva); in kvm_test_age_hva()
833 kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte); in kvm_set_spte_hva()
838 vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu); in kvmppc_mmu_destroy()
845 INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables); in kvmppc_core_init_vm()
846 INIT_LIST_HEAD(&kvm->arch.rtas_tokens); in kvmppc_core_init_vm()
847 mutex_init(&kvm->arch.rtas_token_lock); in kvmppc_core_init_vm()
850 return kvm->arch.kvm_ops->init_vm(kvm); in kvmppc_core_init_vm()
855 kvm->arch.kvm_ops->destroy_vm(kvm); in kvmppc_core_destroy_vm()
859 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); in kvmppc_core_destroy_vm()
874 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_h_logical_ci_load()
876 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvmppc_h_logical_ci_load()
935 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_h_logical_ci_store()
937 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvmppc_h_logical_ci_store()
957 return kvm->arch.kvm_ops->hcall_implemented(hcall); in kvmppc_book3s_hcall_implemented()
976 return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi, in kvm_arch_set_irq_inatomic()
983 return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status); in kvmppc_book3s_set_irq()
989 entries->gsi = gsi; in kvm_irq_map_gsi()
990 entries->type = KVM_IRQ_ROUTING_IRQCHIP; in kvm_irq_map_gsi()
991 entries->set = kvmppc_book3s_set_irq; in kvm_irq_map_gsi()
992 entries->irqchip.irqchip = 0; in kvm_irq_map_gsi()
993 entries->irqchip.pin = gsi; in kvm_irq_map_gsi()
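
The closing kvm_irq_map_gsi() hits build Book3S's default interrupt routing: every GSI maps one-to-one onto pin gsi of irqchip 0, with kvmppc_book3s_set_irq() as the setter. A stand-alone mock of that identity mapping; the struct only mirrors the fields visible above, not the kernel's kvm_kernel_irq_routing_entry:

    #include <stdio.h>

    #define ROUTING_IRQCHIP 1   /* placeholder for KVM_IRQ_ROUTING_IRQCHIP */

    struct routing_entry {
        int gsi;
        int type;
        int (*set)(struct routing_entry *e, int level);
        struct { int irqchip; int pin; } irqchip;
    };

    static int mock_set_irq(struct routing_entry *e, int level)
    {
        printf("irqchip %d, pin %d -> level %d\n",
               e->irqchip.irqchip, e->irqchip.pin, level);
        return 0;
    }

    /* Default 1:1 mapping: GSI n is pin n of irqchip 0. */
    static void map_gsi(struct routing_entry *e, int gsi)
    {
        e->gsi = gsi;
        e->type = ROUTING_IRQCHIP;
        e->set = mock_set_irq;
        e->irqchip.irqchip = 0;
        e->irqchip.pin = gsi;
    }

    int main(void)
    {
        struct routing_entry e;
        map_gsi(&e, 5);
        e.set(&e, 1);
        return 0;
    }
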