Lines matching "non-masked" in arch/powerpc/kvm/book3s_xive.c
1 // SPDX-License-Identifier: GPL-2.0-only
6 #define pr_fmt(fmt) "xive-kvm: " fmt
22 #include <asm/xive-regs.h>
46 #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
47 #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
67 void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt; in kvmppc_xive_push_vcpu()
73 * (e.g. because it's not using an in-kernel interrupt controller). in kvmppc_xive_push_vcpu()
75 if (!tima || !vcpu->arch.xive_cam_word) in kvmppc_xive_push_vcpu()
79 __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS); in kvmppc_xive_push_vcpu()
80 __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2); in kvmppc_xive_push_vcpu()
81 vcpu->arch.xive_pushed = 1; in kvmppc_xive_push_vcpu()
91 vcpu->arch.irq_pending = 0; in kvmppc_xive_push_vcpu()
97 if (vcpu->arch.xive_esc_on) { in kvmppc_xive_push_vcpu()
98 pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr + in kvmppc_xive_push_vcpu()
106 * early enough (re-cede right away), there is a in kvmppc_xive_push_vcpu()
109 * a big no-no. in kvmppc_xive_push_vcpu()
120 * before re-enabling the escalation interrupt, and if in kvmppc_xive_push_vcpu()
125 vcpu->arch.xive_esc_on = 0; in kvmppc_xive_push_vcpu()
137 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) in xive_irq_trigger()
141 if (WARN_ON(!xd->trig_mmio)) in xive_irq_trigger()
144 out_be64(xd->trig_mmio, 0); in xive_irq_trigger()
153 vcpu->arch.irq_pending = 1; in xive_esc_irq()
155 if (vcpu->arch.ceded) in xive_esc_irq()
158 /* Since we have the no-EOI flag, the interrupt is effectively in xive_esc_irq()
167 vcpu->arch.xive_esc_on = false; in xive_esc_irq()
178 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_attach_escalation()
179 struct xive_q *q = &xc->queues[prio]; in kvmppc_xive_attach_escalation()
184 if (xc->esc_virq[prio]) in kvmppc_xive_attach_escalation()
188 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq); in kvmppc_xive_attach_escalation()
189 if (!xc->esc_virq[prio]) { in kvmppc_xive_attach_escalation()
191 prio, xc->server_num); in kvmppc_xive_attach_escalation()
192 return -EIO; in kvmppc_xive_attach_escalation()
196 name = kasprintf(GFP_KERNEL, "kvm-%d-%d", in kvmppc_xive_attach_escalation()
197 vcpu->kvm->arch.lpid, xc->server_num); in kvmppc_xive_attach_escalation()
199 name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d", in kvmppc_xive_attach_escalation()
200 vcpu->kvm->arch.lpid, xc->server_num, prio); in kvmppc_xive_attach_escalation()
203 prio, xc->server_num); in kvmppc_xive_attach_escalation()
204 rc = -ENOMEM; in kvmppc_xive_attach_escalation()
208 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio); in kvmppc_xive_attach_escalation()
210 rc = request_irq(xc->esc_virq[prio], xive_esc_irq, in kvmppc_xive_attach_escalation()
214 prio, xc->server_num); in kvmppc_xive_attach_escalation()
217 xc->esc_virq_names[prio] = name; in kvmppc_xive_attach_escalation()
224 * interrupt, thus leaving it effectively masked after in kvmppc_xive_attach_escalation()
228 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]); in kvmppc_xive_attach_escalation()
232 vcpu->arch.xive_esc_raddr = xd->eoi_page; in kvmppc_xive_attach_escalation()
233 vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio; in kvmppc_xive_attach_escalation()
234 xd->flags |= XIVE_IRQ_NO_EOI; in kvmppc_xive_attach_escalation()
239 irq_dispose_mapping(xc->esc_virq[prio]); in kvmppc_xive_attach_escalation()
240 xc->esc_virq[prio] = 0; in kvmppc_xive_attach_escalation()
247 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_provision_queue()
248 struct kvmppc_xive *xive = xc->xive; in xive_provision_queue()
249 struct xive_q *q = &xc->queues[prio]; in xive_provision_queue()
253 if (WARN_ON(q->qpage)) in xive_provision_queue()
257 qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order); in xive_provision_queue()
260 prio, xc->server_num); in xive_provision_queue()
261 return -ENOMEM; in xive_provision_queue()
263 memset(qpage, 0, 1 << xive->q_order); in xive_provision_queue()
266 * Reconfigure the queue. This will set q->qpage only once the in xive_provision_queue()
269 * qpage being non-NULL, and instead will only EOI when we receive in xive_provision_queue()
272 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage, in xive_provision_queue()
273 xive->q_order, true); in xive_provision_queue()
276 prio, xc->server_num); in xive_provision_queue()
280 /* Called with xive->lock held */
283 struct kvmppc_xive *xive = kvm->arch.xive; in xive_check_provisioning()
287 lockdep_assert_held(&xive->lock); in xive_check_provisioning()
290 if (xive->qmap & (1 << prio)) in xive_check_provisioning()
297 if (!vcpu->arch.xive_vcpu) in xive_check_provisioning()
300 if (rc == 0 && !xive->single_escalation) in xive_check_provisioning()
302 xive->single_escalation); in xive_check_provisioning()
309 xive->qmap |= (1 << prio); in xive_check_provisioning()
325 xc = vcpu->arch.xive_vcpu; in xive_inc_q_pending()
329 q = &xc->queues[prio]; in xive_inc_q_pending()
330 atomic_inc(&q->pending_count); in xive_inc_q_pending()
335 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_try_pick_queue()
340 return -ENXIO; in xive_try_pick_queue()
341 if (!xc->valid) in xive_try_pick_queue()
342 return -ENXIO; in xive_try_pick_queue()
344 q = &xc->queues[prio]; in xive_try_pick_queue()
345 if (WARN_ON(!q->qpage)) in xive_try_pick_queue()
346 return -ENXIO; in xive_try_pick_queue()
349 max = (q->msk + 1) - XIVE_Q_GAP; in xive_try_pick_queue()
350 return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY; in xive_try_pick_queue()
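/*
 * Editorial note (not part of the original file): the accounting above
 * bounds how many interrupts may target one queue. q->msk is the ring
 * index mask, so q->msk + 1 is the number of 32-bit event slots, and
 * XIVE_Q_GAP slots are kept in reserve so the event queue can never be
 * filled completely. For example, a 64KB queue page holds 16384 slots,
 * so atomic_add_unless(&q->count, 1, 16384 - XIVE_Q_GAP) only takes a
 * reference while the count is below that bound; otherwise it fails and
 * the caller returns -EBUSY.
 */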
362 return -EINVAL; in kvmppc_xive_select_target()
376 if (!vcpu->arch.xive_vcpu) in kvmppc_xive_select_target()
380 *server = vcpu->arch.xive_vcpu->server_num; in kvmppc_xive_select_target()
388 return -EBUSY; in kvmppc_xive_select_target()
401 * Take the lock, set masked, try again if racing in xive_lock_and_mask()
405 arch_spin_lock(&sb->lock); in xive_lock_and_mask()
406 old_prio = state->guest_priority; in xive_lock_and_mask()
407 state->guest_priority = MASKED; in xive_lock_and_mask()
409 if (!state->in_eoi) in xive_lock_and_mask()
411 state->guest_priority = old_prio; in xive_lock_and_mask()
412 arch_spin_unlock(&sb->lock); in xive_lock_and_mask()
416 if (old_prio == MASKED) in xive_lock_and_mask()
431 * an interrupt whenever we unmask a non-LSI via FW in xive_lock_and_mask()
434 if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) { in xive_lock_and_mask()
436 kvmppc_xive_vp(xive, state->act_server), in xive_lock_and_mask()
437 MASKED, state->number); in xive_lock_and_mask()
439 state->old_p = true; in xive_lock_and_mask()
440 state->old_q = false; in xive_lock_and_mask()
444 state->old_p = !!(val & 2); in xive_lock_and_mask()
445 state->old_q = !!(val & 1); in xive_lock_and_mask()
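/*
 * Editorial sketch, not part of the original file: the decode just above
 * is the convention used throughout this file for ESB "set PQ" loads,
 * with P in bit 1 and Q in bit 0 of the returned value. The helper names
 * below are hypothetical and assume the kernel's u64/bool types are in
 * scope.
 */
static inline bool kvm_xive_esb_p(u64 esb_val) { return !!(esb_val & 2); }
static inline bool kvm_xive_esb_q(u64 esb_val) { return !!(esb_val & 1); }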
464 arch_spin_lock(&sb->lock); in xive_lock_for_unmask()
465 if (!state->in_eoi) in xive_lock_for_unmask()
467 arch_spin_unlock(&sb->lock); in xive_lock_for_unmask()
480 if (state->guest_priority != MASKED) in xive_finish_unmask()
490 if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) { in xive_finish_unmask()
492 kvmppc_xive_vp(xive, state->act_server), in xive_finish_unmask()
493 state->act_priority, state->number); in xive_finish_unmask()
495 if (!state->old_p) in xive_finish_unmask()
498 if (!(xd->flags & OPAL_XIVE_IRQ_LSI)) in xive_finish_unmask()
504 if (state->old_q) in xive_finish_unmask()
512 if (!state->old_p) in xive_finish_unmask()
518 state->guest_priority = prio; in xive_finish_unmask()
532 struct kvmppc_xive *xive = kvm->arch.xive; in xive_target_interrupt()
555 if (state->act_priority != MASKED) in xive_target_interrupt()
557 state->act_server, in xive_target_interrupt()
558 state->act_priority); in xive_target_interrupt()
562 state->act_priority = prio; in xive_target_interrupt()
563 state->act_server = server; in xive_target_interrupt()
570 prio, state->number); in xive_target_interrupt()
578 * - Unless it was never enabled (or we run out of capacity)
580 * pair even when "masked" by the guest. This pair tends to
588 * - When masking, we set PQ to 10 and save the previous value
591 * - When unmasking, if saved Q was set, we set PQ to 11
594 * masked. Effectively we are OR'ing the previous Q into the
597 * Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
605 * - If H_EOI occurs while masked, we clear the saved P.
607 * - When changing target, we account on the new target and
616 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_set_xive()
624 return -ENODEV; in kvmppc_xive_set_xive()
630 if (priority != MASKED) { in kvmppc_xive_set_xive()
631 mutex_lock(&xive->lock); in kvmppc_xive_set_xive()
632 rc = xive_check_provisioning(xive->kvm, in kvmppc_xive_set_xive()
634 mutex_unlock(&xive->lock); in kvmppc_xive_set_xive()
643 return -EINVAL; in kvmppc_xive_set_xive()
644 state = &sb->irq_state[idx]; in kvmppc_xive_set_xive()
652 * xive_lock_and_mask() will also set state->guest_priority in kvmppc_xive_set_xive()
660 if (priority == MASKED) in kvmppc_xive_set_xive()
671 new_act_prio = state->act_priority; in kvmppc_xive_set_xive()
672 if (priority != MASKED) in kvmppc_xive_set_xive()
676 new_act_prio, state->act_server, state->act_priority); in kvmppc_xive_set_xive()
681 * The condition for re-targeting the interrupt is that in kvmppc_xive_set_xive()
692 if (new_act_prio != MASKED && in kvmppc_xive_set_xive()
693 (state->act_server != server || in kvmppc_xive_set_xive()
694 state->act_priority != new_act_prio)) in kvmppc_xive_set_xive()
701 if (priority != MASKED) in kvmppc_xive_set_xive()
708 state->saved_priority = priority; in kvmppc_xive_set_xive()
710 arch_spin_unlock(&sb->lock); in kvmppc_xive_set_xive()
717 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_get_xive()
723 return -ENODEV; in kvmppc_xive_get_xive()
727 return -EINVAL; in kvmppc_xive_get_xive()
728 state = &sb->irq_state[idx]; in kvmppc_xive_get_xive()
729 arch_spin_lock(&sb->lock); in kvmppc_xive_get_xive()
730 *server = state->act_server; in kvmppc_xive_get_xive()
731 *priority = state->guest_priority; in kvmppc_xive_get_xive()
732 arch_spin_unlock(&sb->lock); in kvmppc_xive_get_xive()
739 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_int_on()
745 return -ENODEV; in kvmppc_xive_int_on()
749 return -EINVAL; in kvmppc_xive_int_on()
750 state = &sb->irq_state[idx]; in kvmppc_xive_int_on()
757 if (state->act_priority == MASKED) { in kvmppc_xive_int_on()
759 return -EINVAL; in kvmppc_xive_int_on()
763 if (state->saved_priority == MASKED) in kvmppc_xive_int_on()
770 xive_finish_unmask(xive, sb, state, state->saved_priority); in kvmppc_xive_int_on()
771 arch_spin_unlock(&sb->lock); in kvmppc_xive_int_on()
778 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_int_off()
784 return -ENODEV; in kvmppc_xive_int_off()
788 return -EINVAL; in kvmppc_xive_int_off()
789 state = &sb->irq_state[idx]; in kvmppc_xive_int_off()
796 state->saved_priority = xive_lock_and_mask(xive, sb, state); in kvmppc_xive_int_off()
797 arch_spin_unlock(&sb->lock); in kvmppc_xive_int_off()
811 state = &sb->irq_state[idx]; in xive_restore_pending_irq()
812 if (!state->valid) in xive_restore_pending_irq()
816 * Trigger the IPI. This assumes we never restore a pass-through in xive_restore_pending_irq()
819 xive_irq_trigger(&state->ipi_data); in xive_restore_pending_irq()
826 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_get_icp()
831 /* Return the per-cpu state for state saving/migration */ in kvmppc_xive_get_icp()
832 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | in kvmppc_xive_get_icp()
833 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT | in kvmppc_xive_get_icp()
839 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_set_icp()
840 struct kvmppc_xive *xive = vcpu->kvm->arch.xive; in kvmppc_xive_set_icp()
845 return -ENOENT; in kvmppc_xive_set_icp()
854 xc->server_num, cppr, mfrr, xisr); in kvmppc_xive_set_icp()
858 * shouldn't happen because the vcpu->mutex makes running a in kvmppc_xive_set_icp()
861 if (WARN_ON(vcpu->arch.xive_pushed)) in kvmppc_xive_set_icp()
862 return -EIO; in kvmppc_xive_set_icp()
865 vcpu->arch.xive_saved_state.cppr = cppr; in kvmppc_xive_set_icp()
866 xc->hw_cppr = xc->cppr = cppr; in kvmppc_xive_set_icp()
870 * having a pending MFRR change, which will re-evaluate the in kvmppc_xive_set_icp()
874 xc->mfrr = mfrr; in kvmppc_xive_set_icp()
876 xive_irq_trigger(&xc->vp_ipi_data); in kvmppc_xive_set_icp()
888 xc->delayed_irq = xisr; in kvmppc_xive_set_icp()
889 xive->delayed_irqs++; in kvmppc_xive_set_icp()
899 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_set_mapped()
910 return -ENODEV; in kvmppc_xive_set_mapped()
916 return -EINVAL; in kvmppc_xive_set_mapped()
917 state = &sb->irq_state[idx]; in kvmppc_xive_set_mapped()
920 * Mark the passed-through interrupt as going to a VCPU, in kvmppc_xive_set_mapped()
928 * non-NULL to switch to passed-through or NULL for the in kvmppc_xive_set_mapped()
945 state->old_p, state->old_q); in kvmppc_xive_set_mapped()
948 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_set_mapped()
954 if (xive->ops && xive->ops->reset_mapped) in kvmppc_xive_set_mapped()
955 xive->ops->reset_mapped(kvm, guest_irq); in kvmppc_xive_set_mapped()
958 state->pt_number = hw_irq; in kvmppc_xive_set_mapped()
959 state->pt_data = irq_data_get_irq_handler_data(host_data); in kvmppc_xive_set_mapped()
968 kvmppc_xive_vp(xive, state->act_server), in kvmppc_xive_set_mapped()
969 state->act_priority, state->number); in kvmppc_xive_set_mapped()
978 if (prio != MASKED && !state->old_p) in kvmppc_xive_set_mapped()
979 xive_vm_source_eoi(hw_irq, state->pt_data); in kvmppc_xive_set_mapped()
982 state->old_p = state->old_q = false; in kvmppc_xive_set_mapped()
986 state->guest_priority = prio; in kvmppc_xive_set_mapped()
987 arch_spin_unlock(&sb->lock); in kvmppc_xive_set_mapped()
996 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_clr_mapped()
1005 return -ENODEV; in kvmppc_xive_clr_mapped()
1011 return -EINVAL; in kvmppc_xive_clr_mapped()
1012 state = &sb->irq_state[idx]; in kvmppc_xive_clr_mapped()
1021 state->old_p, state->old_q); in kvmppc_xive_clr_mapped()
1028 if (state->old_p) in kvmppc_xive_clr_mapped()
1029 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11); in kvmppc_xive_clr_mapped()
1031 /* Release the passed-through interrupt to the host */ in kvmppc_xive_clr_mapped()
1039 state->pt_number = 0; in kvmppc_xive_clr_mapped()
1040 state->pt_data = NULL; in kvmppc_xive_clr_mapped()
1046 if (xive->ops && xive->ops->reset_mapped) { in kvmppc_xive_clr_mapped()
1047 xive->ops->reset_mapped(kvm, guest_irq); in kvmppc_xive_clr_mapped()
1051 xive_native_configure_irq(state->ipi_number, in kvmppc_xive_clr_mapped()
1052 kvmppc_xive_vp(xive, state->act_server), in kvmppc_xive_clr_mapped()
1053 state->act_priority, state->number); in kvmppc_xive_clr_mapped()
1057 * occupied) or the interrupt is masked, we set the IPI in kvmppc_xive_clr_mapped()
1058 * to PQ=10 state. Otherwise we just re-enable it (PQ=00). in kvmppc_xive_clr_mapped()
1060 if (prio == MASKED || state->old_p) in kvmppc_xive_clr_mapped()
1061 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10); in kvmppc_xive_clr_mapped()
1063 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00); in kvmppc_xive_clr_mapped()
1067 state->guest_priority = prio; in kvmppc_xive_clr_mapped()
1068 arch_spin_unlock(&sb->lock); in kvmppc_xive_clr_mapped()
1076 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_disable_vcpu_interrupts()
1077 struct kvm *kvm = vcpu->kvm; in kvmppc_xive_disable_vcpu_interrupts()
1078 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_disable_vcpu_interrupts()
1081 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_disable_vcpu_interrupts()
1082 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in kvmppc_xive_disable_vcpu_interrupts()
1087 struct kvmppc_xive_irq_state *state = &sb->irq_state[j]; in kvmppc_xive_disable_vcpu_interrupts()
1089 if (!state->valid) in kvmppc_xive_disable_vcpu_interrupts()
1091 if (state->act_priority == MASKED) in kvmppc_xive_disable_vcpu_interrupts()
1093 if (state->act_server != xc->server_num) in kvmppc_xive_disable_vcpu_interrupts()
1097 arch_spin_lock(&sb->lock); in kvmppc_xive_disable_vcpu_interrupts()
1098 state->act_priority = MASKED; in kvmppc_xive_disable_vcpu_interrupts()
1099 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_disable_vcpu_interrupts()
1100 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1101 if (state->pt_number) { in kvmppc_xive_disable_vcpu_interrupts()
1102 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_disable_vcpu_interrupts()
1103 xive_native_configure_irq(state->pt_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1105 arch_spin_unlock(&sb->lock); in kvmppc_xive_disable_vcpu_interrupts()
1110 if (vcpu->arch.xive_esc_on) { in kvmppc_xive_disable_vcpu_interrupts()
1111 __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr + in kvmppc_xive_disable_vcpu_interrupts()
1113 vcpu->arch.xive_esc_on = false; in kvmppc_xive_disable_vcpu_interrupts()
1118 * This is safe because the vcpu->mutex is held, preventing in kvmppc_xive_disable_vcpu_interrupts()
1121 vcpu->arch.xive_esc_vaddr = 0; in kvmppc_xive_disable_vcpu_interrupts()
1122 vcpu->arch.xive_esc_raddr = 0; in kvmppc_xive_disable_vcpu_interrupts()
1127 * that EOI doesn't re-enable it, but just sets the stale_p flag to
1144 xd->stale_p = false; in xive_cleanup_single_escalation()
1146 if (!vcpu->arch.xive_esc_on) in xive_cleanup_single_escalation()
1147 xd->stale_p = true; in xive_cleanup_single_escalation()
1152 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_cleanup_vcpu()
1153 struct kvmppc_xive *xive = vcpu->kvm->arch.xive; in kvmppc_xive_cleanup_vcpu()
1162 pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num); in kvmppc_xive_cleanup_vcpu()
1165 xc->valid = false; in kvmppc_xive_cleanup_vcpu()
1169 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_cleanup_vcpu()
1173 if (xc->esc_virq[i]) { in kvmppc_xive_cleanup_vcpu()
1174 if (xc->xive->single_escalation) in kvmppc_xive_cleanup_vcpu()
1176 xc->esc_virq[i]); in kvmppc_xive_cleanup_vcpu()
1177 free_irq(xc->esc_virq[i], vcpu); in kvmppc_xive_cleanup_vcpu()
1178 irq_dispose_mapping(xc->esc_virq[i]); in kvmppc_xive_cleanup_vcpu()
1179 kfree(xc->esc_virq_names[i]); in kvmppc_xive_cleanup_vcpu()
1184 xive_native_disable_vp(xc->vp_id); in kvmppc_xive_cleanup_vcpu()
1187 vcpu->arch.xive_cam_word = 0; in kvmppc_xive_cleanup_vcpu()
1191 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_cleanup_vcpu()
1193 xive_native_disable_queue(xc->vp_id, q, i); in kvmppc_xive_cleanup_vcpu()
1194 if (q->qpage) { in kvmppc_xive_cleanup_vcpu()
1195 free_pages((unsigned long)q->qpage, in kvmppc_xive_cleanup_vcpu()
1196 xive->q_page_order); in kvmppc_xive_cleanup_vcpu()
1197 q->qpage = NULL; in kvmppc_xive_cleanup_vcpu()
1202 if (xc->vp_ipi) { in kvmppc_xive_cleanup_vcpu()
1203 xive_cleanup_irq_data(&xc->vp_ipi_data); in kvmppc_xive_cleanup_vcpu()
1204 xive_native_free_irq(xc->vp_ipi); in kvmppc_xive_cleanup_vcpu()
1210 vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; in kvmppc_xive_cleanup_vcpu()
1211 vcpu->arch.xive_vcpu = NULL; in kvmppc_xive_cleanup_vcpu()
1216 /* We have a block of xive->nr_servers VPs. We just need to check in kvmppc_xive_vcpu_id_valid()
1219 return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers; in kvmppc_xive_vcpu_id_valid()
1228 return -EINVAL; in kvmppc_xive_compute_vp_id()
1231 if (xive->vp_base == XIVE_INVALID_VP) { in kvmppc_xive_compute_vp_id()
1232 xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers); in kvmppc_xive_compute_vp_id()
1233 pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers); in kvmppc_xive_compute_vp_id()
1235 if (xive->vp_base == XIVE_INVALID_VP) in kvmppc_xive_compute_vp_id()
1236 return -ENOSPC; in kvmppc_xive_compute_vp_id()
1240 if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) { in kvmppc_xive_compute_vp_id()
1242 return -EEXIST; in kvmppc_xive_compute_vp_id()
1253 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_connect_vcpu()
1255 int i, r = -EBUSY; in kvmppc_xive_connect_vcpu()
1260 if (dev->ops != &kvm_xive_ops) { in kvmppc_xive_connect_vcpu()
1262 return -EPERM; in kvmppc_xive_connect_vcpu()
1264 if (xive->kvm != vcpu->kvm) in kvmppc_xive_connect_vcpu()
1265 return -EPERM; in kvmppc_xive_connect_vcpu()
1266 if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) in kvmppc_xive_connect_vcpu()
1267 return -EBUSY; in kvmppc_xive_connect_vcpu()
1270 mutex_lock(&xive->lock); in kvmppc_xive_connect_vcpu()
1278 r = -ENOMEM; in kvmppc_xive_connect_vcpu()
1282 vcpu->arch.xive_vcpu = xc; in kvmppc_xive_connect_vcpu()
1283 xc->xive = xive; in kvmppc_xive_connect_vcpu()
1284 xc->vcpu = vcpu; in kvmppc_xive_connect_vcpu()
1285 xc->server_num = cpu; in kvmppc_xive_connect_vcpu()
1286 xc->vp_id = vp_id; in kvmppc_xive_connect_vcpu()
1287 xc->mfrr = 0xff; in kvmppc_xive_connect_vcpu()
1288 xc->valid = true; in kvmppc_xive_connect_vcpu()
1290 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); in kvmppc_xive_connect_vcpu()
1295 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); in kvmppc_xive_connect_vcpu()
1296 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); in kvmppc_xive_connect_vcpu()
1299 xc->vp_ipi = xive_native_alloc_irq(); in kvmppc_xive_connect_vcpu()
1300 if (!xc->vp_ipi) { in kvmppc_xive_connect_vcpu()
1302 r = -EIO; in kvmppc_xive_connect_vcpu()
1305 pr_devel(" IPI=0x%x\n", xc->vp_ipi); in kvmppc_xive_connect_vcpu()
1307 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data); in kvmppc_xive_connect_vcpu()
1315 r = xive_native_enable_vp(xc->vp_id, xive->single_escalation); in kvmppc_xive_connect_vcpu()
1324 * our mfrr change notifications. If the VCPU is hot-plugged, we in kvmppc_xive_connect_vcpu()
1329 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_connect_vcpu()
1332 if (i == 7 && xive->single_escalation) in kvmppc_xive_connect_vcpu()
1336 if (xive->qmap & (1 << i)) { in kvmppc_xive_connect_vcpu()
1338 if (r == 0 && !xive->single_escalation) in kvmppc_xive_connect_vcpu()
1340 vcpu, i, xive->single_escalation); in kvmppc_xive_connect_vcpu()
1344 r = xive_native_configure_queue(xc->vp_id, in kvmppc_xive_connect_vcpu()
1355 r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation); in kvmppc_xive_connect_vcpu()
1360 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI); in kvmppc_xive_connect_vcpu()
1362 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00); in kvmppc_xive_connect_vcpu()
1365 mutex_unlock(&xive->lock); in kvmppc_xive_connect_vcpu()
1371 vcpu->arch.irq_type = KVMPPC_IRQ_XICS; in kvmppc_xive_connect_vcpu()
1388 state = &sb->irq_state[idx]; in xive_pre_save_set_queued()
1391 if (!state->valid) { in xive_pre_save_set_queued()
1401 if (!state->saved_p) in xive_pre_save_set_queued()
1405 state->in_queue = true; in xive_pre_save_set_queued()
1412 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq]; in xive_pre_save_mask_irq()
1414 if (!state->valid) in xive_pre_save_mask_irq()
1418 state->saved_scan_prio = xive_lock_and_mask(xive, sb, state); in xive_pre_save_mask_irq()
1421 state->saved_p = state->old_p; in xive_pre_save_mask_irq()
1422 state->saved_q = state->old_q; in xive_pre_save_mask_irq()
1425 arch_spin_unlock(&sb->lock); in xive_pre_save_mask_irq()
1432 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq]; in xive_pre_save_unmask_irq()
1434 if (!state->valid) in xive_pre_save_unmask_irq()
1444 /* Restore mask/prio if it wasn't masked */ in xive_pre_save_unmask_irq()
1445 if (state->saved_scan_prio != MASKED) in xive_pre_save_unmask_irq()
1446 xive_finish_unmask(xive, sb, state, state->saved_scan_prio); in xive_pre_save_unmask_irq()
1449 arch_spin_unlock(&sb->lock); in xive_pre_save_unmask_irq()
1454 u32 idx = q->idx; in xive_pre_save_queue()
1455 u32 toggle = q->toggle; in xive_pre_save_queue()
1459 irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle); in xive_pre_save_queue()
1474 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
1475 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in xive_pre_save_scan()
1483 kvm_for_each_vcpu(i, vcpu, xive->kvm) { in xive_pre_save_scan()
1484 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_pre_save_scan()
1488 if (xc->queues[j].qpage) in xive_pre_save_scan()
1489 xive_pre_save_queue(xive, &xc->queues[j]); in xive_pre_save_scan()
1494 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
1495 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in xive_pre_save_scan()
1508 for (i = 0; i <= xive->max_sbid; i++) { in xive_post_save_scan()
1509 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; in xive_post_save_scan()
1513 sb->irq_state[j].in_queue = false; in xive_post_save_scan()
1517 xive->saved_src_count = 0; in xive_post_save_scan()
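/*
 * Editorial note (not part of the original file): the pre/post save scans
 * above appear to be driven from xive_get_source() below. Saving the first
 * source (saved_src_count == 0) triggers xive_pre_save_scan(), which masks
 * every valid source while recording its P/Q bits, walks each vcpu's
 * queues to flag in-flight entries, then restores the masks; once
 * saved_src_count reaches src_count, xive_post_save_scan() clears the
 * in_queue flags and resets the counter for the next pass.
 */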
1533 return -ENOENT; in xive_get_source()
1535 state = &sb->irq_state[idx]; in xive_get_source()
1537 if (!state->valid) in xive_get_source()
1538 return -ENOENT; in xive_get_source()
1558 if (xive->saved_src_count == 0) in xive_get_source()
1560 xive->saved_src_count++; in xive_get_source()
1563 val = state->act_server; in xive_get_source()
1564 prio = state->saved_scan_prio; in xive_get_source()
1566 if (prio == MASKED) { in xive_get_source()
1568 prio = state->saved_priority; in xive_get_source()
1571 if (state->lsi) { in xive_get_source()
1573 if (state->saved_p) in xive_get_source()
1576 if (state->saved_p) in xive_get_source()
1579 if (state->saved_q) in xive_get_source()
1583 * We mark it pending (which will attempt a re-delivery) in xive_get_source()
1584 * if we are in a queue *or* we were masked and had in xive_get_source()
1585 * Q set which is equivalent to the XICS "masked pending" in xive_get_source()
1588 if (state->in_queue || (prio == MASKED && state->saved_q)) in xive_get_source()
1596 if (xive->saved_src_count == xive->src_count) in xive_get_source()
1601 return -EFAULT; in xive_get_source()
1614 mutex_lock(&xive->lock); in kvmppc_xive_create_src_block()
1616 /* block already exists - somebody else got here first */ in kvmppc_xive_create_src_block()
1617 if (xive->src_blocks[bid]) in kvmppc_xive_create_src_block()
1625 sb->id = bid; in kvmppc_xive_create_src_block()
1628 sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i; in kvmppc_xive_create_src_block()
1629 sb->irq_state[i].eisn = 0; in kvmppc_xive_create_src_block()
1630 sb->irq_state[i].guest_priority = MASKED; in kvmppc_xive_create_src_block()
1631 sb->irq_state[i].saved_priority = MASKED; in kvmppc_xive_create_src_block()
1632 sb->irq_state[i].act_priority = MASKED; in kvmppc_xive_create_src_block()
1635 xive->src_blocks[bid] = sb; in kvmppc_xive_create_src_block()
1637 if (bid > xive->max_sbid) in kvmppc_xive_create_src_block()
1638 xive->max_sbid = bid; in kvmppc_xive_create_src_block()
1641 mutex_unlock(&xive->lock); in kvmppc_xive_create_src_block()
1642 return xive->src_blocks[bid]; in kvmppc_xive_create_src_block()
1647 struct kvm *kvm = xive->kvm; in xive_check_delayed_irq()
1652 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_check_delayed_irq()
1657 if (xc->delayed_irq == irq) { in xive_check_delayed_irq()
1658 xc->delayed_irq = 0; in xive_check_delayed_irq()
1659 xive->delayed_irqs--; in xive_check_delayed_irq()
1678 return -ENOENT; in xive_set_source()
1689 return -ENOMEM; in xive_set_source()
1692 state = &sb->irq_state[idx]; in xive_set_source()
1697 return -EFAULT; in xive_set_source()
1710 if (!state->ipi_number) { in xive_set_source()
1711 state->ipi_number = xive_native_alloc_irq(); in xive_set_source()
1712 if (state->ipi_number == 0) { in xive_set_source()
1714 return -ENOMEM; in xive_set_source()
1716 xive_native_populate_irq_data(state->ipi_number, &state->ipi_data); in xive_set_source()
1717 pr_devel(" src_ipi=0x%x\n", state->ipi_number); in xive_set_source()
1721 * We use lock_and_mask() to set us in the right masked in xive_set_source()
1727 state->guest_priority = 0; in xive_set_source()
1733 * can become "untargeted" across migration if it was masked in xive_set_source()
1739 state->act_priority = MASKED; in xive_set_source()
1746 arch_spin_unlock(&sb->lock); in xive_set_source()
1749 if (act_prio != MASKED) { in xive_set_source()
1751 mutex_lock(&xive->lock); in xive_set_source()
1752 rc = xive_check_provisioning(xive->kvm, act_prio); in xive_set_source()
1753 mutex_unlock(&xive->lock); in xive_set_source()
1757 rc = xive_target_interrupt(xive->kvm, state, in xive_set_source()
1761 * alone and masked. It will remain disabled until in xive_set_source()
1762 * the guest re-targets it. in xive_set_source()
1770 if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) { in xive_set_source()
1776 state->old_p = false; in xive_set_source()
1777 state->old_q = false; in xive_set_source()
1778 state->lsi = false; in xive_set_source()
1779 state->asserted = false; in xive_set_source()
1783 state->lsi = true; in xive_set_source()
1785 state->asserted = true; in xive_set_source()
1786 pr_devel(" LSI ! Asserted=%d\n", state->asserted); in xive_set_source()
1800 state->old_p = true; in xive_set_source()
1802 state->old_q = true; in xive_set_source()
1804 pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q); in xive_set_source()
1809 * re-trigger if necessary. in xive_set_source()
1812 pr_devel(" masked, saving prio\n"); in xive_set_source()
1813 state->guest_priority = MASKED; in xive_set_source()
1814 state->saved_priority = guest_prio; in xive_set_source()
1818 state->saved_priority = guest_prio; in xive_set_source()
1822 if (!state->valid) in xive_set_source()
1823 xive->src_count++; in xive_set_source()
1824 state->valid = true; in xive_set_source()
1832 struct kvmppc_xive *xive = kvm->arch.xive; in kvmppc_xive_set_irq()
1838 return -ENODEV; in kvmppc_xive_set_irq()
1842 return -EINVAL; in kvmppc_xive_set_irq()
1845 state = &sb->irq_state[idx]; in kvmppc_xive_set_irq()
1846 if (!state->valid) in kvmppc_xive_set_irq()
1847 return -EINVAL; in kvmppc_xive_set_irq()
1849 /* We don't allow a trigger on a passed-through interrupt */ in kvmppc_xive_set_irq()
1850 if (state->pt_number) in kvmppc_xive_set_irq()
1851 return -EINVAL; in kvmppc_xive_set_irq()
1853 if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL) in kvmppc_xive_set_irq()
1854 state->asserted = 1; in kvmppc_xive_set_irq()
1856 state->asserted = 0; in kvmppc_xive_set_irq()
1861 xive_irq_trigger(&state->ipi_data); in kvmppc_xive_set_irq()
1873 return -EFAULT; in kvmppc_xive_set_nr_servers()
1878 return -EINVAL; in kvmppc_xive_set_nr_servers()
1880 mutex_lock(&xive->lock); in kvmppc_xive_set_nr_servers()
1881 if (xive->vp_base != XIVE_INVALID_VP) in kvmppc_xive_set_nr_servers()
1889 rc = -EBUSY; in kvmppc_xive_set_nr_servers()
1894 xive->nr_servers = KVM_MAX_VCPUS; in kvmppc_xive_set_nr_servers()
1896 xive->nr_servers = nr_servers; in kvmppc_xive_set_nr_servers()
1898 mutex_unlock(&xive->lock); in kvmppc_xive_set_nr_servers()
1905 struct kvmppc_xive *xive = dev->private; in xive_set_attr()
1908 switch (attr->group) { in xive_set_attr()
1910 return xive_set_source(xive, attr->attr, attr->addr); in xive_set_attr()
1912 switch (attr->attr) { in xive_set_attr()
1914 return kvmppc_xive_set_nr_servers(xive, attr->addr); in xive_set_attr()
1917 return -ENXIO; in xive_set_attr()
1922 struct kvmppc_xive *xive = dev->private; in xive_get_attr()
1925 switch (attr->group) { in xive_get_attr()
1927 return xive_get_source(xive, attr->attr, attr->addr); in xive_get_attr()
1929 return -ENXIO; in xive_get_attr()
1935 switch (attr->group) { in xive_has_attr()
1937 if (attr->attr >= KVMPPC_XICS_FIRST_IRQ && in xive_has_attr()
1938 attr->attr < KVMPPC_XICS_NR_IRQS) in xive_has_attr()
1942 switch (attr->attr) { in xive_has_attr()
1947 return -ENXIO; in xive_has_attr()
1953 xive_native_configure_irq(hw_num, 0, MASKED, 0); in kvmppc_xive_cleanup_irq()
1961 struct kvmppc_xive_irq_state *state = &sb->irq_state[i]; in kvmppc_xive_free_sources()
1963 if (!state->valid) in kvmppc_xive_free_sources()
1966 kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data); in kvmppc_xive_free_sources()
1967 xive_cleanup_irq_data(&state->ipi_data); in kvmppc_xive_free_sources()
1968 xive_native_free_irq(state->ipi_number); in kvmppc_xive_free_sources()
1970 /* Pass-through, cleanup too but keep IRQ hw data */ in kvmppc_xive_free_sources()
1971 if (state->pt_number) in kvmppc_xive_free_sources()
1972 kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data); in kvmppc_xive_free_sources()
1974 state->valid = false; in kvmppc_xive_free_sources()
1979 * Called when device fd is closed. kvm->lock is held.
1983 struct kvmppc_xive *xive = dev->private; in kvmppc_xive_release()
1984 struct kvm *kvm = xive->kvm; in kvmppc_xive_release()
1999 debugfs_remove(xive->dentry); in kvmppc_xive_release()
2006 * Take vcpu->mutex to ensure that no one_reg get/set ioctl in kvmppc_xive_release()
2008 * Holding the vcpu->mutex also means that the vcpu cannot in kvmppc_xive_release()
2013 mutex_lock(&vcpu->mutex); in kvmppc_xive_release()
2015 mutex_unlock(&vcpu->mutex); in kvmppc_xive_release()
2019 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type in kvmppc_xive_release()
2020 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe in kvmppc_xive_release()
2024 kvm->arch.xive = NULL; in kvmppc_xive_release()
2027 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_release()
2028 if (xive->src_blocks[i]) in kvmppc_xive_release()
2029 kvmppc_xive_free_sources(xive->src_blocks[i]); in kvmppc_xive_release()
2030 kfree(xive->src_blocks[i]); in kvmppc_xive_release()
2031 xive->src_blocks[i] = NULL; in kvmppc_xive_release()
2034 if (xive->vp_base != XIVE_INVALID_VP) in kvmppc_xive_release()
2035 xive_native_free_vp_block(xive->vp_base); in kvmppc_xive_release()
2059 &kvm->arch.xive_devices.native : in kvmppc_xive_get_device()
2060 &kvm->arch.xive_devices.xics_on_xive; in kvmppc_xive_get_device()
2074 * Create a XICS device with XIVE backend. kvm->lock is held.
2079 struct kvm *kvm = dev->kvm; in kvmppc_xive_create()
2084 if (kvm->arch.xive) in kvmppc_xive_create()
2085 return -EEXIST; in kvmppc_xive_create()
2089 return -ENOMEM; in kvmppc_xive_create()
2091 dev->private = xive; in kvmppc_xive_create()
2092 xive->dev = dev; in kvmppc_xive_create()
2093 xive->kvm = kvm; in kvmppc_xive_create()
2094 mutex_init(&xive->lock); in kvmppc_xive_create()
2097 xive->q_order = xive_native_default_eq_shift(); in kvmppc_xive_create()
2098 if (xive->q_order < PAGE_SHIFT) in kvmppc_xive_create()
2099 xive->q_page_order = 0; in kvmppc_xive_create()
2101 xive->q_page_order = xive->q_order - PAGE_SHIFT; in kvmppc_xive_create()
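/*
 * Editorial note (not part of the original file): q_order is the log2 byte
 * size of one event queue (see the memset of 1 << xive->q_order above) and
 * q_page_order converts it into an allocation order for __get_free_pages().
 * For example, with a 64KB default EQ (q_order = 16), a 4KB-page kernel
 * gets q_page_order = 4 (sixteen pages), while a 64KB-page kernel gets
 * q_page_order = 0 (a single page).
 */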
2104 xive->vp_base = XIVE_INVALID_VP; in kvmppc_xive_create()
2108 xive->nr_servers = KVM_MAX_VCPUS; in kvmppc_xive_create()
2110 xive->single_escalation = xive_native_has_single_escalation(); in kvmppc_xive_create()
2112 kvm->arch.xive = xive; in kvmppc_xive_create()
2118 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_debug_show_queues()
2122 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_debug_show_queues()
2125 if (!q->qpage && !xc->esc_virq[i]) in kvmppc_xive_debug_show_queues()
2130 if (q->qpage) { in kvmppc_xive_debug_show_queues()
2131 idx = q->idx; in kvmppc_xive_debug_show_queues()
2132 i0 = be32_to_cpup(q->qpage + idx); in kvmppc_xive_debug_show_queues()
2133 idx = (idx + 1) & q->msk; in kvmppc_xive_debug_show_queues()
2134 i1 = be32_to_cpup(q->qpage + idx); in kvmppc_xive_debug_show_queues()
2135 seq_printf(m, "T=%d %08x %08x...\n", q->toggle, in kvmppc_xive_debug_show_queues()
2138 if (xc->esc_virq[i]) { in kvmppc_xive_debug_show_queues()
2139 struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]); in kvmppc_xive_debug_show_queues()
2147 xc->esc_virq[i], pq, xd->eoi_page); in kvmppc_xive_debug_show_queues()
2156 struct kvmppc_xive *xive = m->private; in xive_debug_show()
2157 struct kvm *kvm = xive->kvm; in xive_debug_show()
2177 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_debug_show()
2184 xc->server_num, xc->vp_id, xc->cppr, xc->hw_cppr, in xive_debug_show()
2185 xc->mfrr, xc->pending, in xive_debug_show()
2186 xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); in xive_debug_show()
2190 t_rm_h_xirr += xc->stat_rm_h_xirr; in xive_debug_show()
2191 t_rm_h_ipoll += xc->stat_rm_h_ipoll; in xive_debug_show()
2192 t_rm_h_cppr += xc->stat_rm_h_cppr; in xive_debug_show()
2193 t_rm_h_eoi += xc->stat_rm_h_eoi; in xive_debug_show()
2194 t_rm_h_ipi += xc->stat_rm_h_ipi; in xive_debug_show()
2195 t_vm_h_xirr += xc->stat_vm_h_xirr; in xive_debug_show()
2196 t_vm_h_ipoll += xc->stat_vm_h_ipoll; in xive_debug_show()
2197 t_vm_h_cppr += xc->stat_vm_h_cppr; in xive_debug_show()
2198 t_vm_h_eoi += xc->stat_vm_h_eoi; in xive_debug_show()
2199 t_vm_h_ipi += xc->stat_vm_h_ipi; in xive_debug_show()
2218 name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive); in xive_debugfs_init()
2224 xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root, in xive_debugfs_init()
2233 struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private; in kvmppc_xive_init()
2240 .name = "kvm-xive",