Lines Matching +full:0 +full:xc

78 	out_be64(xd->trig_mmio, 0);  in xive_irq_trigger()
108 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_attach_escalation() local
109 struct xive_q *q = &xc->queues[prio]; in xive_attach_escalation()
114 if (xc->esc_virq[prio]) in xive_attach_escalation()
115 return 0; in xive_attach_escalation()
118 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq); in xive_attach_escalation()
119 if (!xc->esc_virq[prio]) { in xive_attach_escalation()
121 prio, xc->server_num); in xive_attach_escalation()
125 if (xc->xive->single_escalation) in xive_attach_escalation()
127 vcpu->kvm->arch.lpid, xc->server_num); in xive_attach_escalation()
130 vcpu->kvm->arch.lpid, xc->server_num, prio); in xive_attach_escalation()
133 prio, xc->server_num); in xive_attach_escalation()
138 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio); in xive_attach_escalation()
140 rc = request_irq(xc->esc_virq[prio], xive_esc_irq, in xive_attach_escalation()
144 prio, xc->server_num); in xive_attach_escalation()
147 xc->esc_virq_names[prio] = name; in xive_attach_escalation()
157 if (xc->xive->single_escalation) { in xive_attach_escalation()
158 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]); in xive_attach_escalation()
167 return 0; in xive_attach_escalation()
169 irq_dispose_mapping(xc->esc_virq[prio]); in xive_attach_escalation()
170 xc->esc_virq[prio] = 0; in xive_attach_escalation()
177 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_provision_queue() local
178 struct kvmppc_xive *xive = xc->xive; in xive_provision_queue()
179 struct xive_q *q = &xc->queues[prio]; in xive_provision_queue()
184 return 0; in xive_provision_queue()
190 prio, xc->server_num); in xive_provision_queue()
193 memset(qpage, 0, 1 << xive->q_order); in xive_provision_queue()
197 * queue is fully configured. This is a requirement for prio 0 in xive_provision_queue()
200 * corresponding queue 0 entries in xive_provision_queue()
202 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage, in xive_provision_queue()
206 prio, xc->server_num); in xive_provision_queue()
221 return 0; in xive_check_provisioning()
230 if (rc == 0 && !xive->single_escalation) in xive_check_provisioning()
239 return 0; in xive_check_provisioning()
245 struct kvmppc_xive_vcpu *xc; in xive_inc_q_pending() local
254 xc = vcpu->arch.xive_vcpu; in xive_inc_q_pending()
255 if (WARN_ON(!xc)) in xive_inc_q_pending()
258 q = &xc->queues[prio]; in xive_inc_q_pending()
264 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_try_pick_queue() local
268 if (WARN_ON(!xc)) in xive_try_pick_queue()
270 if (!xc->valid) in xive_try_pick_queue()
273 q = &xc->queues[prio]; in xive_try_pick_queue()
279 return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY; in xive_try_pick_queue()
294 pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio); in xive_select_target()
298 if (rc == 0) in xive_select_target()
308 if (rc == 0) { in xive_select_target()
310 pr_devel(" found on 0x%x/%d\n", *server, prio); in xive_select_target()
554 int rc = 0; in kvmppc_xive_set_xive()
560 pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n", in kvmppc_xive_set_xive()
613 * we have a valid new priority (new_act_prio is not 0xff) in kvmppc_xive_set_xive()
665 return 0; in kvmppc_xive_get_xive()
683 pr_devel("int_on(irq=0x%x)\n", irq); in kvmppc_xive_int_on()
693 /* If saved_priority is 0xff, do nothing */ in kvmppc_xive_int_on()
695 return 0; in kvmppc_xive_int_on()
704 return 0; in kvmppc_xive_int_on()
722 pr_devel("int_off(irq=0x%x)\n", irq); in kvmppc_xive_int_off()
730 return 0; in kvmppc_xive_int_off()
757 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_get_icp() local
759 if (!xc) in kvmppc_xive_get_icp()
760 return 0; in kvmppc_xive_get_icp()
763 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | in kvmppc_xive_get_icp()
764 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT | in kvmppc_xive_get_icp()
765 (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT; in kvmppc_xive_get_icp()
770 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_set_icp() local
775 if (!xc || !xive) in kvmppc_xive_set_icp()
784 pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n", in kvmppc_xive_set_icp()
785 xc->server_num, cppr, mfrr, xisr); in kvmppc_xive_set_icp()
796 xc->hw_cppr = xc->cppr = cppr; in kvmppc_xive_set_icp()
799 * Update MFRR state. If it's not 0xff, we mark the VCPU as in kvmppc_xive_set_icp()
804 xc->mfrr = mfrr; in kvmppc_xive_set_icp()
806 xive_irq_trigger(&xc->vp_ipi_data); in kvmppc_xive_set_icp()
818 xc->delayed_irq = xisr; in kvmppc_xive_set_icp()
823 return 0; in kvmppc_xive_set_icp()
842 pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n",guest_irq, hw_irq); in kvmppc_xive_set_mapped()
887 * mask the interrupt in a lossy way (act_priority is 0xff) in kvmppc_xive_set_mapped()
912 return 0; in kvmppc_xive_set_mapped()
930 pr_devel("clr_mapped girq 0x%lx...\n", guest_irq); in kvmppc_xive_clr_mapped()
962 state->pt_number = 0; in kvmppc_xive_clr_mapped()
985 return 0; in kvmppc_xive_clr_mapped()
991 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_disable_vcpu_interrupts() local
996 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_disable_vcpu_interrupts()
1001 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) { in kvmppc_xive_disable_vcpu_interrupts()
1008 if (state->act_server != xc->server_num) in kvmppc_xive_disable_vcpu_interrupts()
1015 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1018 xive_native_configure_irq(state->pt_number, 0, MASKED, 0); in kvmppc_xive_disable_vcpu_interrupts()
1027 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in kvmppc_xive_cleanup_vcpu() local
1028 struct kvmppc_xive *xive = xc->xive; in kvmppc_xive_cleanup_vcpu()
1031 pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num); in kvmppc_xive_cleanup_vcpu()
1034 xc->valid = false; in kvmppc_xive_cleanup_vcpu()
1038 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01); in kvmppc_xive_cleanup_vcpu()
1041 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_cleanup_vcpu()
1042 if (xc->esc_virq[i]) { in kvmppc_xive_cleanup_vcpu()
1043 free_irq(xc->esc_virq[i], vcpu); in kvmppc_xive_cleanup_vcpu()
1044 irq_dispose_mapping(xc->esc_virq[i]); in kvmppc_xive_cleanup_vcpu()
1045 kfree(xc->esc_virq_names[i]); in kvmppc_xive_cleanup_vcpu()
1050 xive_native_disable_vp(xc->vp_id); in kvmppc_xive_cleanup_vcpu()
1053 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_cleanup_vcpu()
1054 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_cleanup_vcpu()
1056 xive_native_disable_queue(xc->vp_id, q, i); in kvmppc_xive_cleanup_vcpu()
1065 if (xc->vp_ipi) { in kvmppc_xive_cleanup_vcpu()
1066 xive_cleanup_irq_data(&xc->vp_ipi_data); in kvmppc_xive_cleanup_vcpu()
1067 xive_native_free_irq(xc->vp_ipi); in kvmppc_xive_cleanup_vcpu()
1070 kfree(xc); in kvmppc_xive_cleanup_vcpu()
1077 struct kvmppc_xive_vcpu *xc; in kvmppc_xive_connect_vcpu() local
1098 xc = kzalloc(sizeof(*xc), GFP_KERNEL); in kvmppc_xive_connect_vcpu()
1099 if (!xc) in kvmppc_xive_connect_vcpu()
1104 vcpu->arch.xive_vcpu = xc; in kvmppc_xive_connect_vcpu()
1105 xc->xive = xive; in kvmppc_xive_connect_vcpu()
1106 xc->vcpu = vcpu; in kvmppc_xive_connect_vcpu()
1107 xc->server_num = cpu; in kvmppc_xive_connect_vcpu()
1108 xc->vp_id = xive_vp(xive, cpu); in kvmppc_xive_connect_vcpu()
1109 xc->mfrr = 0xff; in kvmppc_xive_connect_vcpu()
1110 xc->valid = true; in kvmppc_xive_connect_vcpu()
1112 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); in kvmppc_xive_connect_vcpu()
1117 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); in kvmppc_xive_connect_vcpu()
1118 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); in kvmppc_xive_connect_vcpu()
1121 xc->vp_ipi = xive_native_alloc_irq(); in kvmppc_xive_connect_vcpu()
1122 if (!xc->vp_ipi) { in kvmppc_xive_connect_vcpu()
1127 pr_devel(" IPI=0x%x\n", xc->vp_ipi); in kvmppc_xive_connect_vcpu()
1129 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data); in kvmppc_xive_connect_vcpu()
1137 r = xive_native_enable_vp(xc->vp_id, xive->single_escalation); in kvmppc_xive_connect_vcpu()
1145 * and we enable escalation for queue 0 only which we'll use for in kvmppc_xive_connect_vcpu()
1150 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in kvmppc_xive_connect_vcpu()
1151 struct xive_q *q = &xc->queues[i]; in kvmppc_xive_connect_vcpu()
1160 if (r == 0 && !xive->single_escalation) in kvmppc_xive_connect_vcpu()
1165 r = xive_native_configure_queue(xc->vp_id, in kvmppc_xive_connect_vcpu()
1166 q, i, NULL, 0, true); in kvmppc_xive_connect_vcpu()
1175 /* If not done above, attach priority 0 escalation */ in kvmppc_xive_connect_vcpu()
1176 r = xive_attach_escalation(vcpu, 0); in kvmppc_xive_connect_vcpu()
1181 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI); in kvmppc_xive_connect_vcpu()
1183 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00); in kvmppc_xive_connect_vcpu()
1193 return 0; in kvmppc_xive_connect_vcpu()
1213 pr_err("invalid irq 0x%x in cpu queue!\n", irq); in xive_pre_save_set_queued()
1223 pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq); in xive_pre_save_set_queued()
1295 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
1299 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) in xive_pre_save_scan()
1305 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_pre_save_scan() local
1306 if (!xc) in xive_pre_save_scan()
1308 for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) { in xive_pre_save_scan()
1309 if (xc->queues[j].qpage) in xive_pre_save_scan()
1310 xive_pre_save_queue(xive, &xc->queues[j]); in xive_pre_save_scan()
1315 for (i = 0; i <= xive->max_sbid; i++) { in xive_pre_save_scan()
1319 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) in xive_pre_save_scan()
1329 for (i = 0; i <= xive->max_sbid; i++) { in xive_post_save_scan()
1333 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) in xive_post_save_scan()
1338 xive->saved_src_count = 0; in xive_post_save_scan()
1379 if (xive->saved_src_count == 0) in xive_get_source()
1424 return 0; in xive_get_source()
1449 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { in xive_create_src_block()
1473 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_check_delayed_irq() local
1475 if (!xc) in xive_check_delayed_irq()
1478 if (xc->delayed_irq == irq) { in xive_check_delayed_irq()
1479 xc->delayed_irq = 0; in xive_check_delayed_irq()
1496 int rc = 0; in xive_set_source()
1501 pr_devel("set_source(irq=0x%lx)\n", irq); in xive_set_source()
1524 	pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n", in xive_set_source()
1533 if (state->ipi_number == 0) { in xive_set_source()
1538 pr_devel(" src_ipi=0x%x\n", state->ipi_number); in xive_set_source()
1546 * 0 before calling it to ensure it actually performs the masking. in xive_set_source()
1548 state->guest_priority = 0; in xive_set_source()
1577 if (rc == 0) in xive_set_source()
1647 return 0; in xive_set_source()
1676 else if (level == 0 || level == KVM_INTERRUPT_UNSET) { in kvmppc_xive_set_irq()
1677 state->asserted = 0; in kvmppc_xive_set_irq()
1678 return 0; in kvmppc_xive_set_irq()
1684 return 0; in kvmppc_xive_set_irq()
1718 return 0; in xive_has_attr()
1727 xive_native_configure_irq(hw_num, 0, MASKED, 0); in kvmppc_xive_cleanup_irq()
1734 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { in kvmppc_xive_free_sources()
1764 for (i = 0; i <= xive->max_sbid; i++) { in kvmppc_xive_free()
1783 int ret = 0; in kvmppc_xive_create()
1804 xive->q_page_order = 0; in kvmppc_xive_create()
1822 return 0; in kvmppc_xive_create()
1831 u64 t_rm_h_xirr = 0; in xive_debug_show()
1832 u64 t_rm_h_ipoll = 0; in xive_debug_show()
1833 u64 t_rm_h_cppr = 0; in xive_debug_show()
1834 u64 t_rm_h_eoi = 0; in xive_debug_show()
1835 u64 t_rm_h_ipi = 0; in xive_debug_show()
1836 u64 t_vm_h_xirr = 0; in xive_debug_show()
1837 u64 t_vm_h_ipoll = 0; in xive_debug_show()
1838 u64 t_vm_h_cppr = 0; in xive_debug_show()
1839 u64 t_vm_h_eoi = 0; in xive_debug_show()
1840 u64 t_vm_h_ipi = 0; in xive_debug_show()
1844 return 0; in xive_debug_show()
1849 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; in xive_debug_show() local
1852 if (!xc) in xive_debug_show()
1857 xc->server_num, xc->cppr, xc->hw_cppr, in xive_debug_show()
1858 xc->mfrr, xc->pending, in xive_debug_show()
1859 xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); in xive_debug_show()
1860 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { in xive_debug_show()
1861 struct xive_q *q = &xc->queues[i]; in xive_debug_show()
1864 if (!q->qpage && !xc->esc_virq[i]) in xive_debug_show()
1876 if (xc->esc_virq[i]) { in xive_debug_show()
1877 struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]); in xive_debug_show()
1883 xc->esc_virq[i], pq, xd->eoi_page); in xive_debug_show()
1888 t_rm_h_xirr += xc->stat_rm_h_xirr; in xive_debug_show()
1889 t_rm_h_ipoll += xc->stat_rm_h_ipoll; in xive_debug_show()
1890 t_rm_h_cppr += xc->stat_rm_h_cppr; in xive_debug_show()
1891 t_rm_h_eoi += xc->stat_rm_h_eoi; in xive_debug_show()
1892 t_rm_h_ipi += xc->stat_rm_h_ipi; in xive_debug_show()
1893 t_vm_h_xirr += xc->stat_vm_h_xirr; in xive_debug_show()
1894 t_vm_h_ipoll += xc->stat_vm_h_ipoll; in xive_debug_show()
1895 t_vm_h_cppr += xc->stat_vm_h_cppr; in xive_debug_show()
1896 t_vm_h_eoi += xc->stat_vm_h_eoi; in xive_debug_show()
1897 t_vm_h_ipi += xc->stat_vm_h_ipi; in xive_debug_show()
1907 return 0; in xive_debug_show()