Lines Matching +full:0 +full:xd

44 #define DBG_VERBOSE(fmt...)	do { } while(0)
78 * or 0 if there is no new entry.
87 return 0; in xive_read_eq()
92 return 0; in xive_read_eq()
100 if (q->idx == 0) in xive_read_eq()
104 return cur & 0x7fffffff; in xive_read_eq()
114 * (0xff if none) and return what was found (0 if none).
132 u32 irq = 0; in xive_scan_interrupts()
133 u8 prio = 0; in xive_scan_interrupts()
136 while (xc->pending_prio != 0) { in xive_scan_interrupts()
170 int p = atomic_xchg(&q->pending_count, 0); in xive_scan_interrupts()
178 /* If nothing was found, set CPPR to 0xff */ in xive_scan_interrupts()
179 if (irq == 0) in xive_scan_interrupts()
180 prio = 0xff; in xive_scan_interrupts()
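
The comment fragments above give the contract for xive_scan_interrupts(): pick the most-favoured (numerically lowest) pending priority, return 0 when no interrupt is found, and set the CPPR to 0xff in that case. The sketch below is only a hedged illustration of that selection step; pick_new_cppr() and its pending mask are hypothetical stand-ins for the real xc->pending_prio handling, not the kernel implementation.

#include <strings.h>	/* ffs() */

/*
 * Illustrative only: pick the most favoured pending priority out of a
 * one-bit-per-priority mask.  On XIVE, lower numbers are more favoured
 * and a CPPR of 0xff accepts every priority.
 */
static unsigned int pick_new_cppr(unsigned char pending)
{
	if (pending == 0)
		return 0xff;		/* nothing found: open the CPPR fully */
	return ffs(pending) - 1;	/* lowest set bit = best priority */
}
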
196 static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset) in xive_esb_read() argument
200 if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) in xive_esb_read()
204 if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) in xive_esb_read()
207 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) in xive_esb_read()
208 val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0); in xive_esb_read()
210 val = in_be64(xd->eoi_mmio + offset); in xive_esb_read()
215 static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data) in xive_esb_write() argument
218 if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) in xive_esb_write()
221 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) in xive_esb_write()
222 xive_ops->esb_rw(xd->hw_irq, offset, data, 1); in xive_esb_write()
224 out_be64(xd->eoi_mmio + offset, data); in xive_esb_write()
254 xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi, in xmon_xive_do_dump()
280 xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); in xmon_xive_get_irq_config()
284 xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", in xmon_xive_get_irq_config()
291 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xmon_xive_get_irq_config() local
292 u64 val = xive_esb_read(xd, XIVE_ESB_GET); in xmon_xive_get_irq_config()
295 xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ', in xmon_xive_get_irq_config()
296 xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', in xmon_xive_get_irq_config()
297 xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', in xmon_xive_get_irq_config()
303 return 0; in xmon_xive_get_irq_config()
334 DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n", in xive_get_irq()
339 return 0; in xive_get_irq()
355 if (xive_scan_interrupts(xc, true) != 0) { in xive_do_queue_eoi()
356 DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio); in xive_do_queue_eoi()
365 static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd) in xive_do_source_eoi() argument
367 xd->stale_p = false; in xive_do_source_eoi()
369 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) in xive_do_source_eoi()
370 xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0); in xive_do_source_eoi()
371 else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) { in xive_do_source_eoi()
376 * on P9 DD1.0 needed a latch to be cleared in the LPC bridge in xive_do_source_eoi()
398 if (xd->flags & XIVE_IRQ_FLAG_LSI) in xive_do_source_eoi()
399 xive_esb_read(xd, XIVE_ESB_LOAD_EOI); in xive_do_source_eoi()
401 eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00); in xive_do_source_eoi()
405 if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio) in xive_do_source_eoi()
406 out_be64(xd->trig_mmio, 0); in xive_do_source_eoi()
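
The EOI path above relies on the ESB convention that a load at the SET_PQ_00 offset clears the P and Q bits and returns their previous value; if Q was set, another trigger arrived while the interrupt was pending, so the source has to be re-fired through its trigger page. The sketch below only illustrates that decision: esb_set_pq_00(), fire_trigger() and the ESB_VAL_Q constant are stand-ins for the MMIO accesses and the kernel define, not real APIs.

#include <stdint.h>

#define ESB_VAL_Q	0x1	/* illustrative copy of the "queued" bit */

/* Hypothetical stand-ins for the ESB special load and the trigger store. */
static uint64_t esb_set_pq_00(void) { return 0; }
static void fire_trigger(void) { }

/* Sketch of the EOI-then-maybe-retrigger step performed above. */
static void eoi_sketch(void)
{
	uint64_t old_pq = esb_set_pq_00();	/* clears P and Q, returns old value */

	if (old_pq & ESB_VAL_Q)			/* a trigger was coalesced meanwhile */
		fire_trigger();			/* so force a new delivery */
}
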
414 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_eoi() local
417 DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n", in xive_irq_eoi()
425 !(xd->flags & XIVE_IRQ_NO_EOI)) in xive_irq_eoi()
426 xive_do_source_eoi(irqd_to_hwirq(d), xd); in xive_irq_eoi()
428 xd->stale_p = true; in xive_irq_eoi()
434 xd->saved_p = false; in xive_irq_eoi()
445 static void xive_do_source_set_mask(struct xive_irq_data *xd, in xive_do_source_set_mask() argument
459 val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01); in xive_do_source_set_mask()
460 if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P)) in xive_do_source_set_mask()
461 xd->saved_p = true; in xive_do_source_set_mask()
462 xd->stale_p = false; in xive_do_source_set_mask()
463 } else if (xd->saved_p) { in xive_do_source_set_mask()
464 xive_esb_read(xd, XIVE_ESB_SET_PQ_10); in xive_do_source_set_mask()
465 xd->saved_p = false; in xive_do_source_set_mask()
467 xive_esb_read(xd, XIVE_ESB_SET_PQ_00); in xive_do_source_set_mask()
468 xd->stale_p = false; in xive_do_source_set_mask()
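
xive_do_source_set_mask() above drives masking entirely through PQ transitions: masking sets PQ to 01 and records in saved_p whether the source was already pending, while unmasking restores PQ to 10 when a latched occurrence must be replayed and PQ to 00 otherwise. The following sketch restates that bookkeeping in a self-contained form; struct src_state and the esb_set_pq_* helpers are hypothetical placeholders for the real xive_irq_data fields and ESB loads.

#include <stdbool.h>
#include <stdint.h>

#define ESB_VAL_P	0x2	/* illustrative copy of the "pending" bit */

/* Hypothetical stand-ins for the ESB special loads used above. */
static uint64_t esb_set_pq_01(void) { return 0; }
static void esb_set_pq_10(void) { }
static void esb_set_pq_00(void) { }

struct src_state {
	bool saved_p;	/* an occurrence was latched while masked      */
	bool stale_p;	/* the recorded P bit may no longer be trusted */
};

/* Sketch of the mask/unmask transitions performed above. */
static void set_mask_sketch(struct src_state *s, bool mask)
{
	if (mask) {
		uint64_t old = esb_set_pq_01();		/* PQ = 01: source masked  */
		if (!s->stale_p && (old & ESB_VAL_P))
			s->saved_p = true;		/* remember it was pending */
		s->stale_p = false;
	} else if (s->saved_p) {
		esb_set_pq_10();			/* PQ = 10: replay later   */
		s->saved_p = false;
	} else {
		esb_set_pq_00();			/* PQ = 00: enabled, idle  */
		s->stale_p = false;
	}
}
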
506 if (WARN_ON(cpu < 0 || !xc)) { in xive_dec_target_count()
533 for (i = 0; i < first && cpu < nr_cpu_ids; i++) in xive_find_target_in_mask()
572 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_pick_irq_target() local
580 if (xd->src_chip != XIVE_INVALID_CHIP_ID && in xive_pick_irq_target()
585 if (xc->chip_id == xd->src_chip) in xive_pick_irq_target()
594 if (cpu >= 0) in xive_pick_irq_target()
605 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_startup() local
609 xd->saved_p = false; in xive_irq_startup()
610 xd->stale_p = false; in xive_irq_startup()
611 pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n", in xive_irq_startup()
639 xd->target = target; in xive_irq_startup()
652 xive_do_source_set_mask(xd, false); in xive_irq_startup()
654 return 0; in xive_irq_startup()
660 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_shutdown() local
663 pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n", in xive_irq_shutdown()
666 if (WARN_ON(xd->target == XIVE_INVALID_TARGET)) in xive_irq_shutdown()
670 xive_do_source_set_mask(xd, true); in xive_irq_shutdown()
677 get_hard_smp_processor_id(xd->target), in xive_irq_shutdown()
678 0xff, XIVE_BAD_IRQ); in xive_irq_shutdown()
680 xive_dec_target_count(xd->target); in xive_irq_shutdown()
681 xd->target = XIVE_INVALID_TARGET; in xive_irq_shutdown()
686 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_unmask() local
688 pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd); in xive_irq_unmask()
693 * be fixed by P9 DD2.0, if that is the case, firmware in xive_irq_unmask()
696 if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) { in xive_irq_unmask()
699 get_hard_smp_processor_id(xd->target), in xive_irq_unmask()
704 xive_do_source_set_mask(xd, false); in xive_irq_unmask()
709 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_mask() local
711 pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd); in xive_irq_mask()
716 * be fixed by P9 DD2.0, if that is the case, firmware in xive_irq_mask()
719 if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) { in xive_irq_mask()
722 get_hard_smp_processor_id(xd->target), in xive_irq_mask()
723 0xff, d->irq); in xive_irq_mask()
727 xive_do_source_set_mask(xd, true); in xive_irq_mask()
734 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_set_affinity() local
737 int rc = 0; in xive_irq_set_affinity()
753 if (xd->target != XIVE_INVALID_TARGET && in xive_irq_set_affinity()
754 cpu_online(xd->target) && in xive_irq_set_affinity()
755 cpumask_test_cpu(xd->target, cpumask)) in xive_irq_set_affinity()
769 old_target = xd->target; in xive_irq_set_affinity()
779 if (rc < 0) { in xive_irq_set_affinity()
784 pr_devel(" target: 0x%x\n", target); in xive_irq_set_affinity()
785 xd->target = target; in xive_irq_set_affinity()
796 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_set_type() local
823 !!(xd->flags & XIVE_IRQ_FLAG_LSI)) { in xive_irq_set_type()
824 pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n", in xive_irq_set_type()
827 (xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge"); in xive_irq_set_type()
835 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_retrigger() local
838 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) in xive_irq_retrigger()
839 return 0; in xive_irq_retrigger()
845 xive_esb_read(xd, XIVE_ESB_SET_PQ_11); in xive_irq_retrigger()
848 * Note: We pass "0" to the hw_irq argument in order to in xive_irq_retrigger()
853 xive_do_source_eoi(0, xd); in xive_irq_retrigger()
864 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); in xive_irq_set_vcpu_affinity() local
873 if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) in xive_irq_set_vcpu_affinity()
884 pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10); in xive_irq_set_vcpu_affinity()
885 if (!xd->stale_p) { in xive_irq_set_vcpu_affinity()
886 xd->saved_p = !!(pq & XIVE_ESB_VAL_P); in xive_irq_set_vcpu_affinity()
887 xd->stale_p = !xd->saved_p; in xive_irq_set_vcpu_affinity()
891 if (xd->target == XIVE_INVALID_TARGET) { in xive_irq_set_vcpu_affinity()
896 WARN_ON(xd->saved_p); in xive_irq_set_vcpu_affinity()
898 return 0; in xive_irq_set_vcpu_affinity()
916 if (xd->saved_p) { in xive_irq_set_vcpu_affinity()
917 xive_esb_read(xd, XIVE_ESB_SET_PQ_11); in xive_irq_set_vcpu_affinity()
935 if (xd->target == XIVE_INVALID_TARGET) { in xive_irq_set_vcpu_affinity()
936 xive_do_source_set_mask(xd, true); in xive_irq_set_vcpu_affinity()
937 return 0; in xive_irq_set_vcpu_affinity()
957 get_hard_smp_processor_id(xd->target), in xive_irq_set_vcpu_affinity()
974 if (!xd->saved_p) in xive_irq_set_vcpu_affinity()
975 xive_do_source_eoi(hw_irq, xd); in xive_irq_set_vcpu_affinity()
978 return 0; in xive_irq_set_vcpu_affinity()
985 struct xive_irq_data *xd = irq_data_get_irq_handler_data(data); in xive_get_irqchip_state() local
990 pq = xive_esb_read(xd, XIVE_ESB_GET); in xive_get_irqchip_state()
999 *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p && in xive_get_irqchip_state()
1000 (xd->saved_p || (!!(pq & XIVE_ESB_VAL_P) && in xive_get_irqchip_state()
1002 return 0; in xive_get_irqchip_state()
1028 void xive_cleanup_irq_data(struct xive_irq_data *xd) in xive_cleanup_irq_data() argument
1030 if (xd->eoi_mmio) { in xive_cleanup_irq_data()
1031 unmap_kernel_range((unsigned long)xd->eoi_mmio, in xive_cleanup_irq_data()
1032 1u << xd->esb_shift); in xive_cleanup_irq_data()
1033 iounmap(xd->eoi_mmio); in xive_cleanup_irq_data()
1034 if (xd->eoi_mmio == xd->trig_mmio) in xive_cleanup_irq_data()
1035 xd->trig_mmio = NULL; in xive_cleanup_irq_data()
1036 xd->eoi_mmio = NULL; in xive_cleanup_irq_data()
1038 if (xd->trig_mmio) { in xive_cleanup_irq_data()
1039 unmap_kernel_range((unsigned long)xd->trig_mmio, in xive_cleanup_irq_data()
1040 1u << xd->esb_shift); in xive_cleanup_irq_data()
1041 iounmap(xd->trig_mmio); in xive_cleanup_irq_data()
1042 xd->trig_mmio = NULL; in xive_cleanup_irq_data()
1049 struct xive_irq_data *xd; in xive_irq_alloc_data() local
1052 xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL); in xive_irq_alloc_data()
1053 if (!xd) in xive_irq_alloc_data()
1055 rc = xive_ops->populate_irq_data(hw, xd); in xive_irq_alloc_data()
1057 kfree(xd); in xive_irq_alloc_data()
1060 xd->target = XIVE_INVALID_TARGET; in xive_irq_alloc_data()
1061 irq_set_handler_data(virq, xd); in xive_irq_alloc_data()
1070 xive_esb_read(xd, XIVE_ESB_SET_PQ_01); in xive_irq_alloc_data()
1072 return 0; in xive_irq_alloc_data()
1077 struct xive_irq_data *xd = irq_get_handler_data(virq); in xive_irq_free_data() local
1079 if (!xd) in xive_irq_free_data()
1082 xive_cleanup_irq_data(xd); in xive_irq_free_data()
1083 kfree(xd); in xive_irq_free_data()
1091 struct xive_irq_data *xd; in xive_cause_ipi() local
1095 DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n", in xive_cause_ipi()
1098 xd = &xc->ipi_data; in xive_cause_ipi()
1099 if (WARN_ON(!xd->trig_mmio)) in xive_cause_ipi()
1101 out_be64(xd->trig_mmio, 0); in xive_cause_ipi()
1117 DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n", in xive_ipi_eoi()
1152 virq = irq_create_mapping(xive_irq_domain, 0); in xive_request_ipi()
1170 return 0; in xive_setup_cpu_ipi()
1198 return 0; in xive_setup_cpu_ipi()
1220 0xff, xive_ipi_irq); in xive_cleanup_cpu_ipi()
1251 /* IPIs are special and come up with HW number 0 */ in xive_irq_domain_map()
1252 if (hw == 0) { in xive_irq_domain_map()
1259 return 0; in xive_irq_domain_map()
1269 return 0; in xive_irq_domain_map()
1290 *out_hwirq = intspec[0]; in xive_irq_domain_xlate()
1304 return 0; in xive_irq_domain_xlate()
1337 int rc = 0; in xive_setup_cpu_queues()
1379 /* Set CPPR to 0xff to enable flow of interrupts */ in xive_setup_cpu()
1380 xc->cppr = 0xff; in xive_setup_cpu()
1381 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); in xive_setup_cpu()
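
The CPPR writes quoted in this listing (0xff to enable the flow of interrupts, 0 to disable it) follow from the CPPR acting as a priority threshold. A small illustration of that convention, where cppr_allows() is a hypothetical helper rather than kernel code:

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative only: an interrupt of priority 'prio' is delivered when
 * it is more favoured (numerically lower) than the current CPPR, so
 * 0xff lets everything through and 0 blocks everything.
 */
static bool cppr_allows(uint8_t cppr, uint8_t prio)
{
	return prio < cppr;
}
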
1417 while ((irq = xive_scan_interrupts(xc, false)) != 0) { in xive_flush_cpu_queue()
1424 struct xive_irq_data *xd; in xive_flush_cpu_queue() local
1431 if (d->domain != xive_irq_domain || hw_irq == 0) in xive_flush_cpu_queue()
1444 xd = irq_desc_get_handler_data(desc); in xive_flush_cpu_queue()
1449 xd->saved_p = false; in xive_flush_cpu_queue()
1455 if (xd->flags & XIVE_IRQ_FLAG_LSI) in xive_flush_cpu_queue()
1456 xive_do_source_eoi(irqd_to_hwirq(d), xd); in xive_flush_cpu_queue()
1472 /* Set CPPR to 0 to disable flow of interrupts */ in xive_smp_disable_cpu()
1473 xc->cppr = 0; in xive_smp_disable_cpu()
1474 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0); in xive_smp_disable_cpu()
1480 xc->cppr = 0xff; in xive_smp_disable_cpu()
1481 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); in xive_smp_disable_cpu()
1502 /* Set CPPR to 0 to disable flow of interrupts */ in xive_teardown_cpu()
1503 xc->cppr = 0; in xive_teardown_cpu()
1504 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0); in xive_teardown_cpu()
1563 memset(qpage, 0, 1 << queue_shift); in xive_queue_page_alloc()
1571 return 0; in xive_off()
1587 seq_printf(m, "IPI=0x%08x PQ=%c%c ", xc->hw_ipi, in xive_debug_show_cpu()
1616 struct xive_irq_data *xd; in xive_debug_show_irq() local
1624 seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); in xive_debug_show_irq()
1628 seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", in xive_debug_show_irq()
1631 xd = irq_data_get_irq_handler_data(d); in xive_debug_show_irq()
1632 val = xive_esb_read(xd, XIVE_ESB_GET); in xive_debug_show_irq()
1634 xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ', in xive_debug_show_irq()
1635 xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', in xive_debug_show_irq()
1636 xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', in xive_debug_show_irq()
1663 /* IPIs are special (HW number 0) */ in xive_core_debug_show()
1667 return 0; in xive_core_debug_show()
1676 return 0; in xive_core_debug_init()