• Home
  • Raw
  • Download

Lines Matching +full:0 +full:xc

47 #define DBG_VERBOSE(fmt...)	do { } while(0)
81 * or 0 if there is no new entry.
90 return 0; in xive_read_eq()
95 return 0; in xive_read_eq()
103 if (q->idx == 0) in xive_read_eq()
107 return cur & 0x7fffffff; in xive_read_eq()
117 * (0xff if none) and return what was found (0 if none).
133 static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek) in xive_scan_interrupts() argument
135 u32 irq = 0; in xive_scan_interrupts()
139 while (xc->pending_prio != 0) { in xive_scan_interrupts()
142 prio = ffs(xc->pending_prio) - 1; in xive_scan_interrupts()
146 irq = xive_read_eq(&xc->queue[prio], just_peek); in xive_scan_interrupts()
153 xc->pending_prio &= ~(1 << prio); in xive_scan_interrupts()
160 q = &xc->queue[prio]; in xive_scan_interrupts()
162 int p = atomic_xchg(&q->pending_count, 0); in xive_scan_interrupts()
170 /* If nothing was found, set CPPR to 0xff */ in xive_scan_interrupts()
171 if (irq == 0) in xive_scan_interrupts()
172 prio = 0xff; in xive_scan_interrupts()
175 if (prio != xc->cppr) { in xive_scan_interrupts()
177 xc->cppr = prio; in xive_scan_interrupts()
197 val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0); in xive_esb_read()
233 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xmon_xive_do_dump() local
236 xmon_printf(" pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr); in xmon_xive_do_dump()
237 xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]); in xmon_xive_do_dump()
240 u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET); in xmon_xive_do_dump()
241 xmon_printf(" IPI state: %x:%c%c\n", xc->hw_ipi, in xmon_xive_do_dump()
251 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_get_irq() local
268 xive_ops->update_pending(xc); in xive_get_irq()
270 DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio); in xive_get_irq()
273 irq = xive_scan_interrupts(xc, false); in xive_get_irq()
275 DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n", in xive_get_irq()
276 irq, xc->pending_prio); in xive_get_irq()
280 return 0; in xive_get_irq()
294 static void xive_do_queue_eoi(struct xive_cpu *xc) in xive_do_queue_eoi() argument
296 if (xive_scan_interrupts(xc, true) != 0) { in xive_do_queue_eoi()
297 DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio); in xive_do_queue_eoi()
310 xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0); in xive_do_source_eoi()
316 * on P9 DD1.0 needed a latch to be cleared in the LPC bridge in xive_do_source_eoi()
346 out_be64(xd->trig_mmio, 0); in xive_do_source_eoi()
355 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_irq_eoi() local
357 DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n", in xive_irq_eoi()
358 d->irq, irqd_to_hwirq(d), xc->pending_prio); in xive_irq_eoi()
375 xive_do_queue_eoi(xc); in xive_irq_eoi()
412 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_try_pick_target() local
413 struct xive_q *q = &xc->queue[xive_irq_priority]; in xive_try_pick_target()
436 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_dec_target_count() local
437 struct xive_q *q = &xc->queue[xive_irq_priority]; in xive_dec_target_count()
439 if (unlikely(WARN_ON(cpu < 0 || !xc))) { in xive_dec_target_count()
440 pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc); in xive_dec_target_count()
466 for (i = 0; i < first && cpu < nr_cpu_ids; i++) in xive_find_target_in_mask()
517 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); in xive_pick_irq_target() local
518 if (xc->chip_id == xd->src_chip) in xive_pick_irq_target()
527 if (cpu >= 0) in xive_pick_irq_target()
542 pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n", in xive_irq_startup()
585 return 0; in xive_irq_startup()
593 pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n", in xive_irq_shutdown()
618 0xff, XIVE_BAD_IRQ); in xive_irq_shutdown()
633 * be fixed by P9 DD2.0, if that is the case, firmware in xive_irq_unmask()
656 * be fixed by P9 DD2.0, if that is the case, firmware in xive_irq_mask()
663 0xff, d->irq); in xive_irq_mask()
677 int rc = 0; in xive_irq_set_affinity()
719 if (rc < 0) { in xive_irq_set_affinity()
724 pr_devel(" target: 0x%x\n", target); in xive_irq_set_affinity()
764 pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n", in xive_irq_set_type()
779 return 0; in xive_irq_retrigger()
788 * Note: We pass "0" to the hw_irq argument in order to in xive_irq_retrigger()
793 xive_do_source_eoi(0, xd); in xive_irq_retrigger()
830 return 0; in xive_irq_set_vcpu_affinity()
871 return 0; in xive_irq_set_vcpu_affinity()
912 return 0; in xive_irq_set_vcpu_affinity()
978 return 0; in xive_irq_alloc_data()
996 struct xive_cpu *xc; in xive_cause_ipi() local
999 xc = per_cpu(xive_cpu, cpu); in xive_cause_ipi()
1001 DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n", in xive_cause_ipi()
1002 smp_processor_id(), cpu, xc->hw_ipi); in xive_cause_ipi()
1004 xd = &xc->ipi_data; in xive_cause_ipi()
1007 out_be64(xd->trig_mmio, 0); in xive_cause_ipi()
1017 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_ipi_eoi() local
1020 if (!xc) in xive_ipi_eoi()
1023 DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n", in xive_ipi_eoi()
1024 d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio); in xive_ipi_eoi()
1026 xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data); in xive_ipi_eoi()
1027 xive_do_queue_eoi(xc); in xive_ipi_eoi()
1058 virq = irq_create_mapping(xive_irq_domain, 0); in xive_request_ipi()
1067 struct xive_cpu *xc; in xive_setup_cpu_ipi() local
1072 xc = per_cpu(xive_cpu, cpu); in xive_setup_cpu_ipi()
1075 if (xc->hw_ipi != XIVE_BAD_IRQ) in xive_setup_cpu_ipi()
1076 return 0; in xive_setup_cpu_ipi()
1078 /* Grab an IPI from the backend, this will populate xc->hw_ipi */ in xive_setup_cpu_ipi()
1079 if (xive_ops->get_ipi(cpu, xc)) in xive_setup_cpu_ipi()
1086 rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data); in xive_setup_cpu_ipi()
1091 rc = xive_ops->configure_irq(xc->hw_ipi, in xive_setup_cpu_ipi()
1099 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio); in xive_setup_cpu_ipi()
1102 xive_do_source_set_mask(&xc->ipi_data, false); in xive_setup_cpu_ipi()
1104 return 0; in xive_setup_cpu_ipi()
1107 static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) in xive_cleanup_cpu_ipi() argument
1112 if (xc->hw_ipi == XIVE_BAD_IRQ) in xive_cleanup_cpu_ipi()
1116 xive_do_source_set_mask(&xc->ipi_data, true); in xive_cleanup_cpu_ipi()
1125 xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(), in xive_cleanup_cpu_ipi()
1126 0xff, xive_ipi_irq); in xive_cleanup_cpu_ipi()
1129 xive_ops->put_ipi(cpu, xc); in xive_cleanup_cpu_ipi()
1157 /* IPIs are special and come up with HW number 0 */ in xive_irq_domain_map()
1158 if (hw == 0) { in xive_irq_domain_map()
1165 return 0; in xive_irq_domain_map()
1175 return 0; in xive_irq_domain_map()
1196 *out_hwirq = intspec[0]; in xive_irq_domain_xlate()
1210 return 0; in xive_irq_domain_xlate()
1235 static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) in xive_cleanup_cpu_queues() argument
1237 if (xc->queue[xive_irq_priority].qpage) in xive_cleanup_cpu_queues()
1238 xive_ops->cleanup_queue(cpu, xc, xive_irq_priority); in xive_cleanup_cpu_queues()
1241 static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) in xive_setup_cpu_queues() argument
1243 int rc = 0; in xive_setup_cpu_queues()
1246 if (!xc->queue[xive_irq_priority].qpage) in xive_setup_cpu_queues()
1247 rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority); in xive_setup_cpu_queues()
1254 struct xive_cpu *xc; in xive_prepare_cpu() local
1256 xc = per_cpu(xive_cpu, cpu); in xive_prepare_cpu()
1257 if (!xc) { in xive_prepare_cpu()
1260 xc = kzalloc_node(sizeof(struct xive_cpu), in xive_prepare_cpu()
1262 if (!xc) in xive_prepare_cpu()
1266 xc->chip_id = of_get_ibm_chip_id(np); in xive_prepare_cpu()
1268 xc->hw_ipi = XIVE_BAD_IRQ; in xive_prepare_cpu()
1270 per_cpu(xive_cpu, cpu) = xc; in xive_prepare_cpu()
1274 return xive_setup_cpu_queues(cpu, xc); in xive_prepare_cpu()
1279 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_setup_cpu() local
1283 xive_ops->setup_cpu(smp_processor_id(), xc); in xive_setup_cpu()
1285 /* Set CPPR to 0xff to enable flow of interrupts */ in xive_setup_cpu()
1286 xc->cppr = 0xff; in xive_setup_cpu()
1287 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); in xive_setup_cpu()
1315 static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc) in xive_flush_cpu_queue() argument
1323 while ((irq = xive_scan_interrupts(xc, false)) != 0) { in xive_flush_cpu_queue()
1337 if (d->domain != xive_irq_domain || hw_irq == 0) in xive_flush_cpu_queue()
1367 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_smp_disable_cpu() local
1373 /* Set CPPR to 0 to disable flow of interrupts */ in xive_smp_disable_cpu()
1374 xc->cppr = 0; in xive_smp_disable_cpu()
1375 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0); in xive_smp_disable_cpu()
1378 xive_flush_cpu_queue(cpu, xc); in xive_smp_disable_cpu()
1381 xc->cppr = 0xff; in xive_smp_disable_cpu()
1382 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); in xive_smp_disable_cpu()
1387 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_flush_interrupt() local
1391 xive_flush_cpu_queue(cpu, xc); in xive_flush_interrupt()
1400 struct xive_cpu *xc = __this_cpu_read(xive_cpu); in xive_teardown_cpu() local
1403 /* Set CPPR to 0 to disable flow of interrupts */ in xive_teardown_cpu()
1404 xc->cppr = 0; in xive_teardown_cpu()
1405 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0); in xive_teardown_cpu()
1408 xive_ops->teardown_cpu(cpu, xc); in xive_teardown_cpu()
1412 xive_cleanup_cpu_ipi(cpu, xc); in xive_teardown_cpu()
1416 xive_cleanup_cpu_queues(cpu, xc); in xive_teardown_cpu()
1464 memset(qpage, 0, 1 << queue_shift); in xive_queue_page_alloc()
1472 return 0; in xive_off()