Lines matching +full:vp +full:-p in arch/powerpc/sysdev/xive/native.c (the powerpc XIVE native/OPAL backend). Each entry gives the source line number, the matched line, and its enclosing function; matches on a local variable declaration carry a trailing "local".
1 // SPDX-License-Identifier: GPL-2.0-or-later
30 #include <asm/xive-regs.h>
34 #include "xive-internal.h"
59 return -EINVAL; in xive_native_populate_irq_data()
64 data->flags |= XIVE_IRQ_FLAG_STORE_EOI; in xive_native_populate_irq_data()
66 data->flags |= XIVE_IRQ_FLAG_LSI; in xive_native_populate_irq_data()
68 data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG; in xive_native_populate_irq_data()
70 data->flags |= XIVE_IRQ_FLAG_MASK_FW; in xive_native_populate_irq_data()
72 data->flags |= XIVE_IRQ_FLAG_EOI_FW; in xive_native_populate_irq_data()
73 data->eoi_page = be64_to_cpu(eoi_page); in xive_native_populate_irq_data()
74 data->trig_page = be64_to_cpu(trig_page); in xive_native_populate_irq_data()
75 data->esb_shift = be32_to_cpu(esb_shift); in xive_native_populate_irq_data()
76 data->src_chip = be32_to_cpu(src_chip); in xive_native_populate_irq_data()
78 data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift); in xive_native_populate_irq_data()
79 if (!data->eoi_mmio) { in xive_native_populate_irq_data()
81 return -ENOMEM; in xive_native_populate_irq_data()
84 data->hw_irq = hw_irq; in xive_native_populate_irq_data()
86 if (!data->trig_page) in xive_native_populate_irq_data()
88 if (data->trig_page == data->eoi_page) { in xive_native_populate_irq_data()
89 data->trig_mmio = data->eoi_mmio; in xive_native_populate_irq_data()
93 data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift); in xive_native_populate_irq_data()
94 if (!data->trig_mmio) { in xive_native_populate_irq_data()
96 return -ENOMEM; in xive_native_populate_irq_data()
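The trigger mapping above may alias the EOI mapping: line 89 reuses data->eoi_mmio when the trigger and EOI pages coincide, so any teardown has to release that single ioremap() exactly once. A minimal kernel-style sketch of the pattern follows; the helper name is hypothetical and this is not the in-tree cleanup path.

#include <linux/io.h>
#include <asm/xive.h>		/* struct xive_irq_data */

/*
 * Hypothetical helper: undo the mappings set up by
 * xive_native_populate_irq_data().  The trigger mapping is only a
 * separate ioremap() when trig_page != eoi_page; otherwise the two
 * pointers alias one mapping that must be unmapped exactly once.
 */
static void xive_irq_data_unmap_sketch(struct xive_irq_data *data)
{
	if (data->trig_mmio && data->trig_mmio != data->eoi_mmio)
		iounmap(data->trig_mmio);
	data->trig_mmio = NULL;

	if (data->eoi_mmio)
		iounmap(data->eoi_mmio);
	data->eoi_mmio = NULL;
}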
112 return rc == 0 ? 0 : -ENXIO; in xive_native_configure_irq()
120 __be64 vp; in xive_native_get_irq_config() local
123 rc = opal_xive_get_irq_config(hw_irq, &vp, prio, &lirq); in xive_native_get_irq_config()
125 *target = be64_to_cpu(vp); in xive_native_get_irq_config()
128 return rc == 0 ? 0 : -ENXIO; in xive_native_get_irq_config()
143 return -EINVAL; in xive_native_configure_queue()
149 q->msk = order ? ((1u << (order - 2)) - 1) : 0; in xive_native_configure_queue()
150 q->idx = 0; in xive_native_configure_queue()
151 q->toggle = 0; in xive_native_configure_queue()
159 rc = -EIO; in xive_native_configure_queue()
162 q->eoi_phys = be64_to_cpu(qeoi_page_be); in xive_native_configure_queue()
169 q->esc_irq = be32_to_cpu(esc_irq_be); in xive_native_configure_queue()
182 rc = -EIO; in xive_native_configure_queue()
186 * q->qpage is set due to how it manages IPI EOIs in xive_native_configure_queue()
189 q->qpage = qpage; in xive_native_configure_queue()
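Line 149 above sizes the ring-index mask from the queue order. Assuming 32-bit event-queue entries, a queue of 2^order bytes holds 2^(order - 2) of them, and the mask is that count minus one (order 0 standing for "no queue"). A self-contained sketch of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of "q->msk = order ? ((1u << (order - 2)) - 1) : 0":
 * 2^order bytes of 4-byte entries gives 2^(order - 2) ring slots,
 * so the wrap-around mask for q->idx is (slots - 1).
 */
static uint32_t eq_index_mask(uint32_t order)
{
	return order ? ((1u << (order - 2)) - 1) : 0;
}

int main(void)
{
	/* e.g. a 64kB queue (order 16) holds 16384 entries, mask 0x3fff */
	for (uint32_t order = 12; order <= 16; order++)
		printf("order %2u: %6u entries, mask 0x%04x\n",
		       order, order ? 1u << (order - 2) : 0,
		       eq_index_mask(order));
	return 0;
}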
219 struct xive_q *q = &xc->queue[prio]; in xive_native_setup_queue()
232 struct xive_q *q = &xc->queue[prio]; in xive_native_cleanup_queue()
241 free_pages((unsigned long)q->qpage, alloc_order); in xive_native_cleanup_queue()
242 q->qpage = NULL; in xive_native_cleanup_queue()
247 return of_device_is_compatible(node, "ibm,opal-xive-vc"); in xive_native_match()
268 irq = opal_xive_allocate_irq(xc->chip_id); in xive_native_get_ipi()
275 return -ENXIO; in xive_native_get_ipi()
277 xc->hw_ipi = irq; in xive_native_get_ipi()
317 if (xc->hw_ipi == XIVE_BAD_IRQ) in xive_native_put_ipi()
320 rc = opal_xive_free_irq(xc->hw_ipi); in xive_native_put_ipi()
325 xc->hw_ipi = XIVE_BAD_IRQ; in xive_native_put_ipi()
366 xc->pending_prio |= 1 << cppr; in xive_native_update_pending()
372 if (cppr >= xc->cppr) in xive_native_update_pending()
374 smp_processor_id(), cppr, xc->cppr); in xive_native_update_pending()
377 xc->cppr = cppr; in xive_native_update_pending()
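Line 366 above records each pending priority as one bit in xc->pending_prio. Numerically lower XIVE priorities are more favoured, so the next priority to service is the lowest set bit. A purely illustrative userspace sketch of that bookkeeping; most_favoured_prio() is a made-up name, not a kernel helper.

#include <stdio.h>

/*
 * One bit per priority, as in "xc->pending_prio |= 1 << cppr".
 * The lowest set bit is the most favoured pending priority.
 */
static int most_favoured_prio(unsigned int pending_prio)
{
	if (!pending_prio)
		return -1;				/* nothing pending */
	return __builtin_ffs(pending_prio) - 1;		/* lowest set bit */
}

int main(void)
{
	unsigned int pending = 0;

	pending |= 1u << 6;	/* one priority becomes pending */
	pending |= 1u << 2;	/* a more favoured (lower) one arrives */

	printf("servicing priority %d first\n", most_favoured_prio(pending));
	return 0;
}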
399 u32 vp; in xive_native_setup_cpu() local
406 /* Check if pool VP already active, if it is, pull it */ in xive_native_setup_cpu()
410 /* Enable the pool VP */ in xive_native_setup_cpu()
411 vp = xive_pool_vps + cpu; in xive_native_setup_cpu()
413 rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0); in xive_native_setup_cpu()
419 pr_err("Failed to enable pool VP on CPU %d\n", cpu); in xive_native_setup_cpu()
424 rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL); in xive_native_setup_cpu()
426 pr_err("Failed to get pool VP info CPU %d\n", cpu); in xive_native_setup_cpu()
439 u32 vp; in xive_native_teardown_cpu() local
444 /* Pull the pool VP from the CPU */ in xive_native_teardown_cpu()
448 vp = xive_pool_vps + cpu; in xive_native_teardown_cpu()
450 rc = opal_xive_set_vp_info(vp, 0, 0); in xive_native_teardown_cpu()
493 if (of_property_read_u32(np, "ibm,xive-provision-page-size", in xive_parse_provisioning()
496 rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4); in xive_parse_provisioning()
510 rc = of_property_read_u32_array(np, "ibm,xive-provision-chips", in xive_parse_provisioning()
518 xive_provision_cache = kmem_cache_create("xive-provision", in xive_parse_provisioning()
532 pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids); in xive_native_setup_pools()
536 pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n"); in xive_native_setup_pools()
558 const __be32 *p; in xive_native_init() local
566 np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe"); in xive_native_init()
585 if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0) in xive_native_init()
586 max_prio = val - 1; in xive_native_init()
589 of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) { in xive_native_init()
596 if (of_get_property(np, "single-escalation-support", NULL) != NULL) in xive_native_init()
630 pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10)); in xive_native_init()
637 void *p; in xive_native_provision_pages() local
646 p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL); in xive_native_provision_pages()
647 if (!p) { in xive_native_provision_pages()
651 kmemleak_ignore(p); in xive_native_provision_pages()
652 opal_xive_donate_page(chip, __pa(p)); in xive_native_provision_pages()
662 order = fls(max_vcpus) - 1; in xive_native_alloc_vp_block()
666 pr_debug("VP block alloc, for max VCPUs %d use order %d\n", in xive_native_alloc_vp_block()
700 pr_warn("OPAL error %lld freeing VP block\n", rc); in xive_native_free_vp_block()
717 return rc ? -EIO : 0; in xive_native_enable_vp()
731 return rc ? -EIO : 0; in xive_native_disable_vp()
743 return -EIO; in xive_native_get_vp_info()
776 return -EIO; in xive_native_get_queue_info()
805 return -EIO; in xive_native_get_queue_state()
825 return -EIO; in xive_native_set_queue_state()
846 pr_err("OPAL failed to get vp state for VCPU %d : %lld\n", in xive_native_get_vp_state()
848 return -EIO; in xive_native_get_vp_state()