/drivers/net/ipa/
gsi.c
    175  return channel - &channel->gsi->channel[0];  (in gsi_channel_id())
    181  return !!channel->gsi;  (in gsi_channel_initialized())
    185  static void gsi_irq_type_update(struct gsi *gsi, u32 val)  (in gsi_irq_type_update() argument)
    187  gsi->type_enabled_bitmap = val;  (in gsi_irq_type_update())
    188  iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);  (in gsi_irq_type_update())
    191  static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)  (in gsi_irq_type_enable() argument)
    193  gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));  (in gsi_irq_type_enable())
    196  static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)  (in gsi_irq_type_disable() argument)
    198  gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));  (in gsi_irq_type_disable())
    206  static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)  (in gsi_irq_ev_ctrl_enable() argument)
    [all …]

gsi.h
     29  struct gsi;
    109  struct gsi *gsi;  (member)
    148  struct gsi {  (struct)
    177  int gsi_setup(struct gsi *gsi);
    183  void gsi_teardown(struct gsi *gsi);
    192  u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);
    201  u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);
    210  int gsi_channel_start(struct gsi *gsi, u32 channel_id);
    219  int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
    233  void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell);
    [all …]

ipa_gsi.c
     17  struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_gsi_trans_complete())
     24  struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_gsi_trans_release())
     29  void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,  (in ipa_gsi_channel_tx_queued() argument)
     32  struct ipa *ipa = container_of(gsi, struct ipa, gsi);  (in ipa_gsi_channel_tx_queued())
     40  void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,  (in ipa_gsi_channel_tx_completed() argument)
     43  struct ipa *ipa = container_of(gsi, struct ipa, gsi);  (in ipa_gsi_channel_tx_completed())

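A pattern worth calling out in the ipa_gsi.c hits: every callback recovers the enclosing IPA context from the struct gsi pointer it is handed, which works because struct ipa embeds struct gsi by value (the `struct gsi gsi;` member hit in ipa.h further down). A minimal sketch of that container_of() pattern, with both structures reduced to illustrative fields:

```c
#include <linux/device.h>
#include <linux/kernel.h>	/* container_of() */

/* Reduced, illustrative layouts; the real structs carry many more fields. */
struct gsi {
	struct device *dev;
};

struct ipa {
	struct gsi gsi;		/* embedded by value, not a pointer */
};

/* Given only the GSI handle, step back out to the enclosing IPA context. */
static inline struct ipa *ipa_from_gsi(struct gsi *gsi)
{
	return container_of(gsi, struct ipa, gsi);
}
```

This is why the GSI core can stay ignorant of IPA: it only ever passes its own struct gsi pointer into the ipa_gsi_channel_tx_queued()/ipa_gsi_channel_tx_completed() callbacks, and the IPA side converts back.
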
gsi_trans.h
     21  struct gsi;
     51  struct gsi *gsi;  (member)
    142  struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
    216  int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr);
    226  void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id);

gsi_trans.c
    259  struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];  (in gsi_trans_move_pending())
    274  struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];  (in gsi_trans_move_complete())
    290  struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];  (in gsi_trans_move_polled())
    324  struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,  (in gsi_channel_trans_alloc() argument)
    328  struct gsi_channel *channel = &gsi->channel[channel_id];  (in gsi_channel_trans_alloc())
    332  if (WARN_ON(tre_count > gsi_channel_trans_tre_max(gsi, channel_id)))  (in gsi_channel_trans_alloc())
    345  trans->gsi = gsi;  (in gsi_channel_trans_alloc())
    378  trans_info = &trans->gsi->channel[trans->channel_id].trans_info;  (in gsi_trans_free())
    448  ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);  (in gsi_trans_page_add())
    475  ret = dma_map_sg(trans->gsi->dev, sg, used, trans->direction);  (in gsi_trans_skb_add())
    [all …]

ipa_cmd.c
    352  struct device *dev = channel->gsi->dev;  (in ipa_cmd_pool_init())
    381  struct device *dev = channel->gsi->dev;  (in ipa_cmd_pool_exit())
    394  trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;  (in ipa_cmd_payload_alloc())
    405  struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_cmd_table_init_add())
    444  struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_cmd_hdr_init_local_add())
    474  struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_cmd_register_write_add())
    531  struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_cmd_ip_packet_init_add())
    552  struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_cmd_dma_shared_mem_add())
    595  struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_cmd_ip_tag_status_add())
    614  struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_cmd_transfer_add())
    [all …]

ipa_endpoint.c
    230  struct gsi *gsi = &endpoint->ipa->gsi;  (in ipa_endpoint_trans_alloc() local)
    236  return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);  (in ipa_endpoint_trans_alloc())
   1072  struct gsi *gsi;  (in ipa_endpoint_replenish() local)
   1113  gsi = &endpoint->ipa->gsi;  (in ipa_endpoint_replenish())
   1114  if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))  (in ipa_endpoint_replenish())
   1121  struct gsi *gsi = &endpoint->ipa->gsi;  (in ipa_endpoint_replenish_enable() local)
   1130  max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);  (in ipa_endpoint_replenish_enable())
   1429  struct gsi *gsi = &ipa->gsi;  (in ipa_endpoint_reset_rx_aggr() local)
   1455  gsi_channel_reset(gsi, endpoint->channel_id, false);  (in ipa_endpoint_reset_rx_aggr())
   1461  ret = gsi_channel_start(gsi, endpoint->channel_id);  (in ipa_endpoint_reset_rx_aggr())
    [all …]

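The ipa_endpoint.c hits at lines 230 and 236 show how an endpoint reaches the GSI layer: it dereferences the embedded ipa->gsi and forwards its channel ID to gsi_channel_trans_alloc(). A hedged sketch of that wrapper; the toward_ipa field and the enum dma_data_direction type of the last argument are assumptions, since the hit list truncates the prototype:

```c
#include <linux/dma-direction.h>
#include <linux/types.h>

/* Reduced placeholder types; real definitions live in gsi.h and ipa.h. */
struct gsi { int placeholder; };
struct ipa { struct gsi gsi; };
struct ipa_endpoint {
	struct ipa *ipa;
	u32 channel_id;
	bool toward_ipa;	/* assumed: TX (toward hardware) vs. RX */
};

struct gsi_trans;

/* Prototype as listed in gsi_trans.h above; the direction type is assumed. */
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
					  u32 tre_count,
					  enum dma_data_direction direction);

static struct gsi_trans *
example_endpoint_trans_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, endpoint->channel_id, tre_count,
				       direction);
}
```
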
ipa_gsi.h
     11  struct gsi;
     44  void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,
     57  void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,

gsi_private.h
     83  int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id);

ipa.h
     74  struct gsi gsi;  (member)

ipa_main.c
    102  ret = gsi_setup(&ipa->gsi);  (in ipa_setup())
    158  gsi_teardown(&ipa->gsi);  (in ipa_setup())
    183  gsi_teardown(&ipa->gsi);  (in ipa_teardown())
    720  ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count,  (in ipa_probe())
    785  gsi_exit(&ipa->gsi);  (in ipa_probe())
    833  gsi_exit(&ipa->gsi);  (in ipa_remove())

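ipa_main.c shows the GSI lifecycle from the IPA side: gsi_init()/gsi_exit() bracket the driver's probe and remove, while gsi_setup()/gsi_teardown() bracket normal operation inside ipa_setup()/ipa_teardown(). A skeletal sketch of the setup half with the usual goto-based unwinding; only the gsi_setup()/gsi_teardown() prototypes come from gsi.h above, and the follow-on configuration step is a hypothetical stub:

```c
/* Prototypes as listed in gsi.h above. */
struct gsi;
int gsi_setup(struct gsi *gsi);
void gsi_teardown(struct gsi *gsi);

struct gsi { int placeholder; };	/* reduced placeholder */
struct ipa { struct gsi gsi; };		/* reduced placeholder */

/* Hypothetical stand-in for the endpoint/filter/route setup that follows. */
static int example_configure_rest(struct ipa *ipa)
{
	return 0;
}

static int example_ipa_setup(struct ipa *ipa)
{
	int ret;

	ret = gsi_setup(&ipa->gsi);	/* bring the GSI layer up first */
	if (ret)
		return ret;

	ret = example_configure_rest(ipa);
	if (ret)
		goto err_gsi_teardown;

	return 0;

err_gsi_teardown:
	gsi_teardown(&ipa->gsi);	/* unwind in reverse order on failure */
	return ret;
}
```
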
ipa_table.c
    243  struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_table_reset_add())
    415  struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);  (in ipa_table_init_add())

Makefile
      4  ipa_table.o ipa_interrupt.o gsi.o gsi_trans.o \

/drivers/infiniband/hw/mlx5/
gsi.c
     49  struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;  (in generate_completions() local)
     54  for (index = gsi->outstanding_ci; index != gsi->outstanding_pi;  (in generate_completions())
     56  wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];  (in generate_completions())
     65  gsi->outstanding_ci = index;  (in generate_completions())
     70  struct mlx5_ib_gsi_qp *gsi = cq->cq_context;  (in handle_single_completion() local)
     73  struct mlx5_ib_qp *mqp = container_of(gsi, struct mlx5_ib_qp, gsi);  (in handle_single_completion())
     77  spin_lock_irqsave(&gsi->lock, flags);  (in handle_single_completion())
     85  spin_unlock_irqrestore(&gsi->lock, flags);  (in handle_single_completion())
     92  struct mlx5_ib_gsi_qp *gsi;  (in mlx5_ib_create_gsi() local)
    106  gsi = &mqp->gsi;  (in mlx5_ib_create_gsi())
    [all …]

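The mlx5 GSI QP tracks posted send WRs in a fixed ring sized by cap.max_send_wr, with outstanding_pi/outstanding_ci as producer and consumer indices; generate_completions() walks that ring, and handle_single_completion() recovers the GSI QP from cq->cq_context under gsi->lock. A reduced sketch of the ring walk; the element type and the per-WR completion handling are stand-ins for the real driver's bookkeeping:

```c
#include <linux/spinlock.h>
#include <rdma/ib_verbs.h>

/* Reduced bookkeeping; the real struct mlx5_ib_gsi_qp carries more state. */
struct example_gsi_qp {
	spinlock_t lock;		/* taken in completion context */
	struct ib_qp_cap cap;		/* cap.max_send_wr sizes the ring */
	struct ib_cqe *outstanding_wrs;	/* ring of posted-but-uncompleted WRs */
	u32 outstanding_pi;		/* producer: next slot to post into */
	u32 outstanding_ci;		/* consumer: next slot to retire */
};

/* Stand-in for handing one retired WR's completion to the consumer. */
static void example_retire_wr(struct ib_cqe *wr)
{
}

/* Retire posted WRs oldest first, then record how far we got. */
static void example_generate_completions(struct example_gsi_qp *gsi)
{
	u32 index;

	for (index = gsi->outstanding_ci; index != gsi->outstanding_pi;
	     index++)
		example_retire_wr(&gsi->outstanding_wrs[index %
							gsi->cap.max_send_wr]);

	gsi->outstanding_ci = index;
}
```

In the real driver this walk happens under gsi->lock (see the spin_lock_irqsave() hit above), since completions arrive from interrupt context.
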
Makefile
     11  gsi.o \

/drivers/perf/
arm_pmu_acpi.c
     24  int gsi, trigger;  (in arm_pmu_acpi_register_irq() local)
     28  gsi = gicc->performance_interrupt;  (in arm_pmu_acpi_register_irq())
     37  if (!gsi)  (in arm_pmu_acpi_register_irq())
     56  return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);  (in arm_pmu_acpi_register_irq())
     62  int gsi;  (in arm_pmu_acpi_unregister_irq() local)
     66  gsi = gicc->performance_interrupt;  (in arm_pmu_acpi_unregister_irq())
     67  if (gsi)  (in arm_pmu_acpi_unregister_irq())
     68  acpi_unregister_gsi(gsi);  (in arm_pmu_acpi_unregister_irq())
     95  u16 gsi = 0;  (in arm_spe_acpi_register_device() local)
    109  gsi = gicc->spe_interrupt;  (in arm_spe_acpi_register_device())
    [all …]

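arm_pmu_acpi.c treats the per-CPU PMU overflow interrupt as a GSI read straight from the MADT GICC entry and converts it to a Linux IRQ with acpi_register_gsi(); unregistering mirrors it with acpi_unregister_gsi(). A condensed sketch of that pairing; the ACPI_MADT_PERFORMANCE_IRQ_MODE trigger check is an assumption, since the hit list elides how `trigger` is chosen:

```c
#include <linux/acpi.h>

static int example_register_pmu_gsi(struct acpi_madt_generic_interrupt *gicc)
{
	int gsi = gicc->performance_interrupt;
	int trigger;

	if (!gsi)		/* zero means no PMU interrupt is described */
		return 0;

	/* Assumed: MADT flag selects edge vs. level for this interrupt. */
	trigger = (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE) ?
		  ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

	/* Returns a Linux IRQ number, or a negative errno on failure. */
	return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH);
}

static void example_unregister_pmu_gsi(struct acpi_madt_generic_interrupt *gicc)
{
	int gsi = gicc->performance_interrupt;

	if (gsi)
		acpi_unregister_gsi(gsi);
}
```
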
/drivers/acpi/
evged.c
     51  unsigned int gsi;  (member)
     61  acpi_ret = acpi_execute_simple_method(event->handle, NULL, event->gsi);  (in acpi_ged_irq_handler())
     73  unsigned int gsi;  (in acpi_ged_request_interrupt() local)
     93  gsi = p->interrupts[0];  (in acpi_ged_request_interrupt())
     96  gsi = pext->interrupts[0];  (in acpi_ged_request_interrupt())
    102  switch (gsi) {  (in acpi_ged_request_interrupt())
    105  trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);  (in acpi_ged_request_interrupt())
    122  event->gsi = gsi;  (in acpi_ged_request_interrupt())
    136  dev_dbg(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq);  (in acpi_ged_request_interrupt())
    172  event->gsi, event->irq);  (in ged_shutdown())

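evged.c stores the GSI alongside the handle of the ACPI method it maps to, and the IRQ handler simply evaluates that method with the GSI number as its single argument. A minimal sketch of the dispatch, assuming a reduced per-event structure:

```c
#include <linux/acpi.h>
#include <linux/interrupt.h>

/* Reduced version of the per-event bookkeeping kept by the GED driver. */
struct example_ged_event {
	acpi_handle handle;	/* _Exx/_EVT method to evaluate */
	unsigned int gsi;	/* GSI this event was registered for */
	unsigned int irq;	/* Linux IRQ mapped from that GSI */
};

static irqreturn_t example_ged_irq_handler(int irq, void *data)
{
	struct example_ged_event *event = data;
	acpi_status status;

	/* Evaluate the event method, passing the GSI as its one argument. */
	status = acpi_execute_simple_method(event->handle, NULL, event->gsi);
	if (ACPI_FAILURE(status))
		return IRQ_NONE;

	return IRQ_HANDLED;
}
```
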
pci_irq.c
    386  int gsi;  (in acpi_pci_irq_enable() local)
    424  gsi = acpi_pci_link_allocate_irq(entry->link,  (in acpi_pci_irq_enable())
    429  gsi = entry->index;  (in acpi_pci_irq_enable())
    431  gsi = -1;  (in acpi_pci_irq_enable())
    433  if (gsi < 0) {  (in acpi_pci_irq_enable())
    451  rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity);  (in acpi_pci_irq_enable())
    467  pin_name(pin), link_desc, gsi,  (in acpi_pci_irq_enable())
    478  int gsi;  (in acpi_pci_irq_disable() local)
    498  gsi = acpi_pci_link_free_irq(entry->link);  (in acpi_pci_irq_disable())
    500  gsi = entry->index;  (in acpi_pci_irq_disable())
    [all …]

irq.c
     27  int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)  (in acpi_gsi_to_irq() argument)
     32  *irq = irq_find_mapping(d, gsi);  (in acpi_gsi_to_irq())
     51  int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,  (in acpi_register_gsi() argument)
     63  fwspec.param[0] = gsi;  (in acpi_register_gsi())
     79  void acpi_unregister_gsi(u32 gsi)  (in acpi_unregister_gsi() argument)
     85  if (WARN_ON(acpi_irq_model == ACPI_IRQ_MODEL_GIC && gsi < 16))  (in acpi_unregister_gsi())
     88  irq = irq_find_mapping(d, gsi);  (in acpi_unregister_gsi())

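irq.c holds the generic helpers themselves: acpi_register_gsi() creates the irqdomain mapping for a GSI, and acpi_gsi_to_irq() looks an existing mapping up (osl.c below uses the latter for the SCI). A small hypothetical caller showing the lookup-then-request pattern:

```c
#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/interrupt.h>

/*
 * Hypothetical helper: translate a firmware-reported GSI into the Linux
 * IRQ already mapped for it, then install a handler on that IRQ.
 */
static int example_attach_gsi_handler(u32 gsi, irq_handler_t handler,
				      void *dev_id)
{
	unsigned int irq;

	if (acpi_gsi_to_irq(gsi, &irq) < 0)
		return -ENODEV;	/* no mapping has been registered for this GSI */

	return request_irq(irq, handler, IRQF_SHARED, "example-gsi", dev_id);
}
```
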
resource.c
    536  static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,  (in acpi_dev_irq_override() argument)
    545  entry->irq == gsi &&  (in acpi_dev_irq_override())
    565  static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,  (in acpi_dev_get_irqresource() argument)
    571  if (!valid_IRQ(gsi)) {  (in acpi_dev_get_irqresource())
    572  irqresource_disabled(res, gsi);  (in acpi_dev_get_irqresource())
    587  acpi_dev_irq_override(gsi, triggering, polarity, shareable) &&  (in acpi_dev_get_irqresource())
    588  !acpi_get_override_irq(gsi, &t, &p)) {  (in acpi_dev_get_irqresource())
    593  pr_warn("ACPI: IRQ %d override to %s, %s\n", gsi,  (in acpi_dev_get_irqresource())
    601  irq = acpi_register_gsi(NULL, gsi, triggering, polarity);  (in acpi_dev_get_irqresource())
    606  irqresource_disabled(res, gsi);  (in acpi_dev_get_irqresource())

osl.c
    562  acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,  (in acpi_os_install_interrupt_handler() argument)
    573  if (gsi != acpi_gbl_FADT.sci_interrupt)  (in acpi_os_install_interrupt_handler())
    579  if (acpi_gsi_to_irq(gsi, &irq) < 0) {  (in acpi_os_install_interrupt_handler())
    580  pr_err("SCI (ACPI GSI %d) not registered\n", gsi);  (in acpi_os_install_interrupt_handler())
    596  acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)  (in acpi_os_remove_interrupt_handler() argument)
    598  if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())  (in acpi_os_remove_interrupt_handler())

/drivers/platform/x86/
intel_scu_wdt.c
     31  int gsi = TANGIER_EXT_TIMER0_MSI;  (in tangier_probe() local)
     39  irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);  (in tangier_probe())
     41  dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n", gsi);  (in tangier_probe())

/drivers/xen/events/
events_base.c
    122  unsigned short gsi;  (member)
    388  unsigned gsi,  (in xen_irq_info_pirq_setup() argument)
    395  info->u.pirq.gsi = gsi;  (in xen_irq_info_pirq_setup())
    776  static int __must_check xen_allocate_irq_gsi(unsigned gsi)  (in xen_allocate_irq_gsi() argument)
    790  if (gsi < nr_legacy_irqs())  (in xen_allocate_irq_gsi())
    791  irq = gsi;  (in xen_allocate_irq_gsi())
    793  irq = irq_alloc_desc_at(gsi, -1);  (in xen_allocate_irq_gsi())
    953  int xen_irq_from_gsi(unsigned gsi)  (in xen_irq_from_gsi() argument)
    961  if (info->u.pirq.gsi == gsi)  (in xen_irq_from_gsi())
   1018  int xen_bind_pirq_gsi_to_irq(unsigned gsi,  (in xen_bind_pirq_gsi_to_irq() argument)
    [all …]

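events_base.c keeps the GSI in its per-IRQ pirq bookkeeping and, when allocating a Linux IRQ for one, keeps legacy (ISA-range) GSIs identity-mapped while anything larger gets a descriptor allocated at the GSI number. A stripped-down sketch of that allocation decision from xen_allocate_irq_gsi():

```c
#include <linux/irq.h>
#include <linux/irqdesc.h>

/*
 * Legacy GSIs keep their 1:1 Linux IRQ numbers; anything else gets an IRQ
 * descriptor allocated at the GSI number (a negative return means failure).
 */
static int example_allocate_irq_gsi(unsigned int gsi)
{
	int irq;

	if (gsi < nr_legacy_irqs())
		irq = gsi;				/* identity-mapped */
	else
		irq = irq_alloc_desc_at(gsi, -1);	/* -1: any NUMA node */

	return irq;
}
```
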
/drivers/firmware/
pcdp.h
     48  u32 gsi;  (member)

/drivers/char/
hpet.c
    201  int irq, gsi;  (in hpet_timer_set_irq() local)
    238  gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,  (in hpet_timer_set_irq())
    240  if (gsi > 0)  (in hpet_timer_set_irq())
    251  devp->hd_hdwirq = gsi;  (in hpet_timer_set_irq())