Searched refs:gsi (Results 1 – 25 of 36) sorted by relevance

/drivers/net/ipa/
gsi.c
171 return channel - &channel->gsi->channel[0]; in gsi_channel_id()
177 return !!channel->gsi; in gsi_channel_initialized()
197 static void gsi_irq_type_update(struct gsi *gsi, u32 val) in gsi_irq_type_update() argument
199 const struct reg *reg = gsi_reg(gsi, CNTXT_TYPE_IRQ_MSK); in gsi_irq_type_update()
201 gsi->type_enabled_bitmap = val; in gsi_irq_type_update()
202 iowrite32(val, gsi->virt + reg_offset(reg)); in gsi_irq_type_update()
205 static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id) in gsi_irq_type_enable() argument
207 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | type_id); in gsi_irq_type_enable()
210 static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id) in gsi_irq_type_disable() argument
212 gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~type_id); in gsi_irq_type_disable()
[all …]
gsi.h
29 struct gsi;
104 struct gsi *gsi; member
140 struct gsi { struct
170 int gsi_setup(struct gsi *gsi);
176 void gsi_teardown(struct gsi *gsi);
185 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);
194 int gsi_channel_start(struct gsi *gsi, u32 channel_id);
203 int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
211 void gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id,
226 void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell);
[all …]
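
The gsi.h prototypes listed above make up the channel lifecycle API that the rest of the IPA driver calls. The following is a minimal sketch of the ordering those prototypes imply (setup, start, stop, reset, teardown); the function name and channel number are hypothetical, and the reset doorbell flag simply copies the value used in the ipa_endpoint.c hits further down.

#include <linux/types.h>

#include "gsi.h"

#define EXAMPLE_CHANNEL_ID	0	/* assumption: a valid channel index */

static int example_gsi_bring_up(struct gsi *gsi)
{
	int ret;

	ret = gsi_setup(gsi);		/* as ipa_setup() does in ipa_main.c */
	if (ret)
		return ret;

	ret = gsi_channel_start(gsi, EXAMPLE_CHANNEL_ID);
	if (ret)
		goto out_teardown;

	/* Transactions queued on the channel are bounded in size by
	 * gsi_channel_tre_max(gsi, EXAMPLE_CHANNEL_ID).
	 */

	ret = gsi_channel_stop(gsi, EXAMPLE_CHANNEL_ID);
	if (ret)
		goto out_teardown;

	/* Reset the now-stopped channel; doorbell flag as used by
	 * ipa_endpoint_reset_rx_aggr() further down.
	 */
	gsi_channel_reset(gsi, EXAMPLE_CHANNEL_ID, false);

out_teardown:
	gsi_teardown(gsi);		/* as ipa_teardown() does in ipa_main.c */
	return ret;
}
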
gsi_reg.c
13 static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id) in gsi_reg_id_valid() argument
18 return gsi->version >= IPA_VERSION_3_5; in gsi_reg_id_valid()
21 return gsi->version >= IPA_VERSION_3_5_1; in gsi_reg_id_valid()
24 return gsi->version >= IPA_VERSION_5_0; in gsi_reg_id_valid()
82 const struct reg *gsi_reg(struct gsi *gsi, enum gsi_reg_id reg_id) in gsi_reg() argument
84 if (WARN(!gsi_reg_id_valid(gsi, reg_id), "invalid reg %u\n", reg_id)) in gsi_reg()
87 return reg(gsi->regs, reg_id); in gsi_reg()
90 static const struct regs *gsi_regs(struct gsi *gsi) in gsi_regs() argument
92 switch (gsi->version) { in gsi_regs()
121 int gsi_reg_init(struct gsi *gsi, struct platform_device *pdev) in gsi_reg_init() argument
[all …]
ipa_gsi.c
17 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_gsi_trans_complete()
24 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_gsi_trans_release()
29 void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count, in ipa_gsi_channel_tx_queued() argument
32 struct ipa *ipa = container_of(gsi, struct ipa, gsi); in ipa_gsi_channel_tx_queued()
40 void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count, in ipa_gsi_channel_tx_completed() argument
43 struct ipa *ipa = container_of(gsi, struct ipa, gsi); in ipa_gsi_channel_tx_completed()
gsi_trans.h
21 struct gsi;
52 struct gsi *gsi; member
143 bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id);
155 struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
218 int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr);
228 void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id);
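
gsi_trans.h above declares transaction allocation; the full four-argument call is visible in the ipa_endpoint.c hit further down (gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction)). A small hypothetical helper, assuming the last argument is a standard DMA data direction as that call site suggests:

#include <linux/dma-direction.h>
#include <linux/types.h>

#include "gsi.h"
#include "gsi_trans.h"

static struct gsi_trans *example_trans_alloc(struct gsi *gsi, u32 channel_id,
					     u32 tre_count)
{
	/* Skip allocation unless the channel is quiescent */
	if (!gsi_channel_trans_idle(gsi, channel_id))
		return NULL;

	/* tre_count must not exceed gsi_channel_tre_max(gsi, channel_id) */
	return gsi_channel_trans_alloc(gsi, channel_id, tre_count,
				       DMA_TO_DEVICE);
}
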
gsi_trans.c
221 struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id]; in gsi_trans_map()
256 struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id]; in gsi_trans_move_committed()
266 struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id]; in gsi_trans_move_pending()
279 struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id]; in gsi_trans_move_complete()
293 struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id]; in gsi_trans_move_polled()
324 bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id) in gsi_channel_trans_idle() argument
326 u32 tre_max = gsi_channel_tre_max(gsi, channel_id); in gsi_channel_trans_idle()
329 trans_info = &gsi->channel[channel_id].trans_info; in gsi_channel_trans_idle()
335 struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id, in gsi_channel_trans_alloc() argument
339 struct gsi_channel *channel = &gsi->channel[channel_id]; in gsi_channel_trans_alloc()
[all …]
ipa_cmd.c
328 struct device *dev = channel->gsi->dev; in ipa_cmd_pool_init()
342 struct device *dev = channel->gsi->dev; in ipa_cmd_pool_exit()
354 trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info; in ipa_cmd_payload_alloc()
365 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_cmd_table_init_add()
403 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_cmd_hdr_init_local_add()
432 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_cmd_register_write_add()
489 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_cmd_ip_packet_init_add()
514 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_cmd_dma_shared_mem_add()
554 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_cmd_ip_tag_status_add()
572 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_cmd_transfer_add()
[all …]
ipa_gsi.h
11 struct gsi;
44 void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,
57 void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,
gsi_reg.h
15 struct gsi;
365 const struct reg *gsi_reg(struct gsi *gsi, enum gsi_reg_id reg_id);
375 int gsi_reg_init(struct gsi *gsi, struct platform_device *pdev);
381 void gsi_reg_exit(struct gsi *gsi);
ipa_endpoint.c
433 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_trans_alloc() local
439 return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction); in ipa_endpoint_trans_alloc()
590 gsi_modem_channel_flow_control(&ipa->gsi, in ipa_endpoint_modem_pause_all()
1372 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) in ipa_endpoint_replenish()
1382 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) in ipa_endpoint_replenish_enable()
1671 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_reset_rx_aggr() local
1697 gsi_channel_reset(gsi, endpoint->channel_id, false); in ipa_endpoint_reset_rx_aggr()
1703 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1707 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr); in ipa_endpoint_reset_rx_aggr()
1724 gsi_trans_read_byte_done(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
[all …]
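
The ipa_endpoint_reset_rx_aggr() hits above show a channel being reset, restarted, and fed a single byte via gsi_trans_read_byte()/gsi_trans_read_byte_done(). A stripped-down sketch of only that visible ordering, with the driver's error handling and intermediate steps omitted; the helper name and the scratch DMA address are hypothetical.

#include <linux/types.h>

#include "gsi.h"
#include "gsi_trans.h"

static int example_reset_rx(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
	int ret;

	gsi_channel_reset(gsi, channel_id, false);

	ret = gsi_channel_start(gsi, channel_id);
	if (ret)
		return ret;

	/* Queue a single-byte receive on the restarted channel */
	ret = gsi_trans_read_byte(gsi, channel_id, addr);
	if (ret)
		return ret;

	/* ...wait for the byte to land, then acknowledge the transfer... */
	gsi_trans_read_byte_done(gsi, channel_id);

	return 0;
}
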
gsi_private.h
80 int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id);
ipa.h
80 struct gsi gsi; member
ipa_main.c
119 ret = gsi_setup(&ipa->gsi); in ipa_setup()
175 gsi_teardown(&ipa->gsi); in ipa_setup()
200 gsi_teardown(&ipa->gsi); in ipa_teardown()
864 ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count, in ipa_probe()
926 gsi_exit(&ipa->gsi); in ipa_probe()
974 gsi_exit(&ipa->gsi); in ipa_remove()
ipa_table.c
205 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_table_reset_add()
387 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_table_init_add()
/drivers/infiniband/hw/mlx5/
gsi.c
49 struct mlx5_ib_gsi_qp *gsi = &mqp->gsi; in generate_completions() local
54 for (index = gsi->outstanding_ci; index != gsi->outstanding_pi; in generate_completions()
56 wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr]; in generate_completions()
65 gsi->outstanding_ci = index; in generate_completions()
70 struct mlx5_ib_gsi_qp *gsi = cq->cq_context; in handle_single_completion() local
73 struct mlx5_ib_qp *mqp = container_of(gsi, struct mlx5_ib_qp, gsi); in handle_single_completion()
77 spin_lock_irqsave(&gsi->lock, flags); in handle_single_completion()
85 spin_unlock_irqrestore(&gsi->lock, flags); in handle_single_completion()
92 struct mlx5_ib_gsi_qp *gsi; in mlx5_ib_create_gsi() local
106 gsi = &mqp->gsi; in mlx5_ib_create_gsi()
[all …]
/drivers/perf/
arm_pmu_acpi.c
25 int gsi, trigger; in arm_pmu_acpi_register_irq() local
29 gsi = gicc->performance_interrupt; in arm_pmu_acpi_register_irq()
38 if (!gsi) in arm_pmu_acpi_register_irq()
57 return acpi_register_gsi(NULL, gsi, trigger, ACPI_ACTIVE_HIGH); in arm_pmu_acpi_register_irq()
63 int gsi; in arm_pmu_acpi_unregister_irq() local
67 gsi = gicc->performance_interrupt; in arm_pmu_acpi_unregister_irq()
68 if (gsi) in arm_pmu_acpi_unregister_irq()
69 acpi_unregister_gsi(gsi); in arm_pmu_acpi_unregister_irq()
77 u16 this_gsi = 0, gsi = 0; in arm_acpi_register_pmu_device() local
98 return gsi ? -ENXIO : 0; in arm_acpi_register_pmu_device()
[all …]
/drivers/acpi/
irq.c
15 static struct fwnode_handle *(*acpi_get_gsi_domain_id)(u32 gsi);
16 static u32 (*acpi_gsi_to_irq_fallback)(u32 gsi);
28 int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) in acpi_gsi_to_irq() argument
32 d = irq_find_matching_fwnode(acpi_get_gsi_domain_id(gsi), in acpi_gsi_to_irq()
34 *irq = irq_find_mapping(d, gsi); in acpi_gsi_to_irq()
40 *irq = acpi_gsi_to_irq_fallback(gsi); in acpi_gsi_to_irq()
56 int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, in acpi_register_gsi() argument
62 fwspec.fwnode = acpi_get_gsi_domain_id(gsi); in acpi_register_gsi()
68 fwspec.param[0] = gsi; in acpi_register_gsi()
84 void acpi_unregister_gsi(u32 gsi) in acpi_unregister_gsi() argument
[all …]
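
The drivers/acpi/irq.c hits above cover the generic GSI handling: acpi_register_gsi() maps a GSI onto a Linux IRQ, acpi_gsi_to_irq() looks up an existing mapping, and acpi_unregister_gsi() drops it. Below is a hypothetical caller with a made-up GSI number; the trigger and polarity constants match the arm_pmu_acpi.c and irq-loongarch-cpu.c call sites elsewhere in these results.

#include <linux/acpi.h>
#include <linux/errno.h>

static int example_map_gsi(struct device *dev)
{
	u32 gsi = 42;		/* hypothetical GSI from a firmware table */
	unsigned int irq;
	int ret;

	/* Create (or reuse) the mapping; the return value is the Linux IRQ */
	ret = acpi_register_gsi(dev, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
	if (ret < 0)
		return ret;

	/* An existing mapping can also be looked up without registering */
	if (acpi_gsi_to_irq(gsi, &irq))
		return -ENXIO;

	/* ...request_irq(ret, ...), use it, free_irq(), then drop the mapping... */
	acpi_unregister_gsi(gsi);

	return 0;
}
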
evged.c
51 unsigned int gsi; member
61 acpi_ret = acpi_execute_simple_method(event->handle, NULL, event->gsi); in acpi_ged_irq_handler()
73 unsigned int gsi; in acpi_ged_request_interrupt() local
93 gsi = p->interrupts[0]; in acpi_ged_request_interrupt()
96 gsi = pext->interrupts[0]; in acpi_ged_request_interrupt()
102 switch (gsi) { in acpi_ged_request_interrupt()
105 trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi); in acpi_ged_request_interrupt()
122 event->gsi = gsi; in acpi_ged_request_interrupt()
136 dev_dbg(dev, "GED listening GSI %u @ IRQ %u\n", gsi, irq); in acpi_ged_request_interrupt()
172 event->gsi, event->irq); in ged_shutdown()
pci_irq.c
386 int gsi; in acpi_pci_irq_enable() local
426 gsi = acpi_pci_link_allocate_irq(entry->link, in acpi_pci_irq_enable()
431 gsi = entry->index; in acpi_pci_irq_enable()
433 gsi = -1; in acpi_pci_irq_enable()
435 if (gsi < 0) { in acpi_pci_irq_enable()
453 rc = acpi_register_gsi(&dev->dev, gsi, triggering, polarity); in acpi_pci_irq_enable()
469 pin_name(pin), link_desc, gsi, in acpi_pci_irq_enable()
480 int gsi; in acpi_pci_irq_disable() local
500 gsi = acpi_pci_link_free_irq(entry->link); in acpi_pci_irq_disable()
502 gsi = entry->index; in acpi_pci_irq_disable()
[all …]
resource.c
630 static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, in acpi_dev_irq_override() argument
639 entry->irq == gsi && in acpi_dev_irq_override()
652 if (gsi != 1 && gsi != 12) in acpi_dev_irq_override()
656 if (acpi_int_src_ovr[gsi]) in acpi_dev_irq_override()
671 static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, in acpi_dev_get_irqresource() argument
677 if (!valid_IRQ(gsi)) { in acpi_dev_get_irqresource()
678 irqresource_disabled(res, gsi); in acpi_dev_get_irqresource()
693 acpi_dev_irq_override(gsi, triggering, polarity, shareable) && in acpi_dev_get_irqresource()
694 !acpi_get_override_irq(gsi, &t, &p)) { in acpi_dev_get_irqresource()
699 pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi, in acpi_dev_get_irqresource()
[all …]
/drivers/virt/geniezone/
gzvm_irqfd.c
13 unsigned int gsi; member
31 int gsi; member
131 irqfd_set_irq(gzvm, irqfd->gsi, 1); in irqfd_wakeup()
174 irqfd->gsi = args->gsi; in gzvm_irqfd_assign()
207 pr_err("already used: gsi=%d fd=%d\n", args->gsi, args->fd); in gzvm_irqfd_assign()
241 static void gzvm_notify_acked_gsi(struct gzvm *gzvm, int gsi) in gzvm_notify_acked_gsi() argument
247 if (gian->gsi == gsi) in gzvm_notify_acked_gsi()
251 void gzvm_notify_acked_irq(struct gzvm *gzvm, unsigned int gsi) in gzvm_notify_acked_irq() argument
256 gzvm_notify_acked_gsi(gzvm, gsi); in gzvm_notify_acked_irq()
281 if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) in gzvm_irqfd_deassign()
/drivers/irqchip/
irq-loongarch-cpu.c
19 static u32 lpic_gsi_to_irq(u32 gsi) in lpic_gsi_to_irq() argument
24 if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ) in lpic_gsi_to_irq()
25 irq = acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH); in lpic_gsi_to_irq()
30 static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi) in lpic_get_gsi_domain_id() argument
35 switch (gsi) { in lpic_get_gsi_domain_id()
47 id = find_pch_pic(gsi); in lpic_get_gsi_domain_id()
irq-loongson-pch-pic.c
363 int find_pch_pic(u32 gsi) in find_pch_pic() argument
374 if (gsi >= priv->gsi_base && gsi < (priv->gsi_base + priv->vec_count)) in find_pch_pic()
378 pr_err("ERROR: Unable to locate PCH_PIC for GSI %d\n", gsi); in find_pch_pic()
/drivers/platform/x86/
intel_scu_wdt.c
31 int gsi = TANGIER_EXT_TIMER0_MSI; in tangier_probe() local
39 irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info); in tangier_probe()
41 dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n", gsi); in tangier_probe()
/drivers/xen/events/
events_base.c
124 unsigned short gsi; member
389 unsigned int pirq, unsigned int gsi, in xen_irq_info_pirq_setup() argument
393 info->u.pirq.gsi = gsi; in xen_irq_info_pirq_setup()
756 static struct irq_info *xen_allocate_irq_gsi(unsigned int gsi) in xen_allocate_irq_gsi() argument
771 if (gsi < nr_legacy_irqs()) in xen_allocate_irq_gsi()
772 irq = gsi; in xen_allocate_irq_gsi()
774 irq = irq_alloc_desc_at(gsi, -1); in xen_allocate_irq_gsi()
925 int xen_irq_from_gsi(unsigned gsi) in xen_irq_from_gsi() argument
933 if (info->u.pirq.gsi == gsi) in xen_irq_from_gsi()
1002 int xen_bind_pirq_gsi_to_irq(unsigned gsi, in xen_bind_pirq_gsi_to_irq() argument
[all …]
