Lines Matching full:ipa
16 #include "ipa.h"
81 * IPA hardware as a number of KB. We don't use "hard byte in ipa_endpoint_validate_build()
102 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid_one() argument
107 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_data_valid_one()
183 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid() argument
187 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_data_valid()
217 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp)) in ipa_endpoint_data_valid()
225 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid() argument
237 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_trans_alloc()
247 * Note that suspend is not supported starting with IPA v4.0.
253 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_ctrl() local
258 /* Suspend is not supported for IPA v4.0+. Delay doesn't work in ipa_endpoint_init_ctrl()
259 * correctly on IPA v4.2. in ipa_endpoint_init_ctrl()
262 *	 assert(ipa->version != IPA_VERSION_4_2); in ipa_endpoint_init_ctrl()
264 * assert(ipa->version == IPA_VERSION_3_5_1); in ipa_endpoint_init_ctrl()
268 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
273 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
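The ipa_endpoint_init_ctrl() hits above show the register sequence used to delay a TX endpoint or suspend an RX endpoint: read the per-endpoint control register, set or clear one bit, and write it back. Below is a minimal standalone sketch of that read-modify-write pattern; the bit positions, the in-memory "register" and the helper names are illustrative stand-ins, not the driver's real definitions.

    #include <stdbool.h>
    #include <stdint.h>

    #define ENDPOINT_DELAY_FMASK    (1u << 0)   /* TX: hold outgoing data (assumed position) */
    #define ENDPOINT_SUSPEND_FMASK  (1u << 4)   /* RX: suspend the endpoint (assumed position) */

    static uint32_t ctrl_reg;   /* stand-in for the MMIO word at ipa->reg_virt + offset */

    /* Set or clear the relevant control bit and report whether it was already set. */
    static bool endpoint_init_ctrl(bool toward_ipa, bool enable)
    {
            uint32_t mask = toward_ipa ? ENDPOINT_DELAY_FMASK : ENDPOINT_SUSPEND_FMASK;
            uint32_t val = ctrl_reg;            /* ioread32() in the driver */
            bool was_enabled = !!(val & mask);

            if (enable)
                    val |= mask;
            else
                    val &= ~mask;
            ctrl_reg = val;                     /* iowrite32() in the driver */

            return was_enabled;
    }

    int main(void)
    {
            return endpoint_init_ctrl(true, true) ? 1 : 0;   /* delay a TX endpoint */
    }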
285 /* Delay mode doesn't work properly for IPA v4.2 */ in ipa_endpoint_program_delay()
286 if (endpoint->ipa->version != IPA_VERSION_4_2) in ipa_endpoint_program_delay()
293 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_aggr_active() local
297 /* assert(mask & ipa->available); */ in ipa_endpoint_aggr_active()
298 offset = ipa_reg_state_aggr_active_offset(ipa->version); in ipa_endpoint_aggr_active()
299 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_aggr_active()
307 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_force_close() local
309 /* assert(mask & ipa->available); */ in ipa_endpoint_force_close()
310 iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET); in ipa_endpoint_force_close()
317 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
319 * issue in IPA version 3.5.1 where the suspend interrupt will not be
324 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_suspend_aggr() local
336 ipa_interrupt_simulate_suspend(ipa->interrupt); in ipa_endpoint_suspend_aggr()
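The ipa_endpoint_suspend_aggr() hits describe a v3.5.1-specific workaround: if an RX endpoint is suspended while aggregation is active, the hardware will not raise the SUSPEND interrupt, so the driver force-closes the open aggregation and simulates the interrupt itself. A rough standalone model of that decision follows; the helpers are placeholders for the driver calls seen above (ipa_endpoint_aggr_active(), ipa_endpoint_force_close(), ipa_interrupt_simulate_suspend()).

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholders for the driver helpers named in the hits above. */
    static bool aggr_active(unsigned int endpoint_id)   { (void)endpoint_id; return true; }
    static void force_close(unsigned int endpoint_id)   { printf("force close %u\n", endpoint_id); }
    static void simulate_suspend_irq(void)              { puts("simulated SUSPEND interrupt"); }

    /* IPA v3.5.1: no SUSPEND interrupt arrives while aggregation is open,
     * so close the aggregation and inject the interrupt by hand.
     */
    static void suspend_aggr_workaround(unsigned int endpoint_id)
    {
            if (!aggr_active(endpoint_id))
                    return;
            force_close(endpoint_id);
            simulate_suspend_irq();
    }

    int main(void)
    {
            suspend_aggr_workaround(5);
            return 0;
    }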
345 if (endpoint->ipa->version != IPA_VERSION_3_5_1) in ipa_endpoint_program_suspend()
346 return enable; /* For IPA v4.0+, no change made */ in ipa_endpoint_program_suspend()
353 * generate a SUSPEND IPA interrupt. If enabling suspend, have in ipa_endpoint_program_suspend()
363 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) in ipa_endpoint_modem_pause_all() argument
367 /* DELAY mode doesn't work correctly on IPA v4.2 */ in ipa_endpoint_modem_pause_all()
368 if (ipa->version == IPA_VERSION_4_2) in ipa_endpoint_modem_pause_all()
372 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_pause_all()
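ipa_endpoint_modem_pause_all() bails out entirely on IPA v4.2, where DELAY mode is broken, and otherwise walks the endpoint array pausing only modem-owned endpoints. A simplified standalone sketch of that loop, under the assumption (consistent with the program_delay/program_suspend hits above) that TX endpoints get DELAY mode and RX endpoints get suspended; struct endpoint is a made-up stand-in for struct ipa_endpoint.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    enum ipa_version { IPA_VERSION_3_5_1, IPA_VERSION_4_0, IPA_VERSION_4_2 };

    struct endpoint {
            bool modem_owned;    /* stand-in for the driver's execution-environment check */
            bool toward_ipa;     /* true for TX (toward IPA) endpoints */
    };

    static void program_delay(size_t id, bool enable)   { printf("delay   %zu %d\n", id, enable); }
    static void program_suspend(size_t id, bool enable) { printf("suspend %zu %d\n", id, enable); }

    static void modem_pause_all(enum ipa_version version, struct endpoint *ep,
                                size_t count, bool enable)
    {
            /* DELAY mode doesn't work correctly on IPA v4.2 */
            if (version == IPA_VERSION_4_2)
                    return;

            for (size_t id = 0; id < count; id++) {
                    if (!ep[id].modem_owned)
                            continue;
                    if (ep[id].toward_ipa)
                            program_delay(id, enable);     /* TX: hold outgoing data */
                    else
                            program_suspend(id, enable);   /* RX: suspend the endpoint */
            }
    }

    int main(void)
    {
            struct endpoint ep[] = {
                    { .modem_owned = true,  .toward_ipa = true  },
                    { .modem_owned = true,  .toward_ipa = false },
                    { .modem_owned = false, .toward_ipa = true  },
            };

            modem_pause_all(IPA_VERSION_4_0, ep, 3, true);
            return 0;
    }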
386 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) in ipa_endpoint_modem_exception_reset_all() argument
388 u32 initialized = ipa->initialized; in ipa_endpoint_modem_exception_reset_all()
393 * bound on that by assuming all initialized endpoints are modem->IPA. in ipa_endpoint_modem_exception_reset_all()
398 trans = ipa_cmd_trans_alloc(ipa, count); in ipa_endpoint_modem_exception_reset_all()
400 dev_err(&ipa->pdev->dev, in ipa_endpoint_modem_exception_reset_all()
413 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_exception_reset_all()
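The comment matched at line 393 explains how ipa_endpoint_modem_exception_reset_all() sizes its command transaction: rather than counting modem TX endpoints exactly, it takes an upper bound by treating every initialized endpoint as modem->IPA. A small standalone illustration of that bound, using __builtin_popcount() in place of the kernel's hweight32() (the example mask is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Upper bound on commands needed: assume every initialized endpoint
     * is a modem TX endpoint and needs one command.
     */
    static uint32_t exception_reset_command_bound(uint32_t initialized)
    {
            return (uint32_t)__builtin_popcount(initialized);   /* like hweight32() */
    }

    int main(void)
    {
            printf("%u commands\n", exception_reset_command_bound(0x0000c403));
            return 0;
    }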
461 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_cfg()
470 * packet size field, and we have the IPA hardware populate both for each
502 /* Where IPA will write the metadata value */ in ipa_endpoint_init_hdr()
506 /* Where IPA will write the length */ in ipa_endpoint_init_hdr()
520 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr()
548 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr_ext()
567 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr_metadata_mask()
582 dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id; in ipa_endpoint_init_mode()
591 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_mode()
644 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_aggr()
648 * tick represents 128 cycles of the IPA core clock. Return the value
652 static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds) in ipa_reg_init_hol_block_timer_val() argument
665 rate = ipa_clock_rate(ipa); in ipa_reg_init_hol_block_timer_val()
670 /* IPA v3.5.1 just records the tick count */ in ipa_reg_init_hol_block_timer_val()
671 if (ipa->version == IPA_VERSION_3_5_1) in ipa_reg_init_hol_block_timer_val()
674 /* For IPA v4.2, the tick count is represented by base and in ipa_reg_init_hol_block_timer_val()
704 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_timer() local
710 val = ipa_reg_init_hol_block_timer_val(ipa, microseconds); in ipa_endpoint_init_hol_block_timer()
711 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_timer()
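ipa_reg_init_hol_block_timer_val() converts a timeout in microseconds into hardware ticks, where one tick is 128 IPA core clock cycles; v3.5.1 programs the raw tick count directly, while v4.2 encodes it as a base and exponent. A standalone sketch of the microseconds-to-ticks arithmetic only (the 100 MHz clock rate is a made-up example, and the v4.2 base/exponent encoding is omitted):

    #include <stdint.h>
    #include <stdio.h>

    /* One tick is 128 core-clock cycles, so:
     *   ticks = microseconds * rate_hz / (128 * 1,000,000)
     * 64-bit intermediates avoid overflow; round to the nearest tick.
     */
    static uint32_t hol_block_ticks(uint32_t microseconds, uint32_t rate_hz)
    {
            uint64_t numer = (uint64_t)microseconds * rate_hz;
            uint64_t denom = 128ull * 1000000ull;

            return (uint32_t)((numer + denom / 2) / denom);
    }

    int main(void)
    {
            /* Example: 1000 us timeout at an assumed 100 MHz core clock -> 781 ticks. */
            printf("%u\n", hol_block_ticks(1000, 100000000u));
            return 0;
    }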
723 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_enable()
726 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) in ipa_endpoint_modem_hol_block_clear_all() argument
731 struct ipa_endpoint *endpoint = &ipa->endpoint[i]; in ipa_endpoint_modem_hol_block_clear_all()
755 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_deaggr()
774 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_seq()
823 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status() local
836 status_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_status()
842 /* The next field is present for IPA v4.0 and above */ in ipa_endpoint_status()
846 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_status()
942 gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish()
950 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish_enable()
1086 dev_err(&endpoint->ipa->pdev->dev, in ipa_endpoint_status_parse()
1169 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_trans_release() local
1172 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { in ipa_endpoint_trans_release()
1186 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id) in ipa_endpoint_default_route_set() argument
1197 iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET); in ipa_endpoint_default_route_set()
1200 void ipa_endpoint_default_route_clear(struct ipa *ipa) in ipa_endpoint_default_route_clear() argument
1202 ipa_endpoint_default_route_set(ipa, 0); in ipa_endpoint_default_route_clear()
1211 * taken to ensure the IPA pipeline is properly cleared.
1217 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_reset_rx_aggr()
1218 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset_rx_aggr() local
1219 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_reset_rx_aggr()
1284 legacy = ipa->version == IPA_VERSION_3_5_1; in ipa_endpoint_reset_rx_aggr()
1306 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset() local
1311 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation in ipa_endpoint_reset()
1315 * IPA v3.5.1 enables the doorbell engine. Newer versions do not. in ipa_endpoint_reset()
1317 legacy = ipa->version == IPA_VERSION_3_5_1; in ipa_endpoint_reset()
1322 gsi_channel_reset(&ipa->gsi, channel_id, legacy); in ipa_endpoint_reset()
1325 dev_err(&ipa->pdev->dev, in ipa_endpoint_reset()
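The ipa_endpoint_reset() hits show the version check that drives the reset path: only IPA v3.5.1 needs the careful RX-with-aggregation-active sequence, and only it enables the doorbell engine on reset; everything else goes straight to gsi_channel_reset(). A condensed standalone model of that selection, with the probes and reset helpers reduced to stubs (the exact condition for the special path is an assumption based on the comments above):

    #include <stdbool.h>
    #include <stdio.h>

    enum ipa_version { IPA_VERSION_3_5_1, IPA_VERSION_4_0, IPA_VERSION_4_2 };

    static bool aggr_active(void)            { return true; }                     /* stub probe */
    static int  reset_rx_aggr(void)          { puts("special RX aggr reset"); return 0; }
    static void channel_reset(bool doorbell) { printf("gsi reset, doorbell=%d\n", doorbell); }

    static int endpoint_reset(enum ipa_version version, bool rx, bool aggregation)
    {
            bool legacy = version == IPA_VERSION_3_5_1;   /* v3.5.1 uses the doorbell engine */
            bool special = legacy && rx && aggregation;

            if (special && aggr_active())
                    return reset_rx_aggr();               /* pipeline-clearing path */

            channel_reset(legacy);
            return 0;
    }

    int main(void)
    {
            return endpoint_reset(IPA_VERSION_3_5_1, true, true);
    }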
1349 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_enable_one() local
1350 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_enable_one()
1355 dev_err(&ipa->pdev->dev, in ipa_endpoint_enable_one()
1363 ipa_interrupt_suspend_enable(ipa->interrupt, in ipa_endpoint_enable_one()
1368 ipa->enabled |= BIT(endpoint->endpoint_id); in ipa_endpoint_enable_one()
1376 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_disable_one() local
1377 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_disable_one()
1380 if (!(ipa->enabled & mask)) in ipa_endpoint_disable_one()
1383 ipa->enabled ^= mask; in ipa_endpoint_disable_one()
1387 ipa_interrupt_suspend_disable(ipa->interrupt, in ipa_endpoint_disable_one()
1394 dev_err(&ipa->pdev->dev, in ipa_endpoint_disable_one()
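ipa_endpoint_enable_one() and ipa_endpoint_disable_one() keep the per-IPA "enabled" bitmask in step with the hardware: enabling sets the endpoint's bit, and disabling checks the bit before clearing it. A trivial standalone version of that bookkeeping:

    #include <stdint.h>

    #define BIT(n)  (1u << (n))

    static uint32_t enabled;   /* one bit per endpoint, as in struct ipa */

    static void endpoint_enable_one(unsigned int endpoint_id)
    {
            enabled |= BIT(endpoint_id);
    }

    static void endpoint_disable_one(unsigned int endpoint_id)
    {
            uint32_t mask = BIT(endpoint_id);

            if (!(enabled & mask))
                    return;          /* not enabled; nothing to undo */
            enabled ^= mask;         /* clear exactly this endpoint's bit */
    }

    int main(void)
    {
            endpoint_enable_one(3);
            endpoint_disable_one(3);
            return enabled;          /* 0: the mask is back to empty */
    }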
1401 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_suspend_one()
1402 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_suspend_one()
1406 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_suspend_one()
1414 /* IPA v3.5.1 doesn't use channel stop for suspend */ in ipa_endpoint_suspend_one()
1415 stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; in ipa_endpoint_suspend_one()
1424 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_resume_one()
1425 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_resume_one()
1429 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_resume_one()
1435 /* IPA v3.5.1 doesn't use channel start for resume */ in ipa_endpoint_resume_one()
1436 start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; in ipa_endpoint_resume_one()
1445 void ipa_endpoint_suspend(struct ipa *ipa) in ipa_endpoint_suspend() argument
1447 if (!ipa->setup_complete) in ipa_endpoint_suspend()
1450 if (ipa->modem_netdev) in ipa_endpoint_suspend()
1451 ipa_modem_suspend(ipa->modem_netdev); in ipa_endpoint_suspend()
1453 ipa_cmd_tag_process(ipa); in ipa_endpoint_suspend()
1455 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_suspend()
1456 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_suspend()
1459 void ipa_endpoint_resume(struct ipa *ipa) in ipa_endpoint_resume() argument
1461 if (!ipa->setup_complete) in ipa_endpoint_resume()
1464 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_resume()
1465 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_resume()
1467 if (ipa->modem_netdev) in ipa_endpoint_resume()
1468 ipa_modem_resume(ipa->modem_netdev); in ipa_endpoint_resume()
1473 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_setup_one()
1496 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); in ipa_endpoint_setup_one()
1501 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_teardown_one()
1509 void ipa_endpoint_setup(struct ipa *ipa) in ipa_endpoint_setup() argument
1511 u32 initialized = ipa->initialized; in ipa_endpoint_setup()
1513 ipa->set_up = 0; in ipa_endpoint_setup()
1519 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_setup()
1523 void ipa_endpoint_teardown(struct ipa *ipa) in ipa_endpoint_teardown() argument
1525 u32 set_up = ipa->set_up; in ipa_endpoint_teardown()
1532 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_teardown()
1534 ipa->set_up = 0; in ipa_endpoint_teardown()
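ipa_endpoint_setup() and ipa_endpoint_teardown() don't scan the whole endpoint array; they work from the "initialized" and "set_up" bitmasks respectively. The exact loop isn't shown in the hits, but a standalone sketch of iterating the set bits of such a mask, using __builtin_ctz() where kernel code would use __ffs(), looks like this:

    #include <stdint.h>
    #include <stdio.h>

    static void visit(unsigned int endpoint_id)
    {
            printf("endpoint %u\n", endpoint_id);
    }

    /* Call visit() once for each set bit, lowest endpoint id first. */
    static void for_each_set_endpoint(uint32_t mask)
    {
            while (mask) {
                    unsigned int endpoint_id = __builtin_ctz(mask);

                    mask ^= 1u << endpoint_id;   /* clear the bit just handled */
                    visit(endpoint_id);
            }
    }

    int main(void)
    {
            for_each_set_endpoint(0x0000c403);   /* example mask: endpoints 0, 1, 10, 14, 15 */
            return 0;
    }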
1537 int ipa_endpoint_config(struct ipa *ipa) in ipa_endpoint_config() argument
1539 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_config()
1551 val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET); in ipa_endpoint_config()
1553 /* Our RX is an IPA producer */ in ipa_endpoint_config()
1563 /* Our TX is an IPA consumer */ in ipa_endpoint_config()
1567 ipa->available = rx_mask | tx_mask; in ipa_endpoint_config()
1570 if (ipa->initialized & ~ipa->available) { in ipa_endpoint_config()
1572 ipa->initialized & ~ipa->available); in ipa_endpoint_config()
1576 initialized = ipa->initialized; in ipa_endpoint_config()
1584 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_config()
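ipa_endpoint_config() reads the FLAVOR_0 register to learn which RX (IPA producer) and TX (IPA consumer) endpoints the hardware actually provides, builds an "available" bitmask from them, and rejects any endpoint that was initialized from config data but isn't available. A standalone sketch of that check; the base/count values are made-up examples rather than real FLAVOR_0 field decodes:

    #include <stdint.h>
    #include <stdio.h>

    /* Bits h..l set, like the kernel's GENMASK(). */
    #define GENMASK(h, l)  ((~0u >> (31 - (h))) & (~0u << (l)))

    int main(void)
    {
            /* Made-up hardware counts standing in for the FLAVOR_0 fields. */
            unsigned int rx_base = 10, rx_count = 6, tx_count = 8;

            uint32_t rx_mask = GENMASK(rx_base + rx_count - 1, rx_base);   /* RX: IPA producers */
            uint32_t tx_mask = GENMASK(tx_count - 1, 0);                   /* TX: IPA consumers */
            uint32_t available = rx_mask | tx_mask;
            uint32_t initialized = 0x00010c03;     /* example: endpoint 16 isn't available */

            if (initialized & ~available)
                    printf("unavailable endpoint id(s) 0x%08x\n",
                           initialized & ~available);
            return 0;
    }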
1595 void ipa_endpoint_deconfig(struct ipa *ipa) in ipa_endpoint_deconfig() argument
1597 ipa->available = 0; /* Nothing more to do */ in ipa_endpoint_deconfig()
1600 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, in ipa_endpoint_init_one() argument
1605 endpoint = &ipa->endpoint[data->endpoint_id]; in ipa_endpoint_init_one()
1608 ipa->channel_map[data->channel_id] = endpoint; in ipa_endpoint_init_one()
1609 ipa->name_map[name] = endpoint; in ipa_endpoint_init_one()
1611 endpoint->ipa = ipa; in ipa_endpoint_init_one()
1619 ipa->initialized |= BIT(endpoint->endpoint_id); in ipa_endpoint_init_one()
1624 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_exit_one()
1629 void ipa_endpoint_exit(struct ipa *ipa) in ipa_endpoint_exit() argument
1631 u32 initialized = ipa->initialized; in ipa_endpoint_exit()
1638 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_exit()
1640 memset(ipa->name_map, 0, sizeof(ipa->name_map)); in ipa_endpoint_exit()
1641 memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); in ipa_endpoint_exit()
1645 u32 ipa_endpoint_init(struct ipa *ipa, u32 count, in ipa_endpoint_init() argument
1651 if (!ipa_endpoint_data_valid(ipa, count, data)) in ipa_endpoint_init()
1654 ipa->initialized = 0; in ipa_endpoint_init()
1661 ipa_endpoint_init_one(ipa, name, data); in ipa_endpoint_init()
1667 if (!ipa_filter_map_valid(ipa, filter_map)) in ipa_endpoint_init()
1673 ipa_endpoint_exit(ipa); in ipa_endpoint_init()