Lines Matching full:ipa
16 #include "ipa.h"
81 * IPA hardware as a number of KB. We don't use "hard byte limit" in ipa_endpoint_validate_build()
102 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid_one() argument
107 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_data_valid_one()
183 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid() argument
187 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_data_valid()
217 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp)) in ipa_endpoint_data_valid()
225 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, in ipa_endpoint_data_valid() argument
237 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_trans_alloc()
247 * Note that suspend is not supported starting with IPA v4.0.
253 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_ctrl() local
258 /* Suspend is not supported for IPA v4.0+. Delay doesn't work in ipa_endpoint_init_ctrl()
259 * correctly on IPA v4.2. in ipa_endpoint_init_ctrl()
262 * assert(ipa->version != IPA_VERSION_4_2); in ipa_endpoint_init_ctrl()
264 * assert(ipa->version == IPA_VERSION_3_5_1); in ipa_endpoint_init_ctrl()
268 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
273 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_ctrl()
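
The ipa_endpoint_init_ctrl() lines above show a read-modify-write of the endpoint's INIT_CTRL register, subject to the version constraints in the comment (no suspend on IPA v4.0+, delay broken on v4.2). Below is a minimal standalone sketch of that read-modify-write pattern, assuming mock I/O helpers and made-up bit positions; the real field masks and register offset are not part of this listing.

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t fake_init_ctrl_reg;    /* stands in for ipa->reg_virt + offset */

    static uint32_t ioread32_mock(void)          { return fake_init_ctrl_reg; }
    static void     iowrite32_mock(uint32_t val) { fake_init_ctrl_reg = val; }

    /* toward_ipa selects the TX delay bit vs. the RX suspend bit; the bit
     * positions here are illustrative assumptions only.
     */
    static bool endpoint_init_ctrl(bool toward_ipa, bool enable)
    {
        uint32_t mask = toward_ipa ? (1u << 0) : (1u << 4);
        uint32_t val = ioread32_mock();
        bool prev = !!(val & mask);

        if (enable)
            val |= mask;
        else
            val &= ~mask;
        iowrite32_mock(val);

        return prev;    /* previous setting of the bit */
    }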
285 /* Delay mode doesn't work properly for IPA v4.2 */ in ipa_endpoint_program_delay()
286 if (endpoint->ipa->version != IPA_VERSION_4_2) in ipa_endpoint_program_delay()
293 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_aggr_active() local
297 /* assert(mask & ipa->available); */ in ipa_endpoint_aggr_active()
298 offset = ipa_reg_state_aggr_active_offset(ipa->version); in ipa_endpoint_aggr_active()
299 val = ioread32(ipa->reg_virt + offset); in ipa_endpoint_aggr_active()
307 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_force_close() local
309 /* assert(mask & ipa->available); */ in ipa_endpoint_force_close()
310 iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET); in ipa_endpoint_force_close()
317 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
319 * issue in IPA version 3.5.1 where the suspend interrupt will not be
324 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_suspend_aggr() local
336 ipa_interrupt_simulate_suspend(ipa->interrupt); in ipa_endpoint_suspend_aggr()
345 if (endpoint->ipa->version != IPA_VERSION_3_5_1) in ipa_endpoint_program_suspend()
346 return enable; /* For IPA v4.0+, no change made */ in ipa_endpoint_program_suspend()
353 * generate a SUSPEND IPA interrupt. If enabling suspend, have in ipa_endpoint_program_suspend()
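353 * generate a SUSPEND IPA interrupt. If enabling suspend, have in ipa_endpoint_program_suspend()

The suspend lines above describe a v3.5.1-only workaround: an RX endpoint suspended while an aggregation frame is open never raises the SUSPEND interrupt, so the driver force-closes the frame and simulates the interrupt (ipa_endpoint_force_close() and ipa_interrupt_simulate_suspend() in the listing). A standalone model of that flow, using stand-in types and stub helpers rather than the driver's own:

    #include <stdbool.h>

    enum ipa_version { IPA_VERSION_3_5_1, IPA_VERSION_4_0, IPA_VERSION_4_2 };

    struct fake_endpoint {
        enum ipa_version version;
        bool toward_ipa;    /* true for TX (AP->IPA) endpoints */
        bool aggr_active;   /* open aggregation frame? */
        bool suspended;
    };

    static void force_close_aggr(struct fake_endpoint *ep)    { ep->aggr_active = false; }
    static void simulate_suspend_irq(struct fake_endpoint *ep){ (void)ep; /* wake any waiter */ }

    /* Returns the previous suspend state, mirroring the flow in the listing */
    static bool program_suspend(struct fake_endpoint *ep, bool enable)
    {
        bool was_suspended;

        if (ep->version != IPA_VERSION_3_5_1)
            return enable;              /* v4.0+: no register change made */

        was_suspended = ep->suspended;
        ep->suspended = enable;

        /* A client suspended with an open aggregation frame will not
         * generate a SUSPEND interrupt, so close the frame and emulate it.
         */
        if (enable && !was_suspended && ep->aggr_active) {
            force_close_aggr(ep);
            simulate_suspend_irq(ep);
        }

        return was_suspended;
    }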
363 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) in ipa_endpoint_modem_pause_all() argument
367 /* DELAY mode doesn't work correctly on IPA v4.2 */ in ipa_endpoint_modem_pause_all()
368 if (ipa->version == IPA_VERSION_4_2) in ipa_endpoint_modem_pause_all()
372 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_pause_all()
386 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) in ipa_endpoint_modem_exception_reset_all() argument
388 u32 initialized = ipa->initialized; in ipa_endpoint_modem_exception_reset_all()
393 * bound on that by assuming all initialized endpoints are modem->IPA. in ipa_endpoint_modem_exception_reset_all()
398 trans = ipa_cmd_trans_alloc(ipa, count); in ipa_endpoint_modem_exception_reset_all()
400 dev_err(&ipa->pdev->dev, in ipa_endpoint_modem_exception_reset_all()
413 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_exception_reset_all()
461 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_cfg()
470 * packet size field, and we have the IPA hardware populate both for each
502 /* Where IPA will write the metadata value */ in ipa_endpoint_init_hdr()
506 /* Where IPA will write the length */ in ipa_endpoint_init_hdr()
520 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr()
548 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr_ext()
567 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr_metadata_mask()
582 dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id; in ipa_endpoint_init_mode()
591 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_mode()
642 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_aggr()
646 * tick represents 128 cycles of the IPA core clock. Return the value
650 static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds) in ipa_reg_init_hol_block_timer_val() argument
663 rate = ipa_clock_rate(ipa); in ipa_reg_init_hol_block_timer_val()
668 /* IPA v3.5.1 just records the tick count */ in ipa_reg_init_hol_block_timer_val()
669 if (ipa->version == IPA_VERSION_3_5_1) in ipa_reg_init_hol_block_timer_val()
672 /* For IPA v4.2, the tick count is represented by base and in ipa_reg_init_hol_block_timer_val()
702 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_timer() local
708 val = ipa_reg_init_hol_block_timer_val(ipa, microseconds); in ipa_endpoint_init_hol_block_timer()
709 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_timer()
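
Per the comment above, ipa_reg_init_hol_block_timer_val() converts a microsecond timeout into timer ticks, where one tick is 128 cycles of the IPA core clock; v3.5.1 stores the raw tick count, while v4.2 encodes it as a base plus scale (not reproduced here). A runnable sketch of the basic conversion, using an arbitrary example clock rate in place of ipa_clock_rate():

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CYCLES_PER_TICK 128u
    #define USEC_PER_SEC    1000000ull

    static uint32_t hol_block_timer_ticks(uint64_t clock_rate_hz, uint32_t microseconds)
    {
        /* ticks = elapsed cycles / cycles-per-tick, rounded to closest */
        uint64_t num = (uint64_t)microseconds * clock_rate_hz;
        uint64_t den = (uint64_t)CYCLES_PER_TICK * USEC_PER_SEC;

        return (uint32_t)((num + den / 2) / den);
    }

    int main(void)
    {
        /* e.g. a 100 MHz core clock and a 500 microsecond timeout -> ~391 ticks */
        printf("%" PRIu32 " ticks\n",
               hol_block_timer_ticks(100 * 1000 * 1000ull, 500));
        return 0;
    }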
721 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_enable()
724 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) in ipa_endpoint_modem_hol_block_clear_all() argument
729 struct ipa_endpoint *endpoint = &ipa->endpoint[i]; in ipa_endpoint_modem_hol_block_clear_all()
753 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_deaggr()
772 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_seq()
821 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status() local
834 status_endpoint_id = ipa->name_map[name]->endpoint_id; in ipa_endpoint_status()
840 /* The next field is present for IPA v4.0 and above */ in ipa_endpoint_status()
844 iowrite32(val, ipa->reg_virt + offset); in ipa_endpoint_status()
940 gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish()
948 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish_enable()
1084 dev_err(&endpoint->ipa->pdev->dev, in ipa_endpoint_status_parse()
1167 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_trans_release() local
1170 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { in ipa_endpoint_trans_release()
1184 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id) in ipa_endpoint_default_route_set() argument
1195 iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET); in ipa_endpoint_default_route_set()
1198 void ipa_endpoint_default_route_clear(struct ipa *ipa) in ipa_endpoint_default_route_clear() argument
1200 ipa_endpoint_default_route_set(ipa, 0); in ipa_endpoint_default_route_clear()
1209 * taken to ensure the IPA pipeline is properly cleared.
1215 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_reset_rx_aggr()
1216 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset_rx_aggr() local
1217 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_reset_rx_aggr()
1282 legacy = ipa->version == IPA_VERSION_3_5_1; in ipa_endpoint_reset_rx_aggr()
1304 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset() local
1309 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation in ipa_endpoint_reset()
1313 * IPA v3.5.1 enables the doorbell engine. Newer versions do not. in ipa_endpoint_reset()
1315 legacy = ipa->version == IPA_VERSION_3_5_1; in ipa_endpoint_reset()
1320 gsi_channel_reset(&ipa->gsi, channel_id, legacy); in ipa_endpoint_reset()
1323 dev_err(&ipa->pdev->dev, in ipa_endpoint_reset()
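
The reset lines above describe two paths: on IPA v3.5.1, resetting an RX endpoint while aggregation is active needs the special ipa_endpoint_reset_rx_aggr() recovery so the pipeline is properly cleared; every other case is a plain GSI channel reset, with the doorbell engine ("legacy") re-enabled only on v3.5.1. A standalone model of that decision, with stand-in types and stub helpers rather than the driver's:

    #include <stdbool.h>

    enum ipa_version { IPA_VERSION_3_5_1, IPA_VERSION_4_0, IPA_VERSION_4_2 };

    struct fake_endpoint {
        enum ipa_version version;
        bool toward_ipa;    /* false for RX (IPA->AP) endpoints */
        bool aggregation;   /* endpoint configured for aggregation */
        bool aggr_active;   /* aggregation frame currently open */
    };

    static int  reset_rx_aggr(struct fake_endpoint *ep)            { (void)ep; return 0; }
    static void channel_reset(struct fake_endpoint *ep, bool db)   { (void)ep; (void)db; }

    static int endpoint_reset(struct fake_endpoint *ep)
    {
        bool legacy = ep->version == IPA_VERSION_3_5_1;
        bool special = !ep->toward_ipa && ep->aggregation;

        if (legacy && special && ep->aggr_active)
            return reset_rx_aggr(ep);   /* careful pipeline clear */

        channel_reset(ep, legacy);      /* doorbell engine only for v3.5.1 */
        return 0;
    }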
1347 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_enable_one() local
1348 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_enable_one()
1353 dev_err(&ipa->pdev->dev, in ipa_endpoint_enable_one()
1361 ipa_interrupt_suspend_enable(ipa->interrupt, in ipa_endpoint_enable_one()
1366 ipa->enabled |= BIT(endpoint->endpoint_id); in ipa_endpoint_enable_one()
1374 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_disable_one() local
1375 struct gsi *gsi = &ipa->gsi; in ipa_endpoint_disable_one()
1378 if (!(ipa->enabled & mask)) in ipa_endpoint_disable_one()
1381 ipa->enabled ^= mask; in ipa_endpoint_disable_one()
1385 ipa_interrupt_suspend_disable(ipa->interrupt, in ipa_endpoint_disable_one()
1392 dev_err(&ipa->pdev->dev, in ipa_endpoint_disable_one()
1399 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_suspend_one()
1400 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_suspend_one()
1404 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_suspend_one()
1412 /* IPA v3.5.1 doesn't use channel stop for suspend */ in ipa_endpoint_suspend_one()
1413 stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; in ipa_endpoint_suspend_one()
1422 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_resume_one()
1423 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_resume_one()
1427 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_resume_one()
1433 /* IPA v3.5.1 doesn't use channel start for resume */ in ipa_endpoint_resume_one()
1434 start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; in ipa_endpoint_resume_one()
1443 void ipa_endpoint_suspend(struct ipa *ipa) in ipa_endpoint_suspend() argument
1445 if (!ipa->setup_complete) in ipa_endpoint_suspend()
1448 if (ipa->modem_netdev) in ipa_endpoint_suspend()
1449 ipa_modem_suspend(ipa->modem_netdev); in ipa_endpoint_suspend()
1451 ipa_cmd_tag_process(ipa); in ipa_endpoint_suspend()
1453 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_suspend()
1454 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_suspend()
1457 void ipa_endpoint_resume(struct ipa *ipa) in ipa_endpoint_resume() argument
1459 if (!ipa->setup_complete) in ipa_endpoint_resume()
1462 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]); in ipa_endpoint_resume()
1463 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]); in ipa_endpoint_resume()
1465 if (ipa->modem_netdev) in ipa_endpoint_resume()
1466 ipa_modem_resume(ipa->modem_netdev); in ipa_endpoint_resume()
1471 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_setup_one()
1494 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); in ipa_endpoint_setup_one()
1499 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_teardown_one()
1507 void ipa_endpoint_setup(struct ipa *ipa) in ipa_endpoint_setup() argument
1509 u32 initialized = ipa->initialized; in ipa_endpoint_setup()
1511 ipa->set_up = 0; in ipa_endpoint_setup()
1517 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_setup()
1521 void ipa_endpoint_teardown(struct ipa *ipa) in ipa_endpoint_teardown() argument
1523 u32 set_up = ipa->set_up; in ipa_endpoint_teardown()
1530 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_teardown()
1532 ipa->set_up = 0; in ipa_endpoint_teardown()
1535 int ipa_endpoint_config(struct ipa *ipa) in ipa_endpoint_config() argument
1537 struct device *dev = &ipa->pdev->dev; in ipa_endpoint_config()
1549 val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET); in ipa_endpoint_config()
1551 /* Our RX is an IPA producer */ in ipa_endpoint_config()
1561 /* Our TX is an IPA consumer */ in ipa_endpoint_config()
1565 ipa->available = rx_mask | tx_mask; in ipa_endpoint_config()
1568 if (ipa->initialized & ~ipa->available) { in ipa_endpoint_config()
1570 ipa->initialized & ~ipa->available); in ipa_endpoint_config()
1574 initialized = ipa->initialized; in ipa_endpoint_config()
1582 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_config()
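
The ipa_endpoint_config() lines above derive the set of endpoints the hardware supports: the FLAVOR_0 register reports the RX (producer) and TX (consumer) endpoint counts, TX IDs start at 0 and RX IDs start at a base read from the same register, and ipa->available must cover every endpoint in ipa->initialized. A standalone sketch of that mask construction and check, assuming the counts are nonzero and fit in 32 endpoints; the real register field extraction is not reproduced here.

    #include <stdint.h>
    #include <stdio.h>

    /* Bit mask covering bits low..high inclusive (0 <= low <= high <= 31) */
    static uint32_t genmask(unsigned int high, unsigned int low)
    {
        return (~0u >> (31 - high)) & (~0u << low);
    }

    static int check_available(uint32_t tx_count, uint32_t rx_base,
                               uint32_t rx_count, uint32_t initialized)
    {
        uint32_t tx_mask = genmask(tx_count - 1, 0);                    /* TX is an IPA consumer */
        uint32_t rx_mask = genmask(rx_base + rx_count - 1, rx_base);    /* RX is an IPA producer */
        uint32_t available = rx_mask | tx_mask;

        /* Reject config data naming endpoints the hardware doesn't have */
        if (initialized & ~available) {
            fprintf(stderr, "unavailable endpoint id(s) 0x%08x\n",
                    initialized & ~available);
            return -1;
        }
        return 0;
    }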
1593 void ipa_endpoint_deconfig(struct ipa *ipa) in ipa_endpoint_deconfig() argument
1595 ipa->available = 0; /* Nothing more to do */ in ipa_endpoint_deconfig()
1598 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, in ipa_endpoint_init_one() argument
1603 endpoint = &ipa->endpoint[data->endpoint_id]; in ipa_endpoint_init_one()
1606 ipa->channel_map[data->channel_id] = endpoint; in ipa_endpoint_init_one()
1607 ipa->name_map[name] = endpoint; in ipa_endpoint_init_one()
1609 endpoint->ipa = ipa; in ipa_endpoint_init_one()
1617 ipa->initialized |= BIT(endpoint->endpoint_id); in ipa_endpoint_init_one()
1622 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_exit_one()
1627 void ipa_endpoint_exit(struct ipa *ipa) in ipa_endpoint_exit() argument
1629 u32 initialized = ipa->initialized; in ipa_endpoint_exit()
1636 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_exit()
1638 memset(ipa->name_map, 0, sizeof(ipa->name_map)); in ipa_endpoint_exit()
1639 memset(ipa->channel_map, 0, sizeof(ipa->channel_map)); in ipa_endpoint_exit()
1643 u32 ipa_endpoint_init(struct ipa *ipa, u32 count, in ipa_endpoint_init() argument
1649 if (!ipa_endpoint_data_valid(ipa, count, data)) in ipa_endpoint_init()
1652 ipa->initialized = 0; in ipa_endpoint_init()
1659 ipa_endpoint_init_one(ipa, name, data); in ipa_endpoint_init()
1665 if (!ipa_filter_map_valid(ipa, filter_map)) in ipa_endpoint_init()
1671 ipa_endpoint_exit(ipa); in ipa_endpoint_init()