Lines matching "endpoint" in drivers/net/ipa/ipa_endpoint.c (each entry: source line number, matched line, enclosing function; "argument" and "local" are the indexer's classification of the identifier)
114 if (data->endpoint.filter_support) { in ipa_endpoint_data_valid_one()
116 "RX endpoint %u\n", in ipa_endpoint_data_valid_one()
124 if (data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
125 other_name = data->endpoint.config.tx.status_endpoint; in ipa_endpoint_data_valid_one()
127 dev_err(dev, "status endpoint name %u out of range " in ipa_endpoint_data_valid_one()
128 "for endpoint %u\n", in ipa_endpoint_data_valid_one()
133 /* Status endpoint must be defined... */ in ipa_endpoint_data_valid_one()
136 dev_err(dev, "DMA endpoint name %u undefined " in ipa_endpoint_data_valid_one()
137 "for endpoint %u\n", in ipa_endpoint_data_valid_one()
142 /* ...and has to be an RX endpoint... */ in ipa_endpoint_data_valid_one()
145 "status endpoint for endpoint %u not RX\n", in ipa_endpoint_data_valid_one()
150 /* ...and if it's to be an AP endpoint... */ in ipa_endpoint_data_valid_one()
153 if (!other_data->endpoint.config.status_enable) { in ipa_endpoint_data_valid_one()
155 "status not enabled for endpoint %u\n", in ipa_endpoint_data_valid_one()
162 if (data->endpoint.config.dma_mode) { in ipa_endpoint_data_valid_one()
163 other_name = data->endpoint.config.dma_endpoint; in ipa_endpoint_data_valid_one()
165 dev_err(dev, "DMA endpoint name %u out of range " in ipa_endpoint_data_valid_one()
166 "for endpoint %u\n", in ipa_endpoint_data_valid_one()
173 dev_err(dev, "DMA endpoint name %u undefined " in ipa_endpoint_data_valid_one()
174 "for endpoint %u\n", in ipa_endpoint_data_valid_one()
200 dev_err(dev, "command TX endpoint not defined\n"); in ipa_endpoint_data_valid()
204 dev_err(dev, "LAN RX endpoint not defined\n"); in ipa_endpoint_data_valid()
208 dev_err(dev, "AP->modem TX endpoint not defined\n"); in ipa_endpoint_data_valid()
212 dev_err(dev, "AP<-modem RX endpoint not defined\n"); in ipa_endpoint_data_valid()
233 /* Allocate a transaction to use on a non-command endpoint */
234 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint, in ipa_endpoint_trans_alloc() argument
237 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_trans_alloc()
238 u32 channel_id = endpoint->channel_id; in ipa_endpoint_trans_alloc()
241 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE; in ipa_endpoint_trans_alloc()
250 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) in ipa_endpoint_init_ctrl() argument
252 u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_ctrl()
253 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_ctrl()
261 * if (endpoint->toward_ipa) in ipa_endpoint_init_ctrl()
266 mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK; in ipa_endpoint_init_ctrl()
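As the matched lines show, ipa_endpoint_init_ctrl() does a single read-modify-write of the endpoint's INIT_CTRL register, selecting the DELAY field for TX endpoints and the SUSPEND field for RX, and reports the field's previous state. A minimal userspace model of that pattern follows; the bit positions and the raw-pointer accessors are assumptions for illustration, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>

#define ENDP_DELAY_FMASK   (1u << 0)   /* TX: delay bit (assumed position) */
#define ENDP_SUSPEND_FMASK (1u << 4)   /* RX: suspend bit (assumed position) */

/* Returns the field's previous state, as the kernel helper does. */
static bool init_ctrl(volatile uint32_t *reg, bool toward_ipa, bool enable)
{
	uint32_t mask = toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
	uint32_t val = *reg;               /* ioread32() in the kernel */
	bool state = !!(val & mask);

	if (enable)
		val |= mask;
	else
		val &= ~mask;
	*reg = val;                        /* iowrite32() in the kernel */

	return state;
}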
281 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable) in ipa_endpoint_program_delay() argument
283 /* assert(endpoint->toward_ipa); */ in ipa_endpoint_program_delay()
286 if (endpoint->ipa->version != IPA_VERSION_4_2) in ipa_endpoint_program_delay()
287 (void)ipa_endpoint_init_ctrl(endpoint, enable); in ipa_endpoint_program_delay()
290 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint) in ipa_endpoint_aggr_active() argument
292 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_aggr_active()
293 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_aggr_active()
304 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint) in ipa_endpoint_force_close() argument
306 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_force_close()
307 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_force_close()
315 * @endpoint: Endpoint on which to emulate a suspend
317 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
322 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint) in ipa_endpoint_suspend_aggr() argument
324 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_suspend_aggr()
326 if (!endpoint->data->aggregation) in ipa_endpoint_suspend_aggr()
329 /* Nothing to do if the endpoint doesn't have aggregation open */ in ipa_endpoint_suspend_aggr()
330 if (!ipa_endpoint_aggr_active(endpoint)) in ipa_endpoint_suspend_aggr()
334 ipa_endpoint_force_close(endpoint); in ipa_endpoint_suspend_aggr()
341 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) in ipa_endpoint_program_suspend() argument
345 if (endpoint->ipa->version != IPA_VERSION_3_5_1) in ipa_endpoint_program_suspend()
348 /* assert(!endpoint->toward_ipa); */ in ipa_endpoint_program_suspend()
350 suspended = ipa_endpoint_init_ctrl(endpoint, enable); in ipa_endpoint_program_suspend()
357 ipa_endpoint_suspend_aggr(endpoint); in ipa_endpoint_program_suspend()
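Per the matched lines, ipa_endpoint_program_suspend() only acts on IPA v3.5.1 (the one version that uses SUSPEND mode), and when an endpoint enters suspend with an aggregation frame open, ipa_endpoint_suspend_aggr() emulates the suspend interrupt by force-closing the frame. A C model of that flow, assuming the condition for the emulation is "enable requested and not already suspended"; the opaque type and all helpers are stand-ins.

#include <stdbool.h>

struct endpoint;                                    /* opaque stand-in */

bool init_ctrl(struct endpoint *ep, bool enable);   /* returns prior state */
bool has_aggregation(struct endpoint *ep);
bool aggr_active(struct endpoint *ep);
void force_close(struct endpoint *ep);
int  ipa_version(struct endpoint *ep);

#define IPA_VERSION_3_5_1 1   /* stand-in value */

static bool program_suspend(struct endpoint *ep, bool enable)
{
	bool suspended;

	if (ipa_version(ep) != IPA_VERSION_3_5_1)
		return enable;       /* only v3.5.1 uses SUSPEND mode */

	suspended = init_ctrl(ep, enable);

	/* Just entered suspend: if an aggregation frame is open, emulate
	 * the suspend interrupt by force-closing the frame so it is not
	 * stranded until resume.
	 */
	if (enable && !suspended && has_aggregation(ep) && aggr_active(ep))
		force_close(ep);

	return suspended;
}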
372 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_pause_all() local
374 if (endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_pause_all()
378 if (endpoint->toward_ipa) in ipa_endpoint_modem_pause_all()
379 ipa_endpoint_program_delay(endpoint, enable); in ipa_endpoint_modem_pause_all()
381 (void)ipa_endpoint_program_suspend(endpoint, enable); in ipa_endpoint_modem_pause_all()
385 /* Reset all modem endpoints to use the default exception endpoint */
392 /* We need one command per modem TX endpoint. We can get an upper in ipa_endpoint_modem_exception_reset_all()
407 struct ipa_endpoint *endpoint; in ipa_endpoint_modem_exception_reset_all() local
413 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_modem_exception_reset_all()
414 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa)) in ipa_endpoint_modem_exception_reset_all()
420 * means status is disabled on the endpoint, and as a in ipa_endpoint_modem_exception_reset_all()
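The comment above notes that one command is needed per modem TX endpoint, which gives an upper bound on the command transaction size. The selection predicate is the same one the loop uses to skip non-matching endpoints; a small stand-in sketch of that count:

#include <stdbool.h>

enum gsi_ee_id { GSI_EE_AP, GSI_EE_MODEM };   /* values are stand-ins */

struct endpoint {
	enum gsi_ee_id ee_id;
	bool toward_ipa;
};

static unsigned int modem_tx_count(const struct endpoint *ep, unsigned int n)
{
	unsigned int count = 0;

	while (n--) {
		/* Same predicate the reset loop uses to skip endpoints */
		if (ep->ee_id == GSI_EE_MODEM && ep->toward_ipa)
			count++;
		ep++;
	}
	return count;
}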
434 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint) in ipa_endpoint_init_cfg() argument
436 u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_cfg()
440 if (endpoint->data->checksum) { in ipa_endpoint_init_cfg()
441 if (endpoint->toward_ipa) { in ipa_endpoint_init_cfg()
461 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_cfg()
465 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
466 * @endpoint: Endpoint pointer
480 * endpoint's METADATA_MASK register defines which byte within the modem
485 static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) in ipa_endpoint_init_hdr() argument
487 u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_hdr()
490 if (endpoint->data->qmap) { in ipa_endpoint_init_hdr()
494 if (endpoint->toward_ipa && endpoint->data->checksum) in ipa_endpoint_init_hdr()
499 if (!endpoint->toward_ipa) { in ipa_endpoint_init_hdr()
520 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr()
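Per the kernel-doc fragment above, the mux_id reaching a QMAP RX endpoint is one byte of a 32-bit metadata value supplied by the modem, and the endpoint's METADATA_MASK register selects which byte that is. A sketch of the extraction; the byte position used here is an assumption for illustration only.

#include <stdint.h>

#define QMAP_MUX_ID_MASK 0x0000ff00u   /* assumed byte position */

static uint8_t qmap_mux_id(uint32_t metadata)
{
	return (metadata & QMAP_MUX_ID_MASK) >> 8;
}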
523 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) in ipa_endpoint_init_hdr_ext() argument
525 u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_hdr_ext()
526 u32 pad_align = endpoint->data->rx.pad_align; in ipa_endpoint_init_hdr_ext()
537 if (endpoint->data->qmap && !endpoint->toward_ipa) { in ipa_endpoint_init_hdr_ext()
545 if (!endpoint->toward_ipa) in ipa_endpoint_init_hdr_ext()
548 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr_ext()
552 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) in ipa_endpoint_init_hdr_metadata_mask() argument
554 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hdr_metadata_mask()
558 if (endpoint->toward_ipa) in ipa_endpoint_init_hdr_metadata_mask()
564 if (endpoint->data->qmap) in ipa_endpoint_init_hdr_metadata_mask()
567 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hdr_metadata_mask()
570 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint) in ipa_endpoint_init_mode() argument
572 u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_mode()
575 if (!endpoint->toward_ipa) in ipa_endpoint_init_mode()
578 if (endpoint->data->dma_mode) { in ipa_endpoint_init_mode()
579 enum ipa_endpoint_name name = endpoint->data->dma_endpoint; in ipa_endpoint_init_mode()
582 dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id; in ipa_endpoint_init_mode()
591 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_mode()
606 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) in ipa_endpoint_init_aggr() argument
608 u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_aggr()
611 if (endpoint->data->aggregation) { in ipa_endpoint_init_aggr()
612 if (!endpoint->toward_ipa) { in ipa_endpoint_init_aggr()
627 if (endpoint->data->rx.aggr_close_eof) in ipa_endpoint_init_aggr()
642 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_aggr()
698 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint, in ipa_endpoint_init_hol_block_timer() argument
701 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_timer()
702 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_init_hol_block_timer()
713 ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable) in ipa_endpoint_init_hol_block_enable() argument
715 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_init_hol_block_enable()
721 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_hol_block_enable()
729 struct ipa_endpoint *endpoint = &ipa->endpoint[i]; in ipa_endpoint_modem_hol_block_clear_all() local
731 if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) in ipa_endpoint_modem_hol_block_clear_all()
734 ipa_endpoint_init_hol_block_enable(endpoint, false); in ipa_endpoint_modem_hol_block_clear_all()
735 ipa_endpoint_init_hol_block_timer(endpoint, 0); in ipa_endpoint_modem_hol_block_clear_all()
736 ipa_endpoint_init_hol_block_enable(endpoint, true); in ipa_endpoint_modem_hol_block_clear_all()
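ipa_endpoint_modem_hol_block_clear_all() walks the endpoint array and, for each modem RX endpoint, toggles head-of-line blocking off and back on with a zero timeout, so blocked traffic is not held behind a stale timer. A stand-in sketch of that walk; types and helpers are assumptions.

#include <stdbool.h>

enum gsi_ee_id { GSI_EE_AP, GSI_EE_MODEM };   /* values are stand-ins */

struct endpoint { enum gsi_ee_id ee_id; bool toward_ipa; };

void hol_block_enable(struct endpoint *ep, bool enable);
void hol_block_timer_set(struct endpoint *ep, unsigned int microseconds);

static void hol_block_clear_all(struct endpoint *eps, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++) {
		struct endpoint *ep = &eps[i];

		/* Only modem RX endpoints are affected */
		if (ep->toward_ipa || ep->ee_id != GSI_EE_MODEM)
			continue;

		hol_block_enable(ep, false);
		hol_block_timer_set(ep, 0);   /* zero timeout: nothing waits */
		hol_block_enable(ep, true);
	}
}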
740 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint) in ipa_endpoint_init_deaggr() argument
742 u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_deaggr()
745 if (!endpoint->toward_ipa) in ipa_endpoint_init_deaggr()
753 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_deaggr()
756 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint) in ipa_endpoint_init_seq() argument
758 u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id); in ipa_endpoint_init_seq()
759 u32 seq_type = endpoint->seq_type; in ipa_endpoint_init_seq()
762 if (!endpoint->toward_ipa) in ipa_endpoint_init_seq()
772 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_endpoint_init_seq()
777 * @endpoint: Endpoint pointer
782 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb) in ipa_endpoint_skb_tx() argument
788 /* Make sure source endpoint's TLV FIFO has enough entries to in ipa_endpoint_skb_tx()
793 if (1 + nr_frags > endpoint->trans_tre_max) { in ipa_endpoint_skb_tx()
799 trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags); in ipa_endpoint_skb_tx()
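ipa_endpoint_skb_tx() budgets one TRE for the skb's linear data plus one per page fragment; if that exceeds the channel's per-transaction TRE limit, the kernel linearizes the skb (skb_linearize()) so it fits in a single TRE. A sketch of the budget check, with skb handling reduced to plain integers:

#include <stdbool.h>

static unsigned int tre_count_for_tx(unsigned int nr_frags,
				     unsigned int trans_tre_max,
				     bool *linearize)
{
	if (1 + nr_frags > trans_tre_max) {
		*linearize = true;    /* skb_linearize() in the kernel */
		return 1;             /* a linear skb needs a single TRE */
	}
	*linearize = false;
	return 1 + nr_frags;          /* linear data plus one per fragment */
}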
818 static void ipa_endpoint_status(struct ipa_endpoint *endpoint) in ipa_endpoint_status() argument
820 u32 endpoint_id = endpoint->endpoint_id; in ipa_endpoint_status()
821 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_status()
827 if (endpoint->data->status_enable) { in ipa_endpoint_status()
829 if (endpoint->toward_ipa) { in ipa_endpoint_status()
833 name = endpoint->data->tx.status_endpoint; in ipa_endpoint_status()
847 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint) in ipa_endpoint_replenish_one() argument
860 trans = ipa_endpoint_trans_alloc(endpoint, 1); in ipa_endpoint_replenish_one()
873 if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) { in ipa_endpoint_replenish_one()
875 endpoint->replenish_ready = 0; in ipa_endpoint_replenish_one()
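The replenish_ready counter above implements batched doorbells: each new receive buffer is committed to the channel, but the doorbell is rung only once per IPA_REPLENISH_BATCH buffers, trading a little latency for fewer MMIO writes. A sketch of that batching; the batch size and the commit helper are stand-ins.

#include <stdbool.h>

#define IPA_REPLENISH_BATCH 16   /* assumed value */

struct rx_state { unsigned int replenish_ready; };

void trans_commit(void *trans, bool ring_doorbell);   /* stand-in */

static void commit_buffer(struct rx_state *rx, void *trans)
{
	/* Ring the channel doorbell only once per batch of queued
	 * receive buffers, saving an MMIO write on the rest.
	 */
	bool doorbell = (++rx->replenish_ready == IPA_REPLENISH_BATCH);

	trans_commit(trans, doorbell);
	if (doorbell)
		rx->replenish_ready = 0;
}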
892 * @endpoint: Endpoint to be replenished
896 * for an endpoint. These are supplied to the hardware, which fills
899 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count) in ipa_endpoint_replenish() argument
904 if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) { in ipa_endpoint_replenish()
906 atomic_add(count, &endpoint->replenish_saved); in ipa_endpoint_replenish()
911 if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) { in ipa_endpoint_replenish()
913 atomic_add(count, &endpoint->replenish_backlog); in ipa_endpoint_replenish()
917 while (atomic_dec_not_zero(&endpoint->replenish_backlog)) in ipa_endpoint_replenish()
918 if (ipa_endpoint_replenish_one(endpoint)) in ipa_endpoint_replenish()
921 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); in ipa_endpoint_replenish()
924 atomic_add(count, &endpoint->replenish_backlog); in ipa_endpoint_replenish()
929 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); in ipa_endpoint_replenish()
932 backlog = atomic_add_return(count + 1, &endpoint->replenish_backlog); in ipa_endpoint_replenish()
940 gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish()
941 if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id)) in ipa_endpoint_replenish()
942 schedule_delayed_work(&endpoint->replenish_work, in ipa_endpoint_replenish()
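The replenish path above layers three pieces of bookkeeping on top of buffer allocation: an ENABLED flag that parks incoming work in replenish_saved, an ACTIVE flag that keeps a single thread in the refill loop while others just bump replenish_backlog, and a delayed-work retry taken only when the backlog equals the channel's TRE count (meaning no completion is in flight to trigger a retry). A userspace model of that accounting using C11 atomics; the struct layout, helper names, and the tre_max field are stand-ins, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>

struct rx_endpoint {
	atomic_flag active;        /* models IPA_REPLENISH_ACTIVE */
	_Atomic bool enabled;      /* models IPA_REPLENISH_ENABLED */
	atomic_uint backlog;       /* buffers still owed to the hardware */
	atomic_uint saved;         /* backlog parked while disabled */
	unsigned int tre_max;      /* stand-in for gsi_channel_tre_max() */
};

bool replenish_one(struct rx_endpoint *ep);   /* stand-in; may fail */
void schedule_retry(struct rx_endpoint *ep);  /* stand-in delayed work */

/* atomic_dec_not_zero() equivalent built on a CAS loop */
static bool dec_not_zero(atomic_uint *v)
{
	unsigned int old = atomic_load(v);

	while (old)
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return true;  /* failed CAS reloads old; retry */
	return false;
}

static void replenish(struct rx_endpoint *ep, unsigned int count)
{
	if (!atomic_load(&ep->enabled)) {
		if (count)            /* park the work until re-enabled */
			atomic_fetch_add(&ep->saved, count);
		return;
	}

	/* One thread replenishes at a time; others just add work */
	if (atomic_flag_test_and_set(&ep->active)) {
		if (count)
			atomic_fetch_add(&ep->backlog, count);
		return;
	}

	while (dec_not_zero(&ep->backlog)) {
		if (!replenish_one(ep)) {
			unsigned int backlog;

			/* Failed: return the unit we took plus the
			 * caller's count.  If the backlog now equals the
			 * channel's TRE count, no completion is in flight
			 * to retry for us, so fall back to delayed work.
			 */
			atomic_flag_clear(&ep->active);
			backlog = atomic_fetch_add(&ep->backlog, count + 1)
					+ count + 1;
			if (backlog == ep->tre_max)
				schedule_retry(ep);
			return;
		}
	}
	atomic_flag_clear(&ep->active);
	if (count)
		atomic_fetch_add(&ep->backlog, count);
}

The enable/disable helpers that follow in the listing drain one counter into the other with atomic exchanges, so work queued while replenishing is disabled is replayed later rather than lost.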
946 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint) in ipa_endpoint_replenish_enable() argument
948 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_replenish_enable()
952 set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); in ipa_endpoint_replenish_enable()
953 while ((saved = atomic_xchg(&endpoint->replenish_saved, 0))) in ipa_endpoint_replenish_enable()
954 atomic_add(saved, &endpoint->replenish_backlog); in ipa_endpoint_replenish_enable()
957 max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id); in ipa_endpoint_replenish_enable()
958 if (atomic_read(&endpoint->replenish_backlog) == max_backlog) in ipa_endpoint_replenish_enable()
959 ipa_endpoint_replenish(endpoint, 0); in ipa_endpoint_replenish_enable()
962 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint) in ipa_endpoint_replenish_disable() argument
966 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); in ipa_endpoint_replenish_disable()
967 while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0))) in ipa_endpoint_replenish_disable()
968 atomic_add(backlog, &endpoint->replenish_saved); in ipa_endpoint_replenish_disable()
974 struct ipa_endpoint *endpoint; in ipa_endpoint_replenish_work() local
976 endpoint = container_of(dwork, struct ipa_endpoint, replenish_work); in ipa_endpoint_replenish_work()
978 ipa_endpoint_replenish(endpoint, 0); in ipa_endpoint_replenish_work()
981 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint, in ipa_endpoint_skb_copy() argument
994 if (endpoint->netdev) in ipa_endpoint_skb_copy()
995 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_copy()
1000 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint, in ipa_endpoint_skb_build() argument
1006 if (!endpoint->netdev) in ipa_endpoint_skb_build()
1018 ipa_modem_skb_rx(endpoint->netdev, skb); in ipa_endpoint_skb_build()
1039 static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, in ipa_endpoint_status_skip() argument
1050 if (endpoint_id != endpoint->endpoint_id) in ipa_endpoint_status_skip()
1071 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint, in ipa_endpoint_status_parse() argument
1084 dev_err(&endpoint->ipa->pdev->dev, in ipa_endpoint_status_parse()
1091 if (ipa_endpoint_status_skip(endpoint, status)) { in ipa_endpoint_status_parse()
1104 align = endpoint->data->rx.pad_align ? : 1; in ipa_endpoint_status_parse()
1107 if (endpoint->data->checksum) in ipa_endpoint_status_parse()
1120 ipa_endpoint_skb_copy(endpoint, data2, len2, extra); in ipa_endpoint_status_parse()
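ipa_endpoint_status_parse() advances through the receive buffer one status-plus-packet unit at a time, rounding each packet length up to the endpoint's RX pad alignment (a pad_align of zero means no padding, hence the "? : 1" fallback above). A sketch of that rounding:

/* Round a packet length up to the endpoint's pad alignment */
static unsigned int padded_len(unsigned int len, unsigned int pad_align)
{
	unsigned int align = pad_align ? pad_align : 1;

	return (len + align - 1) / align * align;
}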
1130 static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint, in ipa_endpoint_tx_complete() argument
1136 static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint, in ipa_endpoint_rx_complete() argument
1141 ipa_endpoint_replenish(endpoint, 1); in ipa_endpoint_rx_complete()
1148 if (endpoint->data->status_enable) in ipa_endpoint_rx_complete()
1149 ipa_endpoint_status_parse(endpoint, page, trans->len); in ipa_endpoint_rx_complete()
1150 else if (ipa_endpoint_skb_build(endpoint, page, trans->len)) in ipa_endpoint_rx_complete()
1154 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint, in ipa_endpoint_trans_complete() argument
1157 if (endpoint->toward_ipa) in ipa_endpoint_trans_complete()
1158 ipa_endpoint_tx_complete(endpoint, trans); in ipa_endpoint_trans_complete()
1160 ipa_endpoint_rx_complete(endpoint, trans); in ipa_endpoint_trans_complete()
1163 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint, in ipa_endpoint_trans_release() argument
1166 if (endpoint->toward_ipa) { in ipa_endpoint_trans_release()
1167 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_trans_release()
1170 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) { in ipa_endpoint_trans_release()
1204 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1205 * @endpoint: Endpoint to be reset
1207 * If aggregation is active on an RX endpoint when a reset is performed
1213 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) in ipa_endpoint_reset_rx_aggr() argument
1215 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_reset_rx_aggr()
1216 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset_rx_aggr()
1237 ipa_endpoint_force_close(endpoint); in ipa_endpoint_reset_rx_aggr()
1244 gsi_channel_reset(gsi, endpoint->channel_id, false); in ipa_endpoint_reset_rx_aggr()
1247 suspended = ipa_endpoint_program_suspend(endpoint, false); in ipa_endpoint_reset_rx_aggr()
1250 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1254 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr); in ipa_endpoint_reset_rx_aggr()
1261 if (!ipa_endpoint_aggr_active(endpoint)) in ipa_endpoint_reset_rx_aggr()
1267 if (ipa_endpoint_aggr_active(endpoint)) in ipa_endpoint_reset_rx_aggr()
1268 dev_err(dev, "endpoint %u still active during reset\n", in ipa_endpoint_reset_rx_aggr()
1269 endpoint->endpoint_id); in ipa_endpoint_reset_rx_aggr()
1271 gsi_trans_read_byte_done(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1273 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1283 gsi_channel_reset(gsi, endpoint->channel_id, legacy); in ipa_endpoint_reset_rx_aggr()
1290 (void)gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_reset_rx_aggr()
1293 (void)ipa_endpoint_program_suspend(endpoint, true); in ipa_endpoint_reset_rx_aggr()
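The matched lines give the order of operations for this workaround: force-close any open aggregation frame, reset the channel, clear the endpoint's suspend bit, start the channel and feed it a single byte, wait (bounded) for aggregation to go inactive, then stop the channel and do the final reset. A sketch of that ordering with every helper a stand-in; the retry budget is an assumed value, the legacy flag on the final reset is version-dependent, and the kernel additionally allocates a DMA buffer for the byte it feeds the channel.

#include <stdbool.h>

struct endpoint;   /* opaque; all helpers below are stand-ins */

void force_close(struct endpoint *ep);
void channel_reset(struct endpoint *ep, bool legacy);
bool program_suspend(struct endpoint *ep, bool enable);
int  channel_start(struct endpoint *ep);
int  channel_stop(struct endpoint *ep);
int  read_one_byte(struct endpoint *ep);
void read_one_byte_done(struct endpoint *ep);
bool aggr_active(struct endpoint *ep);
void sleep_ms(unsigned int ms);

#define AGGR_RETRIES 5   /* assumed poll budget */

static int reset_rx_aggr(struct endpoint *ep)
{
	int retries = AGGR_RETRIES;
	bool suspended;
	int ret;

	force_close(ep);                  /* close any open aggr frame */
	channel_reset(ep, false);
	suspended = program_suspend(ep, false);  /* must not be suspended */

	ret = channel_start(ep);
	if (ret)
		goto out_suspend;

	ret = read_one_byte(ep);          /* one byte makes the channel move */
	if (ret)
		goto err_stop;

	do {                              /* bounded wait for aggr to idle */
		if (!aggr_active(ep))
			break;
		sleep_ms(1);
	} while (retries--);              /* kernel logs if still active */

	read_one_byte_done(ep);
	ret = channel_stop(ep);
	if (ret)
		goto out_suspend;

	channel_reset(ep, true);          /* final reset; legacy is
					   * version-dependent in the kernel */
	goto out_suspend;

err_stop:
	(void)channel_stop(ep);
out_suspend:
	if (suspended)                    /* restore prior suspend state */
		(void)program_suspend(ep, true);
	return ret;
}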
1301 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) in ipa_endpoint_reset() argument
1303 u32 channel_id = endpoint->channel_id; in ipa_endpoint_reset()
1304 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_reset()
1309 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation in ipa_endpoint_reset()
1316 special = !endpoint->toward_ipa && endpoint->data->aggregation; in ipa_endpoint_reset()
1317 if (special && ipa_endpoint_aggr_active(endpoint)) in ipa_endpoint_reset()
1318 ret = ipa_endpoint_reset_rx_aggr(endpoint); in ipa_endpoint_reset()
1324 "error %d resetting channel %u for endpoint %u\n", in ipa_endpoint_reset()
1325 ret, endpoint->channel_id, endpoint->endpoint_id); in ipa_endpoint_reset()
1328 static void ipa_endpoint_program(struct ipa_endpoint *endpoint) in ipa_endpoint_program() argument
1330 if (endpoint->toward_ipa) in ipa_endpoint_program()
1331 ipa_endpoint_program_delay(endpoint, false); in ipa_endpoint_program()
1333 (void)ipa_endpoint_program_suspend(endpoint, false); in ipa_endpoint_program()
1334 ipa_endpoint_init_cfg(endpoint); in ipa_endpoint_program()
1335 ipa_endpoint_init_hdr(endpoint); in ipa_endpoint_program()
1336 ipa_endpoint_init_hdr_ext(endpoint); in ipa_endpoint_program()
1337 ipa_endpoint_init_hdr_metadata_mask(endpoint); in ipa_endpoint_program()
1338 ipa_endpoint_init_mode(endpoint); in ipa_endpoint_program()
1339 ipa_endpoint_init_aggr(endpoint); in ipa_endpoint_program()
1340 ipa_endpoint_init_deaggr(endpoint); in ipa_endpoint_program()
1341 ipa_endpoint_init_seq(endpoint); in ipa_endpoint_program()
1342 ipa_endpoint_status(endpoint); in ipa_endpoint_program()
1345 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint) in ipa_endpoint_enable_one() argument
1347 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_enable_one()
1351 ret = gsi_channel_start(gsi, endpoint->channel_id); in ipa_endpoint_enable_one()
1354 "error %d starting %cX channel %u for endpoint %u\n", in ipa_endpoint_enable_one()
1355 ret, endpoint->toward_ipa ? 'T' : 'R', in ipa_endpoint_enable_one()
1356 endpoint->channel_id, endpoint->endpoint_id); in ipa_endpoint_enable_one()
1360 if (!endpoint->toward_ipa) { in ipa_endpoint_enable_one()
1362 endpoint->endpoint_id); in ipa_endpoint_enable_one()
1363 ipa_endpoint_replenish_enable(endpoint); in ipa_endpoint_enable_one()
1366 ipa->enabled |= BIT(endpoint->endpoint_id); in ipa_endpoint_enable_one()
1371 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint) in ipa_endpoint_disable_one() argument
1373 u32 mask = BIT(endpoint->endpoint_id); in ipa_endpoint_disable_one()
1374 struct ipa *ipa = endpoint->ipa; in ipa_endpoint_disable_one()
1383 if (!endpoint->toward_ipa) { in ipa_endpoint_disable_one()
1384 ipa_endpoint_replenish_disable(endpoint); in ipa_endpoint_disable_one()
1386 endpoint->endpoint_id); in ipa_endpoint_disable_one()
1390 ret = gsi_channel_stop(gsi, endpoint->channel_id); in ipa_endpoint_disable_one()
1393 "error %d attempting to stop endpoint %u\n", ret, in ipa_endpoint_disable_one()
1394 endpoint->endpoint_id); in ipa_endpoint_disable_one()
1397 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) in ipa_endpoint_suspend_one() argument
1399 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_suspend_one()
1400 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_suspend_one()
1404 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_suspend_one()
1407 if (!endpoint->toward_ipa) { in ipa_endpoint_suspend_one()
1408 ipa_endpoint_replenish_disable(endpoint); in ipa_endpoint_suspend_one()
1409 (void)ipa_endpoint_program_suspend(endpoint, true); in ipa_endpoint_suspend_one()
1413 stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; in ipa_endpoint_suspend_one()
1414 ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel); in ipa_endpoint_suspend_one()
1417 endpoint->channel_id); in ipa_endpoint_suspend_one()
1420 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) in ipa_endpoint_resume_one() argument
1422 struct device *dev = &endpoint->ipa->pdev->dev; in ipa_endpoint_resume_one()
1423 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_resume_one()
1427 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) in ipa_endpoint_resume_one()
1430 if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1431 (void)ipa_endpoint_program_suspend(endpoint, false); in ipa_endpoint_resume_one()
1434 start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; in ipa_endpoint_resume_one()
1435 ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel); in ipa_endpoint_resume_one()
1438 endpoint->channel_id); in ipa_endpoint_resume_one()
1439 else if (!endpoint->toward_ipa) in ipa_endpoint_resume_one()
1440 ipa_endpoint_replenish_enable(endpoint); in ipa_endpoint_resume_one()
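Both suspend and resume branch on the IPA version: v3.5.1 endpoints are suspended through the SUSPEND register bit while the GSI channel keeps running, whereas later versions simply stop and restart the channel. A sketch of that split; the types, helpers, and version constant are stand-ins.

#include <stdbool.h>

struct endpoint { bool toward_ipa; int version; };   /* stand-ins */

#define IPA_VERSION_3_5_1 1   /* stand-in value */

void replenish_disable(struct endpoint *ep);
void replenish_enable(struct endpoint *ep);
bool program_suspend(struct endpoint *ep, bool enable);
int  channel_suspend(struct endpoint *ep, bool stop_channel);
int  channel_resume(struct endpoint *ep, bool start_channel);

static void suspend_one(struct endpoint *ep)
{
	if (!ep->toward_ipa) {
		replenish_disable(ep);
		(void)program_suspend(ep, true);  /* no-op except v3.5.1 */
	}

	/* Later versions have no SUSPEND mode: stop the channel instead */
	(void)channel_suspend(ep, ep->version != IPA_VERSION_3_5_1);
}

static void resume_one(struct endpoint *ep)
{
	if (!ep->toward_ipa)
		(void)program_suspend(ep, false);

	(void)channel_resume(ep, ep->version != IPA_VERSION_3_5_1);

	if (!ep->toward_ipa)
		replenish_enable(ep);
}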
1469 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint) in ipa_endpoint_setup_one() argument
1471 struct gsi *gsi = &endpoint->ipa->gsi; in ipa_endpoint_setup_one()
1472 u32 channel_id = endpoint->channel_id; in ipa_endpoint_setup_one()
1475 if (endpoint->ee_id != GSI_EE_AP) in ipa_endpoint_setup_one()
1478 endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id); in ipa_endpoint_setup_one()
1479 if (!endpoint->toward_ipa) { in ipa_endpoint_setup_one()
1483 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); in ipa_endpoint_setup_one()
1484 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); in ipa_endpoint_setup_one()
1485 atomic_set(&endpoint->replenish_saved, in ipa_endpoint_setup_one()
1486 gsi_channel_tre_max(gsi, endpoint->channel_id)); in ipa_endpoint_setup_one()
1487 atomic_set(&endpoint->replenish_backlog, 0); in ipa_endpoint_setup_one()
1488 INIT_DELAYED_WORK(&endpoint->replenish_work, in ipa_endpoint_setup_one()
1492 ipa_endpoint_program(endpoint); in ipa_endpoint_setup_one()
1494 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id); in ipa_endpoint_setup_one()
1497 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint) in ipa_endpoint_teardown_one() argument
1499 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_teardown_one()
1501 if (!endpoint->toward_ipa) in ipa_endpoint_teardown_one()
1502 cancel_delayed_work_sync(&endpoint->replenish_work); in ipa_endpoint_teardown_one()
1504 ipa_endpoint_reset(endpoint); in ipa_endpoint_teardown_one()
1517 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_setup()
1530 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_teardown()
1569 dev_err(dev, "unavailable endpoint id(s) 0x%08x\n", in ipa_endpoint_config()
1577 struct ipa_endpoint *endpoint; in ipa_endpoint_config() local
1582 endpoint = &ipa->endpoint[endpoint_id]; in ipa_endpoint_config()
1583 if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) { in ipa_endpoint_config()
1584 dev_err(dev, "endpoint id %u wrong direction\n", in ipa_endpoint_config()
1601 struct ipa_endpoint *endpoint; in ipa_endpoint_init_one() local
1603 endpoint = &ipa->endpoint[data->endpoint_id]; in ipa_endpoint_init_one()
1606 ipa->channel_map[data->channel_id] = endpoint; in ipa_endpoint_init_one()
1607 ipa->name_map[name] = endpoint; in ipa_endpoint_init_one()
1609 endpoint->ipa = ipa; in ipa_endpoint_init_one()
1610 endpoint->ee_id = data->ee_id; in ipa_endpoint_init_one()
1611 endpoint->seq_type = data->endpoint.seq_type; in ipa_endpoint_init_one()
1612 endpoint->channel_id = data->channel_id; in ipa_endpoint_init_one()
1613 endpoint->endpoint_id = data->endpoint_id; in ipa_endpoint_init_one()
1614 endpoint->toward_ipa = data->toward_ipa; in ipa_endpoint_init_one()
1615 endpoint->data = &data->endpoint.config; in ipa_endpoint_init_one()
1617 ipa->initialized |= BIT(endpoint->endpoint_id); in ipa_endpoint_init_one()
1620 void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint) in ipa_endpoint_exit_one() argument
1622 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id); in ipa_endpoint_exit_one()
1624 memset(endpoint, 0, sizeof(*endpoint)); in ipa_endpoint_exit_one()
1636 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]); in ipa_endpoint_exit()
1661 if (data->endpoint.filter_support) in ipa_endpoint_init()