// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_power.h"

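/* Decrement an atomic_t unless its value is already zero; returns
 * nonzero if the decrement was performed (see atomic_add_unless()).
 */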
#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

#define IPA_REPLENISH_BATCH	16

/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
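/* (Illustrative expansion, assuming the standard SKB_MAX_ORDER()
 * definition: with order-0 pages this overhead works out to
 * NET_SKB_PAD plus the SKB_DATA_ALIGN()ed struct skb_shared_info
 * that build_skb() expects at the end of the buffer.)
 */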

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
#define IPA_AGGR_TIME_LIMIT			500	/* microseconds */

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};

/* Field masks for struct ipa_status structure fields */
#define IPA_STATUS_MASK_TAG_VALID_FMASK		GENMASK(4, 4)
#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK		GENMASK_ULL(63, 16)
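/* (For example, ipa_endpoint_status_tag() below recovers the source
 * endpoint with u8_get_bits(status->endp_src_idx,
 * IPA_STATUS_SRC_IDX_FMASK), i.e. the low-order five bits.)
 */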

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			const struct ipa_gsi_endpoint_data *all_data,
			const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static u32 aggr_byte_limit_max(enum ipa_version version)
{
	if (version < IPA_VERSION_4_5)
		return field_max(aggr_byte_limit_fmask(true));

	return field_max(aggr_byte_limit_fmask(false));
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;
	u32 limit;

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* The aggregation byte limit defines the point at which an
	 * aggregation window will close. It is programmed into the
	 * IPA hardware as a number of KB. We don't use "hard byte
	 * limit" aggregation, which means that we need to supply
	 * enough space in a receive buffer to hold a complete MTU
	 * plus normal skb overhead *after* that aggregation byte
	 * limit has been crossed.
	 *
	 * This check ensures we don't define a receive buffer size
	 * that would exceed what we can represent in the field that
	 * is used to program its size.
	 */
	limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
	limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
	if (limit < IPA_RX_BUFFER_SIZE) {
		dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
			IPA_RX_BUFFER_SIZE, limit);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	bool state;
	u32 mask;
	u32 val;

	/* Suspend is not supported for IPA v4.0+. Delay doesn't work
	 * correctly on IPA v4.2.
	 */
	if (endpoint->toward_ipa)
		WARN_ON(ipa->version == IPA_VERSION_4_2);
	else
		WARN_ON(ipa->version >= IPA_VERSION_4_0);

	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	state = !!(val & mask);

	/* Don't bother if it's already in the requested state */
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}

/* We currently don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	WARN_ON(!endpoint->toward_ipa);

	/* Delay mode doesn't work properly for IPA v4.2 */
	if (endpoint->ipa->version != IPA_VERSION_4_2)
		(void)ipa_endpoint_init_ctrl(endpoint, enable);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	WARN_ON(!(mask & ipa->available));

	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	WARN_ON(!(mask & ipa->available));

	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint: Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame. This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->data->aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}

/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version >= IPA_VERSION_4_0)
		return enable;	/* For IPA v4.0+, no change made */

	WARN_ON(endpoint->toward_ipa);

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt. If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}

/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id;

	/* DELAY mode doesn't work correctly on IPA v4.2 */
	if (ipa->version == IPA_VERSION_4_2)
		return;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		/* Set TX delay mode or RX suspend mode */
		if (endpoint->toward_ipa)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			(void)ipa_endpoint_program_suspend(endpoint, enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint. We can get an upper
	 * bound on that by assuming all initialized endpoints are modem->IPA.
	 * That won't happen, and we could be more precise, but this is fine
	 * for now. End the transaction with commands to clear the pipeline.
	 */
	count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated. That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_pipeline_clear_add(trans);

	/* XXX This should have a 1 second timeout */
	gsi_trans_commit_wait(trans);

	ipa_cmd_pipeline_clear_wait(ipa);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	enum ipa_cs_offload_en enabled;
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->data->checksum) {
		enum ipa_version version = endpoint->ipa->version;

		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
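			/* (struct rmnet_map_header is 4 bytes long,
			 * so this works out to an offset of 1.)
			 */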
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);

			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_UL
					: IPA_CS_OFFLOAD_INLINE;
		} else {
			enabled = version < IPA_VERSION_4_5
					? IPA_CS_OFFLOAD_DL
					: IPA_CS_OFFLOAD_INLINE;
		}
	} else {
		enabled = IPA_CS_OFFLOAD_NONE;
	}
	val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
	u32 offset;
	u32 val;

	if (!endpoint->toward_ipa)
		return;

	offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
	val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static u32
ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
{
	u32 header_size = sizeof(struct rmnet_map_header);

	/* Without checksum offload, we just have the MAP header */
	if (!endpoint->data->checksum)
		return header_size;

	if (version < IPA_VERSION_4_5) {
		/* Checksum header inserted for AP TX endpoints only */
		if (endpoint->toward_ipa)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
	} else {
		/* Checksum header is used in both directions */
		header_size += sizeof(struct rmnet_map_v5_csum_header);
	}

	return header_size;
}

/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint: Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure. The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet. The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field. That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem. It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte. A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id. And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	if (endpoint->data->qmap) {
		enum ipa_version version = ipa->version;
		size_t header_size;

		header_size = ipa_qmap_header_size(version, endpoint);
		val = ipa_header_size_encoded(version, header_size);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 offset;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			offset = offsetof(struct rmnet_map_header, mux_id);
			val |= ipa_metadata_offset_encoded(version, offset);

			/* Where IPA will write the length */
			offset = offsetof(struct rmnet_map_header, pkt_len);
			/* Upper bits are stored in HDR_EXT with IPA v4.5 */
			if (version >= IPA_VERSION_4_5)
				offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);

			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= HDR_OFST_METADATA_VALID_FMASK;

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->data->rx.pad_align;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;

	if (endpoint->data->qmap) {
		/* We have a header, so we must specify its endianness */
		val |= HDR_ENDIANNESS_FMASK;	/* big endian */

		/* A QMAP header contains a 6 bit pad field at offset 0.
		 * The RMNet driver assumes this field is meaningful in
		 * packets it receives, and assumes the header's payload
		 * length includes that padding. The RMNet driver does
		 * *not* pad packets it sends, however, so the pad field
		 * (although 0) should be ignored.
		 */
		if (!endpoint->toward_ipa) {
			val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
			/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
			val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
			/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
		}
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	/* IPA v4.5 adds some most-significant bits to a few fields,
	 * two of which are defined in the HDR (not HDR_EXT) register.
	 */
	if (ipa->version >= IPA_VERSION_4_5) {
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
		if (endpoint->data->qmap && !endpoint->toward_ipa) {
			u32 offset;

			offset = offsetof(struct rmnet_map_header, pkt_len);
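			/* Shift away the bits already programmed into
			 * the HDR register; what's left is the MSB part.
			 */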
			offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
			val |= u32_encode_bits(offset,
					       HDR_OFST_PKT_SIZE_MSB_FMASK);
			/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
		}
	}
	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->data->qmap)
		val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	if (endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* All other bits unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
	/* We don't use "hard byte limit" aggregation, so we define the
	 * aggregation limit such that our buffer has enough space *after*
	 * that limit to receive a full MTU of data, plus overhead.
	 */
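	/* (Illustrative arithmetic, assuming 4 KB pages, a 64-byte
	 * NET_SKB_PAD and a 1500-byte IPA_MTU: the caller passes
	 * 8192 - 64 = 8128; subtracting the MTU and a few hundred
	 * bytes of skb overhead leaves a bit over 6 KB, so 6 is
	 * the value programmed.)
	 */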
	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	return rx_buffer_size / SZ_1K;
}

/* Encoded values for AGGR endpoint register fields */
static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
{
	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(limit, aggr_byte_limit_fmask(true));

	return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
}

/* Encode the aggregation timer limit (microseconds) based on IPA version */
static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
{
	u32 gran_sel;
	u32 fmask;
	u32 val;

	if (version < IPA_VERSION_4_5) {
		/* We set aggregation granularity in ipa_hardware_config() */
		limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);

		return u32_encode_bits(limit, aggr_time_limit_fmask(true));
	}

	/* IPA v4.5 expresses the time limit using Qtime. The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively. Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
	 */
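	/* (For example, the 500 microsecond IPA_AGGR_TIME_LIMIT used
	 * for RX aggregation encodes as val = 5 with gran_sel = 0,
	 * assuming the field is wide enough to hold the value 5.)
	 */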
	fmask = aggr_time_limit_fmask(false);
	val = DIV_ROUND_CLOSEST(limit, 100);
	if (val > field_max(fmask)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = AGGR_GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(limit, 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, fmask);
}

static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
{
	u32 val = enabled ? 1 : 0;

	if (version < IPA_VERSION_4_5)
		return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));

	return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
}

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	enum ipa_version version = endpoint->ipa->version;
	u32 val = 0;

	if (endpoint->data->aggregation) {
		if (!endpoint->toward_ipa) {
			u32 buffer_size;
			bool close_eof;
			u32 limit;

			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);

			buffer_size = IPA_RX_BUFFER_SIZE - NET_SKB_PAD;
			limit = ipa_aggr_size_kb(buffer_size);
			val |= aggr_byte_limit_encoded(version, limit);

			limit = IPA_AGGR_TIME_LIMIT;
			val |= aggr_time_limit_encoded(version, limit);

			/* AGGR_PKT_LIMIT is 0 (unlimited) */

			close_eof = endpoint->data->rx.aggr_close_eof;
			val |= aggr_sw_eof_active_encoded(version, close_eof);
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
		/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Return the Qtime-based head-of-line blocking timer value that
 * represents the given number of microseconds. The result
 * includes both the timer value and the selected timer granularity.
 */
static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
{
	u32 gran_sel;
	u32 val;

	/* IPA v4.5 expresses time limits using Qtime. The AP has
	 * pulse generators 0 and 1 available, which were configured
	 * in ipa_qtime_config() to have granularity 100 usec and
	 * 1 msec, respectively. Use pulse generator 0 if possible,
	 * otherwise fall back to pulse generator 1.
	 */
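	/* (Illustrative: assuming a 5-bit TIME_LIMIT field (maximum
	 * value 31), a 10 msec timeout yields 10000 / 100 = 100,
	 * which doesn't fit, so pulse generator 1 is selected and
	 * the value programmed is 10.)
	 */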
	val = DIV_ROUND_CLOSEST(microseconds, 100);
	if (val > field_max(TIME_LIMIT_FMASK)) {
		/* Have to use pulse generator 1 (millisecond granularity) */
		gran_sel = GRAN_SEL_FMASK;
		val = DIV_ROUND_CLOSEST(microseconds, 1000);
	} else {
		/* We can use pulse generator 0 (100 usec granularity) */
		gran_sel = 0;
	}

	return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
}

/* The head-of-line blocking timer is defined as a tick count. For
 * IPA version 4.5 the tick count is based on the Qtimer, which is
 * derived from the 19.2 MHz SoC XO clock. For older IPA versions
 * each tick represents 128 cycles of the IPA core clock.
 *
 * Return the encoded value that should be written to that register
 * that represents the timeout period provided. For IPA v4.2 this
 * encodes a base and scale value, while for earlier versions the
 * value is a simple tick count.
 */
static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 width;
	u32 scale;
	u64 ticks;
	u64 rate;
	u32 high;
	u32 val;

	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */

	if (ipa->version >= IPA_VERSION_4_5)
		return hol_block_timer_qtime_val(ipa, microseconds);

	/* Use 64 bit arithmetic to avoid overflow... */
	rate = ipa_core_clock_rate(ipa);
	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
	/* ...but we still need to fit into a 32-bit register */
	WARN_ON(ticks > U32_MAX);

	/* IPA v3.5.1 through v4.1 just record the tick count */
	if (ipa->version < IPA_VERSION_4_2)
		return (u32)ticks;

	/* For IPA v4.2, the tick count is represented by base and
	 * scale fields within the 32-bit timer register, where:
	 *     ticks = base << scale;
	 * The best precision is achieved when the base value is as
	 * large as possible. Find the highest set bit in the tick
	 * count, and extract the number of bits in the base field
	 * such that high bit is included.
	 */
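	/* (Worked example, assuming a 5-bit base field: ticks = 1000
	 * has fls() == 10, so scale = 10 - 5 = 5; rounding adds
	 * 1 << 4 = 16, and base = 1016 >> 5 = 31, which encodes
	 * 31 << 5 = 992 ticks.)
	 */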
	high = fls(ticks);		/* 1..32 */
	width = HWEIGHT32(BASE_VALUE_FMASK);
	scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);

	return val;
}

/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					      u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* This should only be changed when HOL_BLOCK_EN is disabled */
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	val = hol_block_timer_val(ipa, microseconds);
	iowrite32(val, ipa->reg_virt + offset);
}

static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = enable ? HOL_BLOCK_EN_FMASK : 0;
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
	/* When enabling, the register must be written twice for IPA v4.5+ */
	if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
		iowrite32(val, endpoint->ipa->reg_virt + offset);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;

		ipa_endpoint_init_hol_block_enable(endpoint, false);
		ipa_endpoint_init_hol_block_timer(endpoint, 0);
		ipa_endpoint_init_hol_block_enable(endpoint, true);
	}
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 val;

	val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
	iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (!endpoint->toward_ipa)
		return;		/* Register not valid for RX endpoints */

	/* Low-order byte configures primary packet processing */
	val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK);

	/* Second byte configures replicated packet processing */
	val |= u32_encode_bits(endpoint->data->tx.seq_rep_type,
			       SEQ_REP_TYPE_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint: Endpoint pointer
 * @skb: Socket buffer to send
 *
 * Returns: 0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->data->status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->data->tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0, meaning status element precedes
		 * packet (not present for IPA v4.5)
		 */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;
	bool doorbell = false;
	struct page *page;
	u32 offset;
	u32 len;
	int ret;

	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
	if (!page)
		return -ENOMEM;

	trans = ipa_endpoint_trans_alloc(endpoint, 1);
	if (!trans)
		goto err_free_pages;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = IPA_RX_BUFFER_SIZE - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		goto err_trans_free;
	trans->data = page;	/* transaction owns page now */

	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
		doorbell = true;
		endpoint->replenish_ready = 0;
	}

	gsi_trans_commit(trans, doorbell);

	return 0;

err_trans_free:
	gsi_trans_free(trans);
err_free_pages:
	put_page(page);

	return -ENOMEM;
}

/**
 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
 * @endpoint: Endpoint to be replenished
 * @add_one: Whether this is replacing a just-consumed buffer
 *
 * The IPA hardware can hold a fixed number of receive buffers for an RX
 * endpoint, based on the number of entries in the underlying channel ring
 * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
 * more receive buffers can be supplied to the hardware. Replenishing for
 * an endpoint can be disabled, in which case requests to replenish a
 * buffer are "saved", and transferred to the backlog once replenishing
 * is re-enabled.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
{
	struct gsi *gsi;
	u32 backlog;
	int delta;

	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) {
		if (add_one)
			atomic_inc(&endpoint->replenish_saved);
		return;
	}

	/* If already active, just update the backlog */
	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
		if (add_one)
			atomic_inc(&endpoint->replenish_backlog);
		return;
	}

	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
		if (ipa_endpoint_replenish_one(endpoint))
			goto try_again_later;

	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	if (add_one)
		atomic_inc(&endpoint->replenish_backlog);

	return;

try_again_later:
	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	/* The last one didn't succeed, so fix the backlog */
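	/* (One to put back the buffer that failed, plus one more if the
	 * caller also asked us to account for a just-consumed buffer.)
	 */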
	delta = add_one ? 2 : 1;
	backlog = atomic_add_return(delta, &endpoint->replenish_backlog);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again. It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * Receive buffer transactions use one TRE, so schedule work to
	 * try replenishing again if our backlog is *all* available TREs.
	 */
	gsi = &endpoint->ipa->gsi;
	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 max_backlog;
	u32 saved;

	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
		atomic_add(saved, &endpoint->replenish_backlog);

	/* Start replenishing if hardware currently has no buffers */
	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
		ipa_endpoint_replenish(endpoint, false);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	u32 backlog;

	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
		atomic_add(backlog, &endpoint->replenish_saved);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint, false);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	/* Now receive it, or drop it if there's no netdev */
	if (endpoint->netdev)
		ipa_modem_skb_rx(endpoint->netdev, skb);
	else if (skb)
		dev_kfree_skb_any(skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD));

	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes). Other types aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u8_get_bits(status->endp_dst_idx,
				  IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
				    const struct ipa_status *status)
{
	struct ipa_endpoint *command_endpoint;
	struct ipa *ipa = endpoint->ipa;
	u32 endpoint_id;

	if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
		return false;	/* No valid tag */

	/* The status contains a valid tag. We know the packet was sent to
	 * this endpoint (already verified by ipa_endpoint_status_skip()).
	 * If the packet came from the AP->command TX endpoint we know
	 * this packet was sent as part of the pipeline clear process.
	 */
	endpoint_id = u8_get_bits(status->endp_src_idx,
				  IPA_STATUS_SRC_IDX_FMASK);
	command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	if (endpoint_id == command_endpoint->endpoint_id) {
		complete(&ipa->completion);
	} else {
		dev_err(&ipa->pdev->dev,
			"unexpected tagged packet from endpoint %u\n",
			endpoint_id);
	}

	return true;
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 val;

	/* If the status indicates a tagged transfer, we'll drop the packet */
	if (ipa_endpoint_status_tag(endpoint, status))
		return true;

	/* Deaggregation exceptions we drop; all other types we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the packet,
		 * including the status element. If the hardware is configured
		 * to pad packet data to an aligned boundary, account for that.
		 * And if checksum offload is enabled a trailer containing
		 * computed checksum information will be appended.
		 */
		align = endpoint->data->rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->data->checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		if (!ipa_endpoint_status_drop(endpoint, status)) {
			void *data2;
			u32 extra;
			u32 len2;

			/* Client receives only packet data (no status) */
			data2 = data + sizeof(*status);
			len2 = le16_to_cpu(status->pkt_len);

			/* Have the true size reflect the extra unused space in
			 * the original receive buffer. Distribute the "cost"
			 * proportionately across all aggregated packets in the
			 * buffer.
			 */
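			/* (Illustrative: with 2000 of 8192 buffer bytes
			 * used, a packet consuming 500 of those bytes
			 * is charged 6192 * 500 / 2000 = 1548 extra.)
			 */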
1331 extra = DIV_ROUND_CLOSEST(unused * len, total_len);
1332 ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
1333 }
1334
1335 /* Consume status and the full packet it describes */
1336 data += len;
1337 resid -= len;
1338 }
1339 }
1340
1341 /* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
ipa_endpoint_tx_complete(struct ipa_endpoint * endpoint,struct gsi_trans * trans)1342 static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
1343 struct gsi_trans *trans)
1344 {
1345 }
1346
1347 /* Complete transaction initiated in ipa_endpoint_replenish_one() */
ipa_endpoint_rx_complete(struct ipa_endpoint * endpoint,struct gsi_trans * trans)1348 static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
1349 struct gsi_trans *trans)
1350 {
1351 struct page *page;
1352
1353 ipa_endpoint_replenish(endpoint, true);
1354
1355 if (trans->cancelled)
1356 return;
1357
1358 /* Parse or build a socket buffer using the actual received length */
1359 page = trans->data;
1360 if (endpoint->data->status_enable)
1361 ipa_endpoint_status_parse(endpoint, page, trans->len);
1362 else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1363 trans->data = NULL; /* Pages have been consumed */
1364 }
1365
ipa_endpoint_trans_complete(struct ipa_endpoint * endpoint,struct gsi_trans * trans)1366 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1367 struct gsi_trans *trans)
1368 {
1369 if (endpoint->toward_ipa)
1370 ipa_endpoint_tx_complete(endpoint, trans);
1371 else
1372 ipa_endpoint_rx_complete(endpoint, trans);
1373 }
1374
ipa_endpoint_trans_release(struct ipa_endpoint * endpoint,struct gsi_trans * trans)1375 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1376 struct gsi_trans *trans)
1377 {
1378 if (endpoint->toward_ipa) {
1379 struct ipa *ipa = endpoint->ipa;
1380
1381 /* Nothing to do for command transactions */
1382 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1383 struct sk_buff *skb = trans->data;
1384
1385 if (skb)
1386 dev_kfree_skb_any(skb);
1387 }
1388 } else {
1389 struct page *page = trans->data;
1390
1391 if (page)
1392 put_page(page);
1393 }
1394 }
1395
ipa_endpoint_default_route_set(struct ipa * ipa,u32 endpoint_id)1396 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1397 {
1398 u32 val;
1399
1400 /* ROUTE_DIS is 0 */
1401 val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
1402 val |= ROUTE_DEF_HDR_TABLE_FMASK;
1403 val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
1404 val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
1405 val |= ROUTE_DEF_RETAIN_HDR_FMASK;
1406
1407 iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
1408 }
1409
ipa_endpoint_default_route_clear(struct ipa * ipa)1410 void ipa_endpoint_default_route_clear(struct ipa *ipa)
1411 {
1412 ipa_endpoint_default_route_set(ipa, 0);
1413 }
1414
1415 /**
1416 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1417 * @endpoint: Endpoint to be reset
1418 *
1419 * If aggregation is active on an RX endpoint when a reset is performed
1420 * on its underlying GSI channel, a special sequence of actions must be
1421 * taken to ensure the IPA pipeline is properly cleared.
1422 *
1423 * Return: 0 if successful, or a negative error code
1424 */
ipa_endpoint_reset_rx_aggr(struct ipa_endpoint * endpoint)1425 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1426 {
1427 struct device *dev = &endpoint->ipa->pdev->dev;
1428 struct ipa *ipa = endpoint->ipa;
1429 struct gsi *gsi = &ipa->gsi;
1430 bool suspended = false;
1431 dma_addr_t addr;
1432 u32 retries;
1433 u32 len = 1;
1434 void *virt;
1435 int ret;
1436
1437 virt = kzalloc(len, GFP_KERNEL);
1438 if (!virt)
1439 return -ENOMEM;
1440
1441 addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1442 if (dma_mapping_error(dev, addr)) {
1443 ret = -ENOMEM;
1444 goto out_kfree;
1445 }
1446
1447 /* Force close aggregation before issuing the reset */
1448 ipa_endpoint_force_close(endpoint);
1449
1450 /* Reset and reconfigure the channel with the doorbell engine
1451 * disabled. Then poll until we know aggregation is no longer
1452 * active. We'll re-enable the doorbell (if appropriate) when
1453 * we reset again below.
1454 */
1455 gsi_channel_reset(gsi, endpoint->channel_id, false);
1456
1457 /* Make sure the channel isn't suspended */
1458 suspended = ipa_endpoint_program_suspend(endpoint, false);
1459
1460 /* Start channel and do a 1 byte read */
1461 ret = gsi_channel_start(gsi, endpoint->channel_id);
1462 if (ret)
1463 goto out_suspend_again;
1464
1465 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1466 if (ret)
1467 goto err_endpoint_stop;
1468
1469 /* Wait for aggregation to be closed on the channel */
1470 retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1471 do {
1472 if (!ipa_endpoint_aggr_active(endpoint))
1473 break;
1474 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1475 } while (retries--);
1476
1477 /* Check one last time */
1478 if (ipa_endpoint_aggr_active(endpoint))
1479 dev_err(dev, "endpoint %u still active during reset\n",
1480 endpoint->endpoint_id);
1481
1482 gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1483
1484 ret = gsi_channel_stop(gsi, endpoint->channel_id);
1485 if (ret)
1486 goto out_suspend_again;
1487
1488 /* Finally, reset and reconfigure the channel again (re-enabling
1489 * the doorbell engine if appropriate). Sleep for 1 millisecond to
1490 * complete the channel reset sequence. Finish by suspending the
1491 * channel again (if necessary).
1492 */
1493 gsi_channel_reset(gsi, endpoint->channel_id, true);
1494
1495 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1496
1497 goto out_suspend_again;
1498
1499 err_endpoint_stop:
1500 (void)gsi_channel_stop(gsi, endpoint->channel_id);
1501 out_suspend_again:
1502 if (suspended)
1503 (void)ipa_endpoint_program_suspend(endpoint, true);
1504 dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1505 out_kfree:
1506 kfree(virt);
1507
1508 return ret;
1509 }
1510
ipa_endpoint_reset(struct ipa_endpoint * endpoint)1511 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1512 {
1513 u32 channel_id = endpoint->channel_id;
1514 struct ipa *ipa = endpoint->ipa;
1515 bool special;
1516 int ret = 0;
1517
1518 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1519 * is active, we need to handle things specially to recover.
1520 * All other cases just need to reset the underlying GSI channel.
1521 */
1522 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1523 endpoint->data->aggregation;
1524 if (special && ipa_endpoint_aggr_active(endpoint))
1525 ret = ipa_endpoint_reset_rx_aggr(endpoint);
1526 else
1527 gsi_channel_reset(&ipa->gsi, channel_id, true);
1528
1529 if (ret)
1530 dev_err(&ipa->pdev->dev,
1531 "error %d resetting channel %u for endpoint %u\n",
1532 ret, endpoint->channel_id, endpoint->endpoint_id);
1533 }
1534
ipa_endpoint_program(struct ipa_endpoint * endpoint)1535 static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1536 {
1537 if (endpoint->toward_ipa)
1538 ipa_endpoint_program_delay(endpoint, false);
1539 else
1540 (void)ipa_endpoint_program_suspend(endpoint, false);
1541 ipa_endpoint_init_cfg(endpoint);
1542 ipa_endpoint_init_nat(endpoint);
1543 ipa_endpoint_init_hdr(endpoint);
1544 ipa_endpoint_init_hdr_ext(endpoint);
1545 ipa_endpoint_init_hdr_metadata_mask(endpoint);
1546 ipa_endpoint_init_mode(endpoint);
1547 ipa_endpoint_init_aggr(endpoint);
1548 ipa_endpoint_init_deaggr(endpoint);
1549 ipa_endpoint_init_rsrc_grp(endpoint);
1550 ipa_endpoint_init_seq(endpoint);
1551 ipa_endpoint_status(endpoint);
1552 }
1553
ipa_endpoint_enable_one(struct ipa_endpoint * endpoint)1554 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1555 {
1556 struct ipa *ipa = endpoint->ipa;
1557 struct gsi *gsi = &ipa->gsi;
1558 int ret;
1559
1560 ret = gsi_channel_start(gsi, endpoint->channel_id);
1561 if (ret) {
1562 dev_err(&ipa->pdev->dev,
1563 "error %d starting %cX channel %u for endpoint %u\n",
1564 ret, endpoint->toward_ipa ? 'T' : 'R',
1565 endpoint->channel_id, endpoint->endpoint_id);
1566 return ret;
1567 }
1568
1569 if (!endpoint->toward_ipa) {
1570 ipa_interrupt_suspend_enable(ipa->interrupt,
1571 endpoint->endpoint_id);
1572 ipa_endpoint_replenish_enable(endpoint);
1573 }
1574
1575 ipa->enabled |= BIT(endpoint->endpoint_id);
1576
1577 return 0;
1578 }
1579
ipa_endpoint_disable_one(struct ipa_endpoint * endpoint)1580 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1581 {
1582 u32 mask = BIT(endpoint->endpoint_id);
1583 struct ipa *ipa = endpoint->ipa;
1584 struct gsi *gsi = &ipa->gsi;
1585 int ret;
1586
1587 if (!(ipa->enabled & mask))
1588 return;
1589
1590 ipa->enabled ^= mask;
1591
1592 if (!endpoint->toward_ipa) {
1593 ipa_endpoint_replenish_disable(endpoint);
1594 ipa_interrupt_suspend_disable(ipa->interrupt,
1595 endpoint->endpoint_id);
1596 }
1597
1598 /* Note that if stop fails, the channel's state is not well-defined */
1599 ret = gsi_channel_stop(gsi, endpoint->channel_id);
1600 if (ret)
1601 dev_err(&ipa->pdev->dev,
1602 "error %d attempting to stop endpoint %u\n", ret,
1603 endpoint->endpoint_id);
1604 }
1605
ipa_endpoint_suspend_one(struct ipa_endpoint * endpoint)1606 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1607 {
1608 struct device *dev = &endpoint->ipa->pdev->dev;
1609 struct gsi *gsi = &endpoint->ipa->gsi;
1610 int ret;
1611
1612 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1613 return;
1614
1615 if (!endpoint->toward_ipa) {
1616 ipa_endpoint_replenish_disable(endpoint);
1617 (void)ipa_endpoint_program_suspend(endpoint, true);
1618 }
1619
1620 ret = gsi_channel_suspend(gsi, endpoint->channel_id);
1621 if (ret)
1622 dev_err(dev, "error %d suspending channel %u\n", ret,
1623 endpoint->channel_id);
1624 }
1625
ipa_endpoint_resume_one(struct ipa_endpoint * endpoint)1626 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1627 {
1628 struct device *dev = &endpoint->ipa->pdev->dev;
1629 struct gsi *gsi = &endpoint->ipa->gsi;
1630 int ret;
1631
1632 if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1633 return;
1634
1635 if (!endpoint->toward_ipa)
1636 (void)ipa_endpoint_program_suspend(endpoint, false);
1637
1638 ret = gsi_channel_resume(gsi, endpoint->channel_id);
1639 if (ret)
1640 dev_err(dev, "error %d resuming channel %u\n", ret,
1641 endpoint->channel_id);
1642 else if (!endpoint->toward_ipa)
1643 ipa_endpoint_replenish_enable(endpoint);
1644 }
1645
ipa_endpoint_suspend(struct ipa * ipa)1646 void ipa_endpoint_suspend(struct ipa *ipa)
1647 {
1648 if (!ipa->setup_complete)
1649 return;
1650
1651 if (ipa->modem_netdev)
1652 ipa_modem_suspend(ipa->modem_netdev);
1653
1654 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1655 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1656 }
1657
void ipa_endpoint_resume(struct ipa *ipa)
{
        if (!ipa->setup_complete)
                return;

        ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
        ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

        if (ipa->modem_netdev)
                ipa_modem_resume(ipa->modem_netdev);
}

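/* Set up one AP endpoint:  record its per-transaction TRE limit,
 * initialize RX replenish state (counters and delayed work), program
 * the endpoint's registers, and mark it in the ipa->set_up bitmask.
 * Endpoints owned by other execution environments (e.g. the modem)
 * are skipped.
 */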
static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
        struct gsi *gsi = &endpoint->ipa->gsi;
        u32 channel_id = endpoint->channel_id;

        /* Only AP endpoints get set up */
        if (endpoint->ee_id != GSI_EE_AP)
                return;

        endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
        if (!endpoint->toward_ipa) {
                /* RX transactions require a single TRE, so the maximum
                 * backlog is the same as the maximum outstanding TREs.
                 */
                clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
                clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
                atomic_set(&endpoint->replenish_saved,
                           gsi_channel_tre_max(gsi, endpoint->channel_id));
                atomic_set(&endpoint->replenish_backlog, 0);
                INIT_DELAYED_WORK(&endpoint->replenish_work,
                                  ipa_endpoint_replenish_work);
        }

        ipa_endpoint_program(endpoint);

        endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
        endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

        if (!endpoint->toward_ipa)
                cancel_delayed_work_sync(&endpoint->replenish_work);

        ipa_endpoint_reset(endpoint);
}

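/* Set up all initialized endpoints.  The loop below walks the
 * ipa->initialized bitmask from its lowest set bit upward:  __ffs()
 * finds the lowest set bit, and XORing that bit out advances the walk.
 * For example, a mask of 0x0a (endpoints 1 and 3) visits endpoint 1
 * and then endpoint 3.  Teardown walks its mask in the opposite
 * direction using __fls().
 */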
void ipa_endpoint_setup(struct ipa *ipa)
{
        u32 initialized = ipa->initialized;

        ipa->set_up = 0;
        while (initialized) {
                u32 endpoint_id = __ffs(initialized);

                initialized ^= BIT(endpoint_id);

                ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
        }
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
        u32 set_up = ipa->set_up;

        while (set_up) {
                u32 endpoint_id = __fls(set_up);

                set_up ^= BIT(endpoint_id);

                ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
        }
        ipa->set_up = 0;
}

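/* Validate the set of initialized endpoints against what the hardware
 * reports in its FLAVOR_0 register, and record the bitmask of endpoints
 * the hardware makes available.  Returns 0 if the configuration is
 * valid, or a negative error code otherwise.
 */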
int ipa_endpoint_config(struct ipa *ipa)
{
        struct device *dev = &ipa->pdev->dev;
        u32 initialized;
        u32 rx_base;
        u32 rx_mask;
        u32 tx_mask;
        int ret = 0;
        u32 max;
        u32 val;

        /* Prior to IPAv3.5, the FLAVOR_0 register was not supported.
         * Furthermore, the endpoints were not grouped such that TX
         * endpoint numbers started with 0 and RX endpoints had numbers
         * higher than all TX endpoints, so we can't do the simple
         * direction check used for newer hardware below.
         *
         * For hardware that doesn't support the FLAVOR_0 register,
         * just set the available mask to support any endpoint, and
         * assume the configuration is valid.
         */
        if (ipa->version < IPA_VERSION_3_5) {
                ipa->available = ~0;
                return 0;
        }

        /* Find out about the endpoints supplied by the hardware, and ensure
         * the highest one doesn't exceed the number we support.
         */
        val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

        /* Our RX is an IPA producer */
        rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
        max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
        if (max > IPA_ENDPOINT_MAX) {
                dev_err(dev, "too many endpoints (%u > %u)\n",
                        max, IPA_ENDPOINT_MAX);
                return -EINVAL;
        }
        rx_mask = GENMASK(max - 1, rx_base);

        /* Our TX is an IPA consumer */
        max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
        tx_mask = GENMASK(max - 1, 0);

        ipa->available = rx_mask | tx_mask;

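        /* Illustration with hypothetical FLAVOR_0 values:  if the lowest
         * producer (RX) pipe is 8, there are 8 producer pipes, and 8
         * consumer (TX) pipes, then:
         *      rx_base = 8, max = 16  ->  rx_mask = GENMASK(15, 8) = 0xff00
         *      max = 8                ->  tx_mask = GENMASK(7, 0)  = 0x00ff
         * so ipa->available is 0xffff (endpoints 0-15).
         */
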
        /* Check for initialized endpoints not supported by the hardware */
        if (ipa->initialized & ~ipa->available) {
                dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
                        ipa->initialized & ~ipa->available);
                ret = -EINVAL;          /* Report other errors too */
        }

        initialized = ipa->initialized;
        while (initialized) {
                u32 endpoint_id = __ffs(initialized);
                struct ipa_endpoint *endpoint;

                initialized ^= BIT(endpoint_id);

                /* Make sure it's pointing in the right direction */
                endpoint = &ipa->endpoint[endpoint_id];
                if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
                        dev_err(dev, "endpoint id %u wrong direction\n",
                                endpoint_id);
                        ret = -EINVAL;
                }
        }

        return ret;
}

void ipa_endpoint_deconfig(struct ipa *ipa)
{
        ipa->available = 0;     /* Nothing more to do */
}

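/* Initialize one endpoint from its configuration data:  fill in the
 * channel and name maps, record the endpoint's identifying information
 * and configuration, and mark it in the ipa->initialized bitmask.  Only
 * AP-owned channels are entered in the channel map.
 */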
static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
                                  const struct ipa_gsi_endpoint_data *data)
{
        struct ipa_endpoint *endpoint;

        endpoint = &ipa->endpoint[data->endpoint_id];

        if (data->ee_id == GSI_EE_AP)
                ipa->channel_map[data->channel_id] = endpoint;
        ipa->name_map[name] = endpoint;

        endpoint->ipa = ipa;
        endpoint->ee_id = data->ee_id;
        endpoint->channel_id = data->channel_id;
        endpoint->endpoint_id = data->endpoint_id;
        endpoint->toward_ipa = data->toward_ipa;
        endpoint->data = &data->endpoint.config;

        ipa->initialized |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
        endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

        memset(endpoint, 0, sizeof(*endpoint));
}

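/* Inverse of ipa_endpoint_init().  Endpoints are released in reverse
 * order (highest endpoint ID first, using __fls()), then the name and
 * channel maps are cleared.
 */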
void ipa_endpoint_exit(struct ipa *ipa)
{
        u32 initialized = ipa->initialized;

        while (initialized) {
                u32 endpoint_id = __fls(initialized);

                initialized ^= BIT(endpoint_id);

                ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
        }
        memset(ipa->name_map, 0, sizeof(ipa->name_map));
        memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}

/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
                      const struct ipa_gsi_endpoint_data *data)
{
        enum ipa_endpoint_name name;
        u32 filter_map;

        if (!ipa_endpoint_data_valid(ipa, count, data))
                return 0;       /* Error */

        ipa->initialized = 0;

        filter_map = 0;
        for (name = 0; name < count; name++, data++) {
                if (ipa_gsi_endpoint_data_empty(data))
                        continue;       /* Skip over empty slots */

                ipa_endpoint_init_one(ipa, name, data);

                if (data->endpoint.filter_support)
                        filter_map |= BIT(data->endpoint_id);
        }

        if (!ipa_filter_map_valid(ipa, filter_map))
                goto err_endpoint_exit;

        return filter_map;      /* Non-zero bitmask */

err_endpoint_exit:
        ipa_endpoint_exit(ipa);

        return 0;       /* Error */
}

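/* A rough sketch of the expected call order for the functions above
 * (illustrative only; the real call sites live elsewhere in the driver,
 * and error handling is omitted):
 *
 *      filter_map = ipa_endpoint_init(ipa, count, data);   // probe time
 *      ret = ipa_endpoint_config(ipa);                     // hardware known
 *      ipa_endpoint_setup(ipa);                            // GSI ready
 *      ret = ipa_endpoint_enable_one(endpoint);            // per endpoint
 *      ...
 *      ipa_endpoint_disable_one(endpoint);
 *      ipa_endpoint_teardown(ipa);
 *      ipa_endpoint_deconfig(ipa);
 *      ipa_endpoint_exit(ipa);                             // remove/unwind
 */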