// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_clock.h"

#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

#define IPA_REPLENISH_BATCH	16

/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

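/* A sketch of how that overhead works out: SKB_MAX_ORDER(X, 0) is
 * (PAGE_SIZE - X) less the aligned size of struct skb_shared_info,
 * so the definition above reduces to
 *
 *	NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *
 * i.e. the reserved headroom plus the shared-info trailer that
 * build_skb() expects at the end of the buffer.
 */
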
/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK		0x000000ff /* host byte order */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
#define IPA_AGGR_TIME_LIMIT_DEFAULT		500	/* microseconds */

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
};

/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};

/* Field masks for struct ipa_status structure fields */
#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)

#ifdef IPA_VALIDATE

static void ipa_endpoint_validate_build(void)
{
	/* The aggregation byte limit defines the point at which an
	 * aggregation window will close.  It is programmed into the
	 * IPA hardware as a number of KB.  We don't use "hard byte
	 * limit" aggregation, which means that we need to supply
	 * enough space in a receive buffer to hold a complete MTU
	 * plus normal skb overhead *after* that aggregation byte
	 * limit has been crossed.
	 *
	 * This check just ensures we don't define a receive buffer
	 * size that would exceed what we can represent in the field
	 * that is used to program its size.
	 */
	BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
		     field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
		     IPA_MTU + IPA_RX_BUFFER_OVERHEAD);

	/* I honestly don't know where this requirement comes from.  But
	 * it holds, and if we someday need to loosen the constraint we
	 * can try to track it down.
	 */
	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
}

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}

	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}

	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	ipa_endpoint_validate_build();

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}

#else /* !IPA_VALIDATE */

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	return true;
}

#endif /* !IPA_VALIDATE */

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	bool state;
	u32 mask;
	u32 val;

	/* Suspend is not supported for IPA v4.0+.  Delay doesn't work
	 * correctly on IPA v4.2.
	 *
	 * if (endpoint->toward_ipa)
	 * 	assert(ipa->version != IPA_VERSION_4_2);
	 * else
	 * 	assert(ipa->version == IPA_VERSION_3_5_1);
	 */
	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	/* Don't bother if it's already in the requested state */
	state = !!(val & mask);
	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}

	return state;
}

/* We currently don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
	/* assert(endpoint->toward_ipa); */

	/* Delay mode doesn't work properly for IPA v4.2 */
	if (endpoint->ipa->version != IPA_VERSION_4_2)
		(void)ipa_endpoint_init_ctrl(endpoint, enable);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* assert(mask & ipa->available); */
	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	/* assert(mask & ipa->available); */
	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 *  with an open aggregation frame.  This is to work around a hardware
 *  issue in IPA version 3.5.1 where the suspend interrupt will not be
 *  generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	if (!endpoint->data->aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}

/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
	bool suspended;

	if (endpoint->ipa->version != IPA_VERSION_3_5_1)
		return enable;	/* For IPA v4.0+, no change made */

	/* assert(!endpoint->toward_ipa); */

	suspended = ipa_endpoint_init_ctrl(endpoint, enable);

	/* A client suspended with an open aggregation frame will not
	 * generate a SUSPEND IPA interrupt.  If enabling suspend, have
	 * ipa_endpoint_suspend_aggr() handle this.
	 */
	if (enable && !suspended)
		ipa_endpoint_suspend_aggr(endpoint);

	return suspended;
}

/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	u32 endpoint_id;

	/* DELAY mode doesn't work correctly on IPA v4.2 */
	if (ipa->version == IPA_VERSION_4_2)
		return;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		/* Set TX delay mode or RX suspend mode */
		if (endpoint->toward_ipa)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			(void)ipa_endpoint_program_suspend(endpoint, enable);
	}
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint.  We can get an upper
	 * bound on that by assuming all initialized endpoints are modem->IPA.
	 * That won't happen, and we could be more precise, but this is fine
	 * for now.  We need to end the transaction with a "tag process."
	 */
	count = hweight32(initialized) + ipa_cmd_tag_process_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_tag_process_add(trans);

	/* XXX This should have a 1 second timeout */
	gsi_trans_commit_wait(trans);

	return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->data->checksum) {
		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
					       CS_OFFLOAD_EN_FMASK);
			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);
		} else {
			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
					       CS_OFFLOAD_EN_FMASK);
		}
	} else {
		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
				       CS_OFFLOAD_EN_FMASK);
	}
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:	Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->qmap) {
		size_t header_size = sizeof(struct rmnet_map_header);

		/* We might supply a checksum header after the QMAP header */
		if (endpoint->toward_ipa && endpoint->data->checksum)
			header_size += sizeof(struct rmnet_map_ul_csum_header);
		val |= u32_encode_bits(header_size, HDR_LEN_FMASK);

		/* Define how to fill fields in a received QMAP header */
		if (!endpoint->toward_ipa) {
			u32 off;	/* Field offset within header */

			/* Where IPA will write the metadata value */
			off = offsetof(struct rmnet_map_header, mux_id);
			val |= u32_encode_bits(off, HDR_OFST_METADATA_FMASK);

			/* Where IPA will write the length */
			off = offsetof(struct rmnet_map_header, pkt_len);
			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK);
		}
		/* For QMAP TX, metadata offset is 0 (modem assumes this) */
		val |= HDR_OFST_METADATA_VALID_FMASK;

		/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0 (TX only) */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
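
/* For reference, a sketch of the QMAP header programmed above, based
 * on struct rmnet_map_header from <linux/if_rmnet.h> (byte offsets):
 *
 *	+--------------+--------------+---------------------------+
 *	| 0: pad/flags | 1: mux_id    | 2-3: pkt_len (big endian) |
 *	+--------------+--------------+---------------------------+
 *
 * So for RX the offsetof() calls above yield HDR_OFST_METADATA = 1
 * (the mux_id byte) and HDR_OFST_PKT_SIZE = 2 (the pkt_len field).
 */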

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->data->rx.pad_align;
	u32 val = 0;

	val |= HDR_ENDIANNESS_FMASK;		/* big endian */

	/* A QMAP header contains a 6 bit pad field at offset 0.  The RMNet
	 * driver assumes this field is meaningful in packets it receives,
	 * and assumes the header's payload length includes that padding.
	 * The RMNet driver does *not* pad packets it sends, however, so
	 * the pad field (although 0) should be ignored.
	 */
	if (endpoint->data->qmap && !endpoint->toward_ipa) {
		val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
		val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
		/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
	}

	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	if (endpoint->toward_ipa)
		return;		/* Register not valid for TX endpoints */

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	/* Note that HDR_ENDIANNESS indicates big endian header fields */
	if (endpoint->data->qmap)
		val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
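
/* To illustrate the mask written above: the modem supplies a 4-byte
 * metadata value per packet, and HDR_ENDIANNESS makes the hardware
 * store header fields big endian.  Writing cpu_to_be32(0x000000ff)
 * therefore selects just the byte holding the mux_id; all other
 * metadata bits are masked off before the value is written into the
 * QMAP header.
 */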
569 
ipa_endpoint_init_mode(struct ipa_endpoint * endpoint)570 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
571 {
572 	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
573 	u32 val;
574 
575 	if (!endpoint->toward_ipa)
576 		return;		/* Register not valid for RX endpoints */
577 
578 	if (endpoint->data->dma_mode) {
579 		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
580 		u32 dma_endpoint_id;
581 
582 		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
583 
584 		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
585 		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
586 	} else {
587 		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
588 	}
589 	/* All other bits unspecified (and 0) */
590 
591 	iowrite32(val, endpoint->ipa->reg_virt + offset);
592 }
593 
594 /* Compute the aggregation size value to use for a given buffer size */
ipa_aggr_size_kb(u32 rx_buffer_size)595 static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
596 {
597 	/* We don't use "hard byte limit" aggregation, so we define the
598 	 * aggregation limit such that our buffer has enough space *after*
599 	 * that limit to receive a full MTU of data, plus overhead.
600 	 */
601 	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
602 
603 	return rx_buffer_size / SZ_1K;
604 }
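
/* A rough worked example (assuming a 1500 byte IPA_MTU and a few
 * hundred bytes of skb overhead; both values are illustrative): an
 * 8192 byte buffer would leave about 8192 - 1500 - 400 = 6292 bytes
 * ahead of the limit, so the value programmed would be 6292 / 1024 = 6
 * (KB).  Aggregation closes once 6 KB arrive, and a full MTU received
 * after that point still fits in the buffer.
 */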
605 
ipa_endpoint_init_aggr(struct ipa_endpoint * endpoint)606 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
607 {
608 	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
609 	u32 val = 0;
610 
611 	if (endpoint->data->aggregation) {
612 		if (!endpoint->toward_ipa) {
613 			u32 buffer_size;
614 			u32 limit;
615 
616 			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
617 			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
618 
619 			buffer_size = IPA_RX_BUFFER_SIZE - NET_SKB_PAD;
620 			limit = ipa_aggr_size_kb(buffer_size);
621 			val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);
622 
623 			limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
624 			limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
625 			val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK);
626 
627 			/* AGGR_PKT_LIMIT is 0 (unlimited) */
628 
629 			if (endpoint->data->rx.aggr_close_eof)
630 				val |= AGGR_SW_EOF_ACTIVE_FMASK;
631 			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
632 		} else {
633 			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
634 					       AGGR_EN_FMASK);
635 			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
636 			/* other fields ignored */
637 		}
638 		/* AGGR_FORCE_CLOSE is 0 */
639 	} else {
640 		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
641 		/* other fields ignored */
642 	}
643 
644 	iowrite32(val, endpoint->ipa->reg_virt + offset);
645 }
646 
647 /* The head-of-line blocking timer is defined as a tick count, where each
648  * tick represents 128 cycles of the IPA core clock.  Return the value
649  * that should be written to that register that represents the timeout
650  * period provided.
651  */
ipa_reg_init_hol_block_timer_val(struct ipa * ipa,u32 microseconds)652 static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
653 {
654 	u32 width;
655 	u32 scale;
656 	u64 ticks;
657 	u64 rate;
658 	u32 high;
659 	u32 val;
660 
661 	if (!microseconds)
662 		return 0;	/* Nothing to compute if timer period is 0 */
663 
664 	/* Use 64 bit arithmetic to avoid overflow... */
665 	rate = ipa_clock_rate(ipa);
666 	ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
667 	/* ...but we still need to fit into a 32-bit register */
668 	WARN_ON(ticks > U32_MAX);
669 
670 	/* IPA v3.5.1 just records the tick count */
671 	if (ipa->version == IPA_VERSION_3_5_1)
672 		return (u32)ticks;
673 
674 	/* For IPA v4.2, the tick count is represented by base and
675 	 * scale fields within the 32-bit timer register, where:
676 	 *     ticks = base << scale;
677 	 * The best precision is achieved when the base value is as
678 	 * large as possible.  Find the highest set bit in the tick
679 	 * count, and extract the number of bits in the base field
680 	 * such that that high bit is included.
681 	 */
682 	high = fls(ticks);		/* 1..32 */
683 	width = HWEIGHT32(BASE_VALUE_FMASK);
684 	scale = high > width ? high - width : 0;
685 	if (scale) {
686 		/* If we're scaling, round up to get a closer result */
687 		ticks += 1 << (scale - 1);
688 		/* High bit was set, so rounding might have affected it */
689 		if (fls(ticks) != high)
690 			scale++;
691 	}
692 
693 	val = u32_encode_bits(scale, SCALE_FMASK);
694 	val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);
695 
696 	return val;
697 }
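
/* A worked example of the v4.2 encoding above (the clock rate and
 * field width are hypothetical): with a 100 MHz core clock and a 500
 * microsecond period, ticks = 500 * 100000000 / (128 * 1000000) = 391,
 * a 9-bit value.  With a 5-bit base field, scale = 9 - 5 = 4; rounding
 * adds 1 << 3 = 8 to give 399, so base = 399 >> 4 = 24 is written with
 * scale = 4, representing 24 << 4 = 384 ticks -- the closest value the
 * field widths can express.
 */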
698 
699 /* If microseconds is 0, timeout is immediate */
ipa_endpoint_init_hol_block_timer(struct ipa_endpoint * endpoint,u32 microseconds)700 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
701 					      u32 microseconds)
702 {
703 	u32 endpoint_id = endpoint->endpoint_id;
704 	struct ipa *ipa = endpoint->ipa;
705 	u32 offset;
706 	u32 val;
707 
708 	/* This should only be changed when HOL_BLOCK_EN is disabled */
709 	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
710 	val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
711 	iowrite32(val, ipa->reg_virt + offset);
712 }
713 
714 static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint * endpoint,bool enable)715 ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
716 {
717 	u32 endpoint_id = endpoint->endpoint_id;
718 	u32 offset;
719 	u32 val;
720 
721 	val = enable ? HOL_BLOCK_EN_FMASK : 0;
722 	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
723 	iowrite32(val, endpoint->ipa->reg_virt + offset);
724 }
725 
ipa_endpoint_modem_hol_block_clear_all(struct ipa * ipa)726 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
727 {
728 	u32 i;
729 
730 	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
731 		struct ipa_endpoint *endpoint = &ipa->endpoint[i];
732 
733 		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
734 			continue;
735 
736 		ipa_endpoint_init_hol_block_enable(endpoint, false);
737 		ipa_endpoint_init_hol_block_timer(endpoint, 0);
738 		ipa_endpoint_init_hol_block_enable(endpoint, true);
739 	}
740 }
741 
ipa_endpoint_init_deaggr(struct ipa_endpoint * endpoint)742 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
743 {
744 	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
745 	u32 val = 0;
746 
747 	if (!endpoint->toward_ipa)
748 		return;		/* Register not valid for RX endpoints */
749 
750 	/* DEAGGR_HDR_LEN is 0 */
751 	/* PACKET_OFFSET_VALID is 0 */
752 	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
753 	/* MAX_PACKET_LEN is 0 (not enforced) */
754 
755 	iowrite32(val, endpoint->ipa->reg_virt + offset);
756 }
757 
ipa_endpoint_init_seq(struct ipa_endpoint * endpoint)758 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
759 {
760 	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
761 	u32 seq_type = endpoint->seq_type;
762 	u32 val = 0;
763 
764 	if (!endpoint->toward_ipa)
765 		return;		/* Register not valid for RX endpoints */
766 
767 	/* Sequencer type is made up of four nibbles */
768 	val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
769 	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
770 	/* The second two apply to replicated packets */
771 	val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK);
772 	val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK);
773 
774 	iowrite32(val, endpoint->ipa->reg_virt + offset);
775 }
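
/* To illustrate the nibble packing above (the value is hypothetical):
 * a seq_type of 0x2145 would program HPS = 0x5, DPS = 0x4, replicated
 * HPS = 0x1 and replicated DPS = 0x2 -- one 4-bit sequencer type per
 * field, taken from the lowest nibble upward.
 */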

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->data->status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->data->tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0 (status element precedes packet) */
		/* The next field is present for IPA v4.0 and above */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 */
	}

	iowrite32(val, ipa->reg_virt + offset);
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;
	bool doorbell = false;
	struct page *page;
	u32 offset;
	u32 len;
	int ret;

	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
	if (!page)
		return -ENOMEM;

	trans = ipa_endpoint_trans_alloc(endpoint, 1);
	if (!trans)
		goto err_free_pages;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = IPA_RX_BUFFER_SIZE - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		goto err_trans_free;
	trans->data = page;	/* transaction owns page now */

	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
		doorbell = true;
		endpoint->replenish_ready = 0;
	}

	gsi_trans_commit(trans, doorbell);

	return 0;

err_trans_free:
	gsi_trans_free(trans);
err_free_pages:
	put_page(page);

	return -ENOMEM;
}

/**
 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
 * @endpoint:	Endpoint to be replenished
 * @count:	Number of buffers to send to hardware
 *
 * Allocate RX packet wrapper structures with maximal socket buffers
 * for an endpoint.  These are supplied to the hardware, which fills
 * them with incoming data.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
{
	struct gsi *gsi;
	u32 backlog;

	if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) {
		if (count)
			atomic_add(count, &endpoint->replenish_saved);
		return;
	}

	/* If already active, just update the backlog */
	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
		if (count)
			atomic_add(count, &endpoint->replenish_backlog);
		return;
	}

	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
		if (ipa_endpoint_replenish_one(endpoint))
			goto try_again_later;

	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	return;

try_again_later:
	clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);

	/* The last one didn't succeed, so fix the backlog */
	backlog = atomic_add_return(count + 1, &endpoint->replenish_backlog);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * Receive buffer transactions use one TRE, so schedule work to
	 * try replenishing again if our backlog is *all* available TREs.
	 */
	gsi = &endpoint->ipa->gsi;
	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}
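
/* A sketch of the replenish state machine implemented above:
 *
 *	ENABLED clear:	buffer credits accumulate in replenish_saved
 *			until the endpoint is (re)enabled.
 *	ENABLED set:	credits live in replenish_backlog; the first
 *			caller to win IPA_REPLENISH_ACTIVE drains the
 *			backlog, turning each credit into one posted
 *			receive buffer, while concurrent callers just
 *			add their count to the backlog and return.
 *
 * On an allocation failure the unposted credit (plus the caller's
 * count) goes back on the backlog, and if no buffers at all are
 * outstanding the delayed work provides the retry trigger.
 */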

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 max_backlog;
	u32 saved;

	set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
		atomic_add(saved, &endpoint->replenish_backlog);

	/* Start replenishing if hardware currently has no buffers */
	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
		ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	u32 backlog;

	clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
		atomic_add(backlog, &endpoint->replenish_saved);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint, 0);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	/* Now receive it, or drop it if there's no netdev */
	if (endpoint->netdev)
		ipa_modem_skb_rx(endpoint->netdev, skb);
	else if (skb)
		dev_kfree_skb_any(skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes).  Other types aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u32_get_bits(status->endp_dst_idx,
				   IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_status_drop_packet(const struct ipa_status *status)
{
	u32 val;

	/* Deaggregation exceptions we drop; all other types we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the
		 * packet, including the status element.  If the hardware
		 * is configured to pad packet data to an aligned boundary,
		 * account for that.  And if checksum offload is enabled,
		 * a trailer containing computed checksum information will
		 * be appended.
		 */
		align = endpoint->data->rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->data->checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		/* Charge the new packet with a proportional fraction of
		 * the unused space in the original receive buffer.
		 * XXX Charge a proportion of the *whole* receive buffer?
		 */
		if (!ipa_status_drop_packet(status)) {
			u32 extra = unused * len / total_len;
			void *data2 = data + sizeof(*status);
			u32 len2 = le16_to_cpu(status->pkt_len);

			/* Client receives only packet data (no status) */
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}
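
/* For reference, the receive buffer layout the loop above walks when
 * status (and checksum offload) is enabled (sizes not to scale):
 *
 *	+--------+---------------------+-----+---------+--------+--
 *	| status | packet data         | pad | csum    | status | ...
 *	| elem   | (status->pkt_len)   |     | trailer | elem   |
 *	+--------+---------------------+-----+---------+--------+--
 *
 * Each iteration consumes one status element plus the aligned packet
 * it describes; status elements whose packets carry nothing useful
 * (wrong destination, zero length, unsupported opcode) are skipped
 * one element at a time.
 */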

/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
}

/* Complete transaction initiated in ipa_endpoint_replenish_one() */
static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
	struct page *page;

	ipa_endpoint_replenish(endpoint, 1);

	if (trans->cancelled)
		return;

	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->data->status_enable)
		ipa_endpoint_status_parse(endpoint, page, trans->len);
	else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
		trans->data = NULL;	/* Pages have been consumed */
}

void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_tx_complete(endpoint, trans);
	else
		ipa_endpoint_rx_complete(endpoint, trans);
}

void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			put_page(page);
	}
}

void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	u32 val;

	/* ROUTE_DIS is 0 */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_RETAIN_HDR_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}

void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}

/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	bool suspended = false;
	dma_addr_t addr;
	bool legacy;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	suspended = ipa_endpoint_program_suspend(endpoint, false);

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		msleep(1);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	legacy = ipa->version == IPA_VERSION_3_5_1;
	gsi_channel_reset(gsi, endpoint->channel_id, legacy);

	msleep(1);

	goto out_suspend_again;

err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}

static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool special;
	bool legacy;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 *
	 * IPA v3.5.1 enables the doorbell engine.  Newer versions do not.
	 */
	legacy = ipa->version == IPA_VERSION_3_5_1;
	special = !endpoint->toward_ipa && endpoint->data->aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, legacy);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}

static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_program_delay(endpoint, false);
	else
		(void)ipa_endpoint_program_suspend(endpoint, false);
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_ext(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_init_aggr(endpoint);
	ipa_endpoint_init_deaggr(endpoint);
	ipa_endpoint_init_seq(endpoint);
	ipa_endpoint_status(endpoint);
}

int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}

void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	if (!(ipa->enabled & mask))
		return;

	ipa->enabled ^= mask;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}

void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool stop_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		(void)ipa_endpoint_program_suspend(endpoint, true);
	}

	/* IPA v3.5.1 doesn't use channel stop for suspend */
	stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}

void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool start_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		(void)ipa_endpoint_program_suspend(endpoint, false);

	/* IPA v3.5.1 doesn't use channel start for resume */
	start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}

void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_cmd_tag_process(ipa);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}

void ipa_endpoint_resume(struct ipa *ipa)
{
	if (!ipa->setup_complete)
		return;

	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}

static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
		clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
		atomic_set(&endpoint->replenish_saved,
			   gsi_channel_tre_max(gsi, endpoint->channel_id));
		atomic_set(&endpoint->replenish_backlog, 0);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}

static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}

void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	ipa->set_up = 0;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}

void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
	ipa->set_up = 0;
}

int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

	/* Our RX is an IPA producer */
	rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
	max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;

	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}
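
/* A small worked example of the computation above (register contents
 * are hypothetical): if FLAVOR_0 reported the lowest producer (RX)
 * endpoint as 16, with 8 producer pipes and 10 consumer pipes, then
 * rx_mask = GENMASK(23, 16), tx_mask = GENMASK(9, 0), and so
 * ipa->available = 0x00ff03ff.
 */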

void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}

static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->seq_type = data->endpoint.seq_type;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->data = &data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}

void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}

void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}

/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}